VirtualBox

source: vbox/trunk/src/recompiler/target-i386/op_helper.c@ 48064

Last change on this file since 48064 was 48064, checked in by vboxsync, 11 years ago

1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20/*
21 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
22 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
23 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
24 * a choice of LGPL license versions is made available with the language indicating
25 * that LGPLv2 or any later version may be used, or where a choice of which version
26 * of the LGPL is applied is otherwise unspecified.
27 */
28
29#include "exec.h"
30#include "exec-all.h"
31#include "host-utils.h"
32#include "ioport.h"
33
34#ifdef VBOX
35# include "qemu-common.h"
36# include <math.h>
37# include "tcg.h"
38#endif /* VBOX */
39
40//#define DEBUG_PCALL
41
42
43#ifdef DEBUG_PCALL
44# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
45# define LOG_PCALL_STATE(env) \
46 log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
47#else
48# define LOG_PCALL(...) do { } while (0)
49# define LOG_PCALL_STATE(env) do { } while (0)
50#endif
51
52
53#if 0
54#define raise_exception_err(a, b)\
55do {\
56 qemu_log("raise_exception line=%d\n", __LINE__);\
57 (raise_exception_err)(a, b);\
58} while (0)
59#endif
60
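/* Parity lookup table: parity_table[b] is CC_P when the byte b contains an
   even number of set bits (the x86 PF flag only reflects the low 8 bits of
   the result). */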
61static const uint8_t parity_table[256] = {
62 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
63 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
67 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
68 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
69 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
70 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
71 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
73 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
74 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
75 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
76 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
77 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
78 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
79 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
80 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
81 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
82 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
83 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
84 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
85 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
86 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
87 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
88 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
89 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
90 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
91 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
92 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
93 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
94};
95
96/* modulo 17 table */
97static const uint8_t rclw_table[32] = {
98 0, 1, 2, 3, 4, 5, 6, 7,
99 8, 9,10,11,12,13,14,15,
100 16, 0, 1, 2, 3, 4, 5, 6,
101 7, 8, 9,10,11,12,13,14,
102};
103
104/* modulo 9 table */
105static const uint8_t rclb_table[32] = {
106 0, 1, 2, 3, 4, 5, 6, 7,
107 8, 0, 1, 2, 3, 4, 5, 6,
108 7, 8, 0, 1, 2, 3, 4, 5,
109 6, 7, 8, 0, 1, 2, 3, 4,
110};
111
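/* x87 constants in the order used by the FPU load-constant instructions:
   0.0, 1.0, pi, log10(2), ln(2), log2(e) and log2(10)
   (FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2, FLDL2E, FLDL2T). */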
112static const CPU86_LDouble f15rk[7] =
113{
114 0.00000000000000000000L,
115 1.00000000000000000000L,
116 3.14159265358979323851L, /*pi*/
117 0.30102999566398119523L, /*lg2*/
118 0.69314718055994530943L, /*ln2*/
119 1.44269504088896340739L, /*l2e*/
120 3.32192809488736234781L, /*l2t*/
121};
122
123/* broken thread support */
124
125static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
126
127void helper_lock(void)
128{
129 spin_lock(&global_cpu_lock);
130}
131
132void helper_unlock(void)
133{
134 spin_unlock(&global_cpu_lock);
135}
136
137void helper_write_eflags(target_ulong t0, uint32_t update_mask)
138{
139 load_eflags(t0, update_mask);
140}
141
142target_ulong helper_read_eflags(void)
143{
144 uint32_t eflags;
145 eflags = helper_cc_compute_all(CC_OP);
146 eflags |= (DF & DF_MASK);
147 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
148 return eflags;
149}
150
151#ifdef VBOX
152
153void helper_write_eflags_vme(target_ulong t0)
154{
155 unsigned int new_eflags = t0;
156
157 assert(env->eflags & (1<<VM_SHIFT));
158
159 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
160 /* if TF will be set -> #GP */
161 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
162 || (new_eflags & TF_MASK)) {
163 raise_exception(EXCP0D_GPF);
164 } else {
165 load_eflags(new_eflags,
166 (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff);
167
168 if (new_eflags & IF_MASK) {
169 env->eflags |= VIF_MASK;
170 } else {
171 env->eflags &= ~VIF_MASK;
172 }
173 }
174}
175
176target_ulong helper_read_eflags_vme(void)
177{
178 uint32_t eflags;
179 eflags = helper_cc_compute_all(CC_OP);
180 eflags |= (DF & DF_MASK);
181 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
182 if (env->eflags & VIF_MASK)
183 eflags |= IF_MASK;
184 else
185 eflags &= ~IF_MASK;
186
187 /* According to AMD manual, should be read with IOPL == 3 */
188 eflags |= (3 << IOPL_SHIFT);
189
190 /* We only use helper_read_eflags_vme() in 16-bit mode */
191 return eflags & 0xffff;
192}
193
194void helper_dump_state()
195{
196 LogRel(("CS:EIP=%08x:%08x, FLAGS=%08x\n", env->segs[R_CS].base, env->eip, env->eflags));
197 LogRel(("EAX=%08x\tECX=%08x\tEDX=%08x\tEBX=%08x\n",
198 (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
199 (uint32_t)env->regs[R_EDX], (uint32_t)env->regs[R_EBX]));
200 LogRel(("ESP=%08x\tEBP=%08x\tESI=%08x\tEDI=%08x\n",
201 (uint32_t)env->regs[R_ESP], (uint32_t)env->regs[R_EBP],
202 (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI]));
203}
204
205/**
206 * Updates e2 with the DESC_A_MASK, writes it to the descriptor table, and
207 * returns the updated e2.
208 *
209 * @returns e2 with A set.
210 * @param e2 The 2nd selector DWORD.
211 */
212static uint32_t set_segment_accessed(int selector, uint32_t e2)
213{
214 SegmentCache *dt = selector & X86_SEL_LDT ? &env->ldt : &env->gdt;
215 target_ulong ptr = dt->base + (selector & X86_SEL_MASK);
216
217 e2 |= DESC_A_MASK;
218 stl_kernel(ptr + 4, e2);
219 return e2;
220}
221
222#endif /* VBOX */
223
224/* return non-zero on error */
225static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
226 int selector)
227{
228 SegmentCache *dt;
229 int index;
230 target_ulong ptr;
231
232#ifdef VBOX
233 /* Trying to load a selector with CPL=1? */
234 /** @todo this is a hack to correct the incorrect checking order for pending interrupts in the patm iret replacement code (corrected in the ring-1 version) */
235 /** @todo in theory the iret could fault and we'd still need this. */
236 /** @todo r=bird: In fact this is just a log statement and has no function at
237 * all beyond that as the selector RPL is NOT used in this function!
238 * Guess this code lived elsewhere and got modified over time as well
239 * as obsoleted. */
240 if ((env->hflags & HF_CPL_MASK) == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0) && !EMIsRawRing1Enabled(env->pVM))
241 {
242 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
243 selector = selector & 0xfffc;
244 }
245#endif /* VBOX */
246
247 if (selector & 0x4)
248 dt = &env->ldt;
249 else
250 dt = &env->gdt;
251 index = selector & ~7;
252 if ((index + 7) > dt->limit)
253 return -1;
254 ptr = dt->base + index;
255 *e1_ptr = ldl_kernel(ptr);
256 *e2_ptr = ldl_kernel(ptr + 4);
257 return 0;
258}
259
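/* Decode the 20-bit limit from the descriptor words (e1 = low dword, e2 = high
   dword); when the granularity bit is set the limit is in 4K units, so shift
   it up and fill in the low 12 bits. get_seg_base below assembles the 32-bit
   base from its three descriptor fields in the same way. */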
260static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
261{
262 unsigned int limit;
263 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
264 if (e2 & DESC_G_MASK)
265 limit = (limit << 12) | 0xfff;
266 return limit;
267}
268
269static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
270{
271 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
272}
273
274static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
275{
276 sc->base = get_seg_base(e1, e2);
277 sc->limit = get_seg_limit(e1, e2);
278#ifndef VBOX
279 sc->flags = e2;
280#else
281 sc->flags = e2 & DESC_RAW_FLAG_BITS;
282 sc->newselector = 0;
283 sc->fVBoxFlags = CPUMSELREG_FLAGS_VALID;
284#endif
285}
286
287/* init the segment cache in vm86 mode. */
288static inline void load_seg_vm(int seg, int selector)
289{
290 selector &= 0xffff;
291#ifdef VBOX
292 /* flags must be 0xf3; expand-up read/write accessed data segment with DPL=3. (VT-x) */
293 unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK;
294 flags |= (3 << DESC_DPL_SHIFT);
295
296 cpu_x86_load_seg_cache(env, seg, selector,
297 (selector << 4), 0xffff, flags);
298#else /* VBOX */
299 cpu_x86_load_seg_cache(env, seg, selector,
300 (selector << 4), 0xffff, 0);
301#endif /* VBOX */
302}
303
304static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
305 uint32_t *esp_ptr, int dpl)
306{
307#ifndef VBOX
308 int type, index, shift;
309#else
310 unsigned int type, index, shift;
311#endif
312
313#if 0
314 {
315 int i;
316 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
317 for(i=0;i<env->tr.limit;i++) {
318 printf("%02x ", env->tr.base[i]);
319 if ((i & 7) == 7) printf("\n");
320 }
321 printf("\n");
322 }
323#endif
324
325 if (!(env->tr.flags & DESC_P_MASK))
326 cpu_abort(env, "invalid tss");
327 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
328 if ((type & 7) != 1)
329 cpu_abort(env, "invalid tss type");
330 shift = type >> 3;
331 index = (dpl * 4 + 2) << shift;
332 if (index + (4 << shift) - 1 > env->tr.limit)
333 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
334 if (shift == 0) {
335 *esp_ptr = lduw_kernel(env->tr.base + index);
336 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
337 } else {
338 *esp_ptr = ldl_kernel(env->tr.base + index);
339 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
340 }
341}
342
343/* XXX: merge with load_seg() */
344static void tss_load_seg(int seg_reg, int selector)
345{
346 uint32_t e1, e2;
347 int rpl, dpl, cpl;
348
349#ifdef VBOX
350 e1 = e2 = 0; /* gcc warning? */
351 cpl = env->hflags & HF_CPL_MASK;
352 /* Trying to load a selector with CPL=1? */
353 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
354 {
355 Log(("RPL 1 -> sel %04X -> %04X (tss_load_seg)\n", selector, selector & 0xfffc));
356 selector = selector & 0xfffc;
357 }
358#endif /* VBOX */
359
360 if ((selector & 0xfffc) != 0) {
361 if (load_segment(&e1, &e2, selector) != 0)
362 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
363 if (!(e2 & DESC_S_MASK))
364 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
365 rpl = selector & 3;
366 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
367 cpl = env->hflags & HF_CPL_MASK;
368 if (seg_reg == R_CS) {
369 if (!(e2 & DESC_CS_MASK))
370 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
371 /* XXX: is this correct? */
372 if (dpl != rpl)
373 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
374 if ((e2 & DESC_C_MASK) && dpl > rpl)
375 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
376 } else if (seg_reg == R_SS) {
377 /* SS must be writable data */
378 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
379 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
380 if (dpl != cpl || dpl != rpl)
381 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
382 } else {
383 /* not readable code */
384 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
385 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
386 /* if data or non-conforming code, check the access rights */
387 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
388 if (dpl < cpl || dpl < rpl)
389 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
390 }
391 }
392 if (!(e2 & DESC_P_MASK))
393 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
394 cpu_x86_load_seg_cache(env, seg_reg, selector,
395 get_seg_base(e1, e2),
396 get_seg_limit(e1, e2),
397 e2);
398 } else {
399 if (seg_reg == R_SS || seg_reg == R_CS)
400 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
401#ifdef VBOX
402# if 0 /** @todo now we ignore loading 0 selectors, need to check what is correct once */
403 cpu_x86_load_seg_cache(env, seg_reg, selector,
404 0, 0, 0);
405# endif
406#endif /* VBOX */
407 }
408}
409
410#define SWITCH_TSS_JMP 0
411#define SWITCH_TSS_IRET 1
412#define SWITCH_TSS_CALL 2
413
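/* Perform a hardware task switch: save the current register state into the
   old TSS, read the new TSS, update the busy bits, CR3, TR and LDTR, then
   load EFLAGS, the general registers and the segment registers from the new
   TSS. 'source' says whether we got here via JMP, CALL or IRET (see the
   SWITCH_TSS_* values above), which decides how NT and the busy bits are
   handled. */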
414/* XXX: restore CPU state in registers (PowerPC case) */
415static void switch_tss(int tss_selector,
416 uint32_t e1, uint32_t e2, int source,
417 uint32_t next_eip)
418{
419 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
420 target_ulong tss_base;
421 uint32_t new_regs[8], new_segs[6];
422 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
423 uint32_t old_eflags, eflags_mask;
424 SegmentCache *dt;
425#ifndef VBOX
426 int index;
427#else
428 unsigned int index;
429#endif
430 target_ulong ptr;
431
432 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
433 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
434
435 /* if task gate, we read the TSS segment and we load it */
436 if (type == 5) {
437 if (!(e2 & DESC_P_MASK))
438 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
439 tss_selector = e1 >> 16;
440 if (tss_selector & 4)
441 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
442 if (load_segment(&e1, &e2, tss_selector) != 0)
443 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
444 if (e2 & DESC_S_MASK)
445 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
446 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
447 if ((type & 7) != 1)
448 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
449 }
450
451 if (!(e2 & DESC_P_MASK))
452 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
453
454 if (type & 8)
455 tss_limit_max = 103;
456 else
457 tss_limit_max = 43;
458 tss_limit = get_seg_limit(e1, e2);
459 tss_base = get_seg_base(e1, e2);
460 if ((tss_selector & 4) != 0 ||
461 tss_limit < tss_limit_max)
462 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
463 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
464 if (old_type & 8)
465 old_tss_limit_max = 103;
466 else
467 old_tss_limit_max = 43;
468
469#ifndef VBOX /* The old TSS is written first... */
470 /* read all the registers from the new TSS */
471 if (type & 8) {
472 /* 32 bit */
473 new_cr3 = ldl_kernel(tss_base + 0x1c);
474 new_eip = ldl_kernel(tss_base + 0x20);
475 new_eflags = ldl_kernel(tss_base + 0x24);
476 for(i = 0; i < 8; i++)
477 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
478 for(i = 0; i < 6; i++)
479 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
480 new_ldt = lduw_kernel(tss_base + 0x60);
481 new_trap = ldl_kernel(tss_base + 0x64);
482 } else {
483 /* 16 bit */
484 new_cr3 = 0;
485 new_eip = lduw_kernel(tss_base + 0x0e);
486 new_eflags = lduw_kernel(tss_base + 0x10);
487 for(i = 0; i < 8; i++)
488 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
489 for(i = 0; i < 4; i++)
490 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
491 new_ldt = lduw_kernel(tss_base + 0x2a);
492 new_segs[R_FS] = 0;
493 new_segs[R_GS] = 0;
494 new_trap = 0;
495 }
496#endif
497
498 /* NOTE: we must avoid memory exceptions during the task switch,
499 so we make dummy accesses beforehand */
500 /* XXX: it can still fail in some cases, so a bigger hack is
501 necessary to validate the TLB after having done the accesses */
502
503 v1 = ldub_kernel(env->tr.base);
504 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
505 stb_kernel(env->tr.base, v1);
506 stb_kernel(env->tr.base + old_tss_limit_max, v2);
507
508 /* clear busy bit (it is restartable) */
509 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
510 target_ulong ptr;
511 uint32_t e2;
512 ptr = env->gdt.base + (env->tr.selector & ~7);
513 e2 = ldl_kernel(ptr + 4);
514 e2 &= ~DESC_TSS_BUSY_MASK;
515 stl_kernel(ptr + 4, e2);
516 }
517 old_eflags = compute_eflags();
518 if (source == SWITCH_TSS_IRET)
519 old_eflags &= ~NT_MASK;
520
521 /* save the current state in the old TSS */
522 if (type & 8) {
523 /* 32 bit */
524 stl_kernel(env->tr.base + 0x20, next_eip);
525 stl_kernel(env->tr.base + 0x24, old_eflags);
526 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
527 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
528 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
529 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
530 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
531 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
532 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
533 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
534 for(i = 0; i < 6; i++)
535 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
536#if defined(VBOX) && defined(DEBUG)
537 printf("TSS 32 bits switch\n");
538 printf("Saving CS=%08X\n", env->segs[R_CS].selector);
539#endif
540 } else {
541 /* 16 bit */
542 stw_kernel(env->tr.base + 0x0e, next_eip);
543 stw_kernel(env->tr.base + 0x10, old_eflags);
544 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
545 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
546 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
547 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
548 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
549 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
550 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
551 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
552 for(i = 0; i < 4; i++)
553 stw_kernel(env->tr.base + (0x22 + i * 2), env->segs[i].selector);
554 }
555
556#ifdef VBOX
557 /* read all the registers from the new TSS - may be the same as the old one */
558 if (type & 8) {
559 /* 32 bit */
560 new_cr3 = ldl_kernel(tss_base + 0x1c);
561 new_eip = ldl_kernel(tss_base + 0x20);
562 new_eflags = ldl_kernel(tss_base + 0x24);
563 for(i = 0; i < 8; i++)
564 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
565 for(i = 0; i < 6; i++)
566 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
567 new_ldt = lduw_kernel(tss_base + 0x60);
568 new_trap = ldl_kernel(tss_base + 0x64);
569 } else {
570 /* 16 bit */
571 new_cr3 = 0;
572 new_eip = lduw_kernel(tss_base + 0x0e);
573 new_eflags = lduw_kernel(tss_base + 0x10);
574 for(i = 0; i < 8; i++)
575 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
576 for(i = 0; i < 4; i++)
577 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 2));
578 new_ldt = lduw_kernel(tss_base + 0x2a);
579 new_segs[R_FS] = 0;
580 new_segs[R_GS] = 0;
581 new_trap = 0;
582 }
583#endif
584
585 /* from now on, if an exception occurs, it will occur in the next task's
586 context */
587
588 if (source == SWITCH_TSS_CALL) {
589 stw_kernel(tss_base, env->tr.selector);
590 new_eflags |= NT_MASK;
591 }
592
593 /* set busy bit */
594 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
595 target_ulong ptr;
596 uint32_t e2;
597 ptr = env->gdt.base + (tss_selector & ~7);
598 e2 = ldl_kernel(ptr + 4);
599 e2 |= DESC_TSS_BUSY_MASK;
600 stl_kernel(ptr + 4, e2);
601 }
602
603 /* set the new CPU state */
604 /* from this point, any exception which occurs can give problems */
605 env->cr[0] |= CR0_TS_MASK;
606 env->hflags |= HF_TS_MASK;
607 env->tr.selector = tss_selector;
608 env->tr.base = tss_base;
609 env->tr.limit = tss_limit;
610#ifndef VBOX
611 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
612#else
613 env->tr.flags = e2 & (DESC_RAW_FLAG_BITS & ~(DESC_TSS_BUSY_MASK)); /** @todo stop clearing the busy bit, VT-x and AMD-V seems to set it in the hidden bits. */
614 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
615 env->tr.newselector = 0;
616#endif
617
618 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
619 cpu_x86_update_cr3(env, new_cr3);
620 }
621
622 /* load all registers without an exception, then reload them with
623 possible exception */
624 env->eip = new_eip;
625 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
626 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
627 if (!(type & 8))
628 eflags_mask &= 0xffff;
629 load_eflags(new_eflags, eflags_mask);
630 /* XXX: what to do in 16 bit case ? */
631 EAX = new_regs[0];
632 ECX = new_regs[1];
633 EDX = new_regs[2];
634 EBX = new_regs[3];
635 ESP = new_regs[4];
636 EBP = new_regs[5];
637 ESI = new_regs[6];
638 EDI = new_regs[7];
639 if (new_eflags & VM_MASK) {
640 for(i = 0; i < 6; i++)
641 load_seg_vm(i, new_segs[i]);
642 /* in vm86, CPL is always 3 */
643 cpu_x86_set_cpl(env, 3);
644 } else {
645 /* CPL is set to the RPL of CS */
646 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
647 /* load just the selectors first, as the rest may trigger exceptions */
648 for(i = 0; i < 6; i++)
649 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
650 }
651
652 env->ldt.selector = new_ldt & ~4;
653 env->ldt.base = 0;
654 env->ldt.limit = 0;
655 env->ldt.flags = 0;
656#ifdef VBOX
657 env->ldt.flags = DESC_INTEL_UNUSABLE;
658 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
659 env->ldt.newselector = 0;
660#endif
661
662 /* load the LDT */
663 if (new_ldt & 4)
664 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
665
666 if ((new_ldt & 0xfffc) != 0) {
667 dt = &env->gdt;
668 index = new_ldt & ~7;
669 if ((index + 7) > dt->limit)
670 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
671 ptr = dt->base + index;
672 e1 = ldl_kernel(ptr);
673 e2 = ldl_kernel(ptr + 4);
674 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
675 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
676 if (!(e2 & DESC_P_MASK))
677 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
678 load_seg_cache_raw_dt(&env->ldt, e1, e2);
679 }
680
681 /* load the segments */
682 if (!(new_eflags & VM_MASK)) {
683 tss_load_seg(R_CS, new_segs[R_CS]);
684 tss_load_seg(R_SS, new_segs[R_SS]);
685 tss_load_seg(R_ES, new_segs[R_ES]);
686 tss_load_seg(R_DS, new_segs[R_DS]);
687 tss_load_seg(R_FS, new_segs[R_FS]);
688 tss_load_seg(R_GS, new_segs[R_GS]);
689 }
690
691 /* check that EIP is in the CS segment limits */
692 if (new_eip > env->segs[R_CS].limit) {
693 /* XXX: different exception if CALL ? */
694 raise_exception_err(EXCP0D_GPF, 0);
695 }
696
697#ifndef CONFIG_USER_ONLY
698 /* reset local breakpoints */
699 if (env->dr[7] & 0x55) {
700 for (i = 0; i < 4; i++) {
701 if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
702 hw_breakpoint_remove(env, i);
703 }
704 env->dr[7] &= ~0x55;
705 }
706#endif
707}
708
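/* The TSS I/O permission bitmap starts at the 16-bit offset stored at
   TSS+0x66; each I/O port is one bit, and an access of 'size' bytes is
   allowed only if all 'size' bits starting at bit 'addr' are clear. */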
709/* check if Port I/O is allowed in TSS */
710static inline void check_io(int addr, int size)
711{
712#ifndef VBOX
713 int io_offset, val, mask;
714#else
715 int val, mask;
716 unsigned int io_offset;
717#endif /* VBOX */
718
719 /* TSS must be a valid 32 bit one */
720 if (!(env->tr.flags & DESC_P_MASK) ||
721 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
722 env->tr.limit < 103)
723 goto fail;
724 io_offset = lduw_kernel(env->tr.base + 0x66);
725 io_offset += (addr >> 3);
726 /* Note: the check needs two bytes */
727 if ((io_offset + 1) > env->tr.limit)
728 goto fail;
729 val = lduw_kernel(env->tr.base + io_offset);
730 val >>= (addr & 7);
731 mask = (1 << size) - 1;
732 /* all bits must be zero to allow the I/O */
733 if ((val & mask) != 0) {
734 fail:
735 raise_exception_err(EXCP0D_GPF, 0);
736 }
737}
738
739#ifdef VBOX
740
741/* Keep in sync with gen_check_external_event() */
742void helper_check_external_event()
743{
744 if ( (env->interrupt_request & ( CPU_INTERRUPT_EXTERNAL_FLUSH_TLB
745 | CPU_INTERRUPT_EXTERNAL_EXIT
746 | CPU_INTERRUPT_EXTERNAL_TIMER
747 | CPU_INTERRUPT_EXTERNAL_DMA))
748 || ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
749 && (env->eflags & IF_MASK)
750 && !(env->hflags & HF_INHIBIT_IRQ_MASK) ) )
751 {
752 helper_external_event();
753 }
754
755}
756
757void helper_sync_seg(uint32_t reg)
758{
759 if (env->segs[reg].newselector)
760 sync_seg(env, reg, env->segs[reg].newselector);
761}
762
763#endif /* VBOX */
764
765void helper_check_iob(uint32_t t0)
766{
767 check_io(t0, 1);
768}
769
770void helper_check_iow(uint32_t t0)
771{
772 check_io(t0, 2);
773}
774
775void helper_check_iol(uint32_t t0)
776{
777 check_io(t0, 4);
778}
779
780void helper_outb(uint32_t port, uint32_t data)
781{
782#ifndef VBOX
783 cpu_outb(port, data & 0xff);
784#else
785 cpu_outb(env, port, data & 0xff);
786#endif
787}
788
789target_ulong helper_inb(uint32_t port)
790{
791#ifndef VBOX
792 return cpu_inb(port);
793#else
794 return cpu_inb(env, port);
795#endif
796}
797
798void helper_outw(uint32_t port, uint32_t data)
799{
800#ifndef VBOX
801 cpu_outw(port, data & 0xffff);
802#else
803 cpu_outw(env, port, data & 0xffff);
804#endif
805}
806
807target_ulong helper_inw(uint32_t port)
808{
809#ifndef VBOX
810 return cpu_inw(port);
811#else
812 return cpu_inw(env, port);
813#endif
814}
815
816void helper_outl(uint32_t port, uint32_t data)
817{
818#ifndef VBOX
819 cpu_outl(port, data);
820#else
821 cpu_outl(env, port, data);
822#endif
823}
824
825target_ulong helper_inl(uint32_t port)
826{
827#ifndef VBOX
828 return cpu_inl(port);
829#else
830 return cpu_inl(env, port);
831#endif
832}
833
834static inline unsigned int get_sp_mask(unsigned int e2)
835{
836 if (e2 & DESC_B_MASK)
837 return 0xffffffff;
838 else
839 return 0xffff;
840}
841
842static int exeption_has_error_code(int intno)
843{
844 switch(intno) {
845 case 8:
846 case 10:
847 case 11:
848 case 12:
849 case 13:
850 case 14:
851 case 17:
852 return 1;
853 }
854 return 0;
855}
856
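/* SET_ESP updates only the part of ESP selected by the stack-size mask, so a
   16-bit stack segment keeps the upper 16 bits of ESP unchanged (and, on
   x86_64, a 32-bit stack zero-extends the value into RSP). */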
857#ifdef TARGET_X86_64
858#define SET_ESP(val, sp_mask)\
859do {\
860 if ((sp_mask) == 0xffff)\
861 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
862 else if ((sp_mask) == 0xffffffffLL)\
863 ESP = (uint32_t)(val);\
864 else\
865 ESP = (val);\
866} while (0)
867#else
868#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
869#endif
870
871/* On 64-bit machines this can overflow, so this segment-addition macro
872 * can be used to trim the value to 32 bits whenever needed */
873#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
874
875/* XXX: add an is_user flag to have proper security support */
876#define PUSHW(ssp, sp, sp_mask, val)\
877{\
878 sp -= 2;\
879 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
880}
881
882#define PUSHL(ssp, sp, sp_mask, val)\
883{\
884 sp -= 4;\
885 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
886}
887
888#define POPW(ssp, sp, sp_mask, val)\
889{\
890 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
891 sp += 2;\
892}
893
894#define POPL(ssp, sp, sp_mask, val)\
895{\
896 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
897 sp += 4;\
898}
899
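/* Protected-mode interrupt/exception entry: look the gate up in the IDT,
   check its type, DPL and present bit, then either perform a task switch
   (task gate) or load the handler's code segment, switching to the
   inner-level stack from the TSS when the privilege level changes, and push
   the old SS:ESP (if a stack switch happened), EFLAGS, CS:EIP and any error
   code onto the handler's stack. */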
900/* protected mode interrupt */
901static void do_interrupt_protected(int intno, int is_int, int error_code,
902 unsigned int next_eip, int is_hw)
903{
904 SegmentCache *dt;
905 target_ulong ptr, ssp;
906 int type, dpl, selector, ss_dpl, cpl;
907 int has_error_code, new_stack, shift;
908 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
909 uint32_t old_eip, sp_mask;
910
911#ifdef VBOX
912 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
913 cpu_loop_exit();
914#endif
915
916 has_error_code = 0;
917 if (!is_int && !is_hw)
918 has_error_code = exeption_has_error_code(intno);
919 if (is_int)
920 old_eip = next_eip;
921 else
922 old_eip = env->eip;
923
924 dt = &env->idt;
925#ifndef VBOX
926 if (intno * 8 + 7 > dt->limit)
927#else
928 if ((unsigned)intno * 8 + 7 > dt->limit)
929#endif
930 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
931 ptr = dt->base + intno * 8;
932 e1 = ldl_kernel(ptr);
933 e2 = ldl_kernel(ptr + 4);
934 /* check gate type */
935 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
936 switch(type) {
937 case 5: /* task gate */
938#ifdef VBOX
939 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
940 cpl = env->hflags & HF_CPL_MASK;
941 /* check privilege if software int */
942 if (is_int && dpl < cpl)
943 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
944#endif
945 /* must do that check here to return the correct error code */
946 if (!(e2 & DESC_P_MASK))
947 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
948 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
949 if (has_error_code) {
950 int type;
951 uint32_t mask;
952 /* push the error code */
953 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
954 shift = type >> 3;
955 if (env->segs[R_SS].flags & DESC_B_MASK)
956 mask = 0xffffffff;
957 else
958 mask = 0xffff;
959 esp = (ESP - (2 << shift)) & mask;
960 ssp = env->segs[R_SS].base + esp;
961 if (shift)
962 stl_kernel(ssp, error_code);
963 else
964 stw_kernel(ssp, error_code);
965 SET_ESP(esp, mask);
966 }
967 return;
968 case 6: /* 286 interrupt gate */
969 case 7: /* 286 trap gate */
970 case 14: /* 386 interrupt gate */
971 case 15: /* 386 trap gate */
972 break;
973 default:
974 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
975 break;
976 }
977 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
978 cpl = env->hflags & HF_CPL_MASK;
979 /* check privilege if software int */
980 if (is_int && dpl < cpl)
981 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
982 /* check valid bit */
983 if (!(e2 & DESC_P_MASK))
984 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
985 selector = e1 >> 16;
986 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
987 if ((selector & 0xfffc) == 0)
988 raise_exception_err(EXCP0D_GPF, 0);
989
990 if (load_segment(&e1, &e2, selector) != 0)
991 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
992#ifdef VBOX /** @todo figure out when this is done one day... */
993 if (!(e2 & DESC_A_MASK))
994 e2 = set_segment_accessed(selector, e2);
995#endif
996 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
997 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
998 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
999 if (dpl > cpl)
1000 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1001 if (!(e2 & DESC_P_MASK))
1002 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1003 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
1004 /* to inner privilege */
1005 get_ss_esp_from_tss(&ss, &esp, dpl);
1006 if ((ss & 0xfffc) == 0)
1007 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
1008 if ((ss & 3) != dpl)
1009 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
1010 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
1011 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
1012#ifdef VBOX /** @todo figure out when this is done one day... */
1013 if (!(ss_e2 & DESC_A_MASK))
1014 ss_e2 = set_segment_accessed(ss, ss_e2);
1015#endif
1016 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
1017 if (ss_dpl != dpl)
1018 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
1019 if (!(ss_e2 & DESC_S_MASK) ||
1020 (ss_e2 & DESC_CS_MASK) ||
1021 !(ss_e2 & DESC_W_MASK))
1022 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
1023 if (!(ss_e2 & DESC_P_MASK))
1024#ifdef VBOX /* See page 3-477 of 253666.pdf */
1025 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
1026#else
1027 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
1028#endif
1029 new_stack = 1;
1030 sp_mask = get_sp_mask(ss_e2);
1031 ssp = get_seg_base(ss_e1, ss_e2);
1032#if defined(VBOX) && defined(DEBUG)
1033 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
1034#endif
1035 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1036 /* to same privilege */
1037 if (env->eflags & VM_MASK)
1038 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1039 new_stack = 0;
1040 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1041 ssp = env->segs[R_SS].base;
1042 esp = ESP;
1043 dpl = cpl;
1044 } else {
1045 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1046 new_stack = 0; /* avoid warning */
1047 sp_mask = 0; /* avoid warning */
1048 ssp = 0; /* avoid warning */
1049 esp = 0; /* avoid warning */
1050 }
1051
1052 shift = type >> 3;
1053
1054#if 0
1055 /* XXX: check that enough room is available */
1056 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
1057 if (env->eflags & VM_MASK)
1058 push_size += 8;
1059 push_size <<= shift;
1060#endif
1061 if (shift == 1) {
1062 if (new_stack) {
1063 if (env->eflags & VM_MASK) {
1064 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
1065 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
1066 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
1067 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
1068 }
1069 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
1070 PUSHL(ssp, esp, sp_mask, ESP);
1071 }
1072 PUSHL(ssp, esp, sp_mask, compute_eflags());
1073 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
1074 PUSHL(ssp, esp, sp_mask, old_eip);
1075 if (has_error_code) {
1076 PUSHL(ssp, esp, sp_mask, error_code);
1077 }
1078 } else {
1079 if (new_stack) {
1080 if (env->eflags & VM_MASK) {
1081 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
1082 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
1083 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
1084 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
1085 }
1086 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
1087 PUSHW(ssp, esp, sp_mask, ESP);
1088 }
1089 PUSHW(ssp, esp, sp_mask, compute_eflags());
1090 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
1091 PUSHW(ssp, esp, sp_mask, old_eip);
1092 if (has_error_code) {
1093 PUSHW(ssp, esp, sp_mask, error_code);
1094 }
1095 }
1096
1097 if (new_stack) {
1098 if (env->eflags & VM_MASK) {
1099 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
1100 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
1101 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
1102 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
1103 }
1104 ss = (ss & ~3) | dpl;
1105 cpu_x86_load_seg_cache(env, R_SS, ss,
1106 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
1107 }
1108 SET_ESP(esp, sp_mask);
1109
1110 selector = (selector & ~3) | dpl;
1111 cpu_x86_load_seg_cache(env, R_CS, selector,
1112 get_seg_base(e1, e2),
1113 get_seg_limit(e1, e2),
1114 e2);
1115 cpu_x86_set_cpl(env, dpl);
1116 env->eip = offset;
1117
1118 /* interrupt gate clear IF mask */
1119 if ((type & 1) == 0) {
1120 env->eflags &= ~IF_MASK;
1121 }
1122#ifndef VBOX
1123 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1124#else
1125 /*
1126 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1127 * gets confused by seemingly changed EFLAGS. See #3491 and
1128 * public bug #2341.
1129 */
1130 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1131#endif
1132}
1133
1134#ifdef VBOX
1135
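/* With CR4.VME set, the 32-byte interrupt redirection bitmap sits directly
   below the I/O permission bitmap in the TSS; a clear bit means the software
   interrupt is redirected to the virtual-8086 (real-mode style) handler. */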
1136/* check if VME interrupt redirection is enabled in TSS */
1137DECLINLINE(bool) is_vme_irq_redirected(int intno)
1138{
1139 unsigned int io_offset, intredir_offset;
1140 unsigned char val, mask;
1141
1142 /* TSS must be a valid 32 bit one */
1143 if (!(env->tr.flags & DESC_P_MASK) ||
1144 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
1145 env->tr.limit < 103)
1146 goto fail;
1147 io_offset = lduw_kernel(env->tr.base + 0x66);
1148 /* Make sure the io bitmap offset is valid; anything less than sizeof(VBOXTSS) means there's none. */
1149 if (io_offset < 0x68 + 0x20)
1150 io_offset = 0x68 + 0x20;
1151 /* the virtual interrupt redirection bitmap is located below the io bitmap */
1152 intredir_offset = io_offset - 0x20;
1153
1154 intredir_offset += (intno >> 3);
1155 if ((intredir_offset) > env->tr.limit)
1156 goto fail;
1157
1158 val = ldub_kernel(env->tr.base + intredir_offset);
1159 mask = 1 << (unsigned char)(intno & 7);
1160
1161 /* bit set means no redirection. */
1162 if ((val & mask) != 0) {
1163 return false;
1164 }
1165 return true;
1166
1167fail:
1168 raise_exception_err(EXCP0D_GPF, 0);
1169 return true;
1170}
1171
1172/* V86 mode software interrupt with CR4.VME=1 */
1173static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
1174{
1175 target_ulong ptr, ssp;
1176 int selector;
1177 uint32_t offset, esp;
1178 uint32_t old_cs, old_eflags;
1179 uint32_t iopl;
1180
1181 iopl = ((env->eflags >> IOPL_SHIFT) & 3);
1182
1183 if (!is_vme_irq_redirected(intno))
1184 {
1185 if (iopl == 3)
1186 {
1187 do_interrupt_protected(intno, 1, error_code, next_eip, 0);
1188 return;
1189 }
1190 else
1191 raise_exception_err(EXCP0D_GPF, 0);
1192 }
1193
1194 /* virtual mode idt is at linear address 0 */
1195 ptr = 0 + intno * 4;
1196 offset = lduw_kernel(ptr);
1197 selector = lduw_kernel(ptr + 2);
1198 esp = ESP;
1199 ssp = env->segs[R_SS].base;
1200 old_cs = env->segs[R_CS].selector;
1201
1202 old_eflags = compute_eflags();
1203 if (iopl < 3)
1204 {
1205 /* copy VIF into IF and set IOPL to 3 */
1206 if (env->eflags & VIF_MASK)
1207 old_eflags |= IF_MASK;
1208 else
1209 old_eflags &= ~IF_MASK;
1210
1211 old_eflags |= (3 << IOPL_SHIFT);
1212 }
1213
1214 /* XXX: use SS segment size ? */
1215 PUSHW(ssp, esp, 0xffff, old_eflags);
1216 PUSHW(ssp, esp, 0xffff, old_cs);
1217 PUSHW(ssp, esp, 0xffff, next_eip);
1218
1219 /* update processor state */
1220 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1221 env->eip = offset;
1222 env->segs[R_CS].selector = selector;
1223 env->segs[R_CS].base = (selector << 4);
1224 env->eflags &= ~(TF_MASK | RF_MASK);
1225
1226 if (iopl < 3)
1227 env->eflags &= ~VIF_MASK;
1228 else
1229 env->eflags &= ~IF_MASK;
1230}
1231
1232#endif /* VBOX */
1233
1234#ifdef TARGET_X86_64
1235
1236#define PUSHQ(sp, val)\
1237{\
1238 sp -= 8;\
1239 stq_kernel(sp, (val));\
1240}
1241
1242#define POPQ(sp, val)\
1243{\
1244 val = ldq_kernel(sp);\
1245 sp += 8;\
1246}
1247
1248static inline target_ulong get_rsp_from_tss(int level)
1249{
1250 int index;
1251
1252#if 0
1253 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
1254 env->tr.base, env->tr.limit);
1255#endif
1256
1257 if (!(env->tr.flags & DESC_P_MASK))
1258 cpu_abort(env, "invalid tss");
1259 index = 8 * level + 4;
1260 if ((index + 7) > env->tr.limit)
1261 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
1262 return ldq_kernel(env->tr.base + index);
1263}
1264
1265/* 64 bit interrupt */
1266static void do_interrupt64(int intno, int is_int, int error_code,
1267 target_ulong next_eip, int is_hw)
1268{
1269 SegmentCache *dt;
1270 target_ulong ptr;
1271 int type, dpl, selector, cpl, ist;
1272 int has_error_code, new_stack;
1273 uint32_t e1, e2, e3, ss;
1274 target_ulong old_eip, esp, offset;
1275
1276#ifdef VBOX
1277 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
1278 cpu_loop_exit();
1279#endif
1280
1281 has_error_code = 0;
1282 if (!is_int && !is_hw)
1283 has_error_code = exeption_has_error_code(intno);
1284 if (is_int)
1285 old_eip = next_eip;
1286 else
1287 old_eip = env->eip;
1288
1289 dt = &env->idt;
1290 if (intno * 16 + 15 > dt->limit)
1291 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1292 ptr = dt->base + intno * 16;
1293 e1 = ldl_kernel(ptr);
1294 e2 = ldl_kernel(ptr + 4);
1295 e3 = ldl_kernel(ptr + 8);
1296 /* check gate type */
1297 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1298 switch(type) {
1299 case 14: /* 386 interrupt gate */
1300 case 15: /* 386 trap gate */
1301 break;
1302 default:
1303 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1304 break;
1305 }
1306 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1307 cpl = env->hflags & HF_CPL_MASK;
1308 /* check privilege if software int */
1309 if (is_int && dpl < cpl)
1310 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1311 /* check valid bit */
1312 if (!(e2 & DESC_P_MASK))
1313 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
1314 selector = e1 >> 16;
1315 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1316 ist = e2 & 7;
1317 if ((selector & 0xfffc) == 0)
1318 raise_exception_err(EXCP0D_GPF, 0);
1319
1320 if (load_segment(&e1, &e2, selector) != 0)
1321 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1322 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1323 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1324 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1325 if (dpl > cpl)
1326 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1327 if (!(e2 & DESC_P_MASK))
1328 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1329 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1330 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1331 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1332 /* to inner privilege */
1333 if (ist != 0)
1334 esp = get_rsp_from_tss(ist + 3);
1335 else
1336 esp = get_rsp_from_tss(dpl);
1337 esp &= ~0xfLL; /* align stack */
1338 ss = 0;
1339 new_stack = 1;
1340 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1341 /* to same privilege */
1342 if (env->eflags & VM_MASK)
1343 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1344 new_stack = 0;
1345 if (ist != 0)
1346 esp = get_rsp_from_tss(ist + 3);
1347 else
1348 esp = ESP;
1349 esp &= ~0xfLL; /* align stack */
1350 dpl = cpl;
1351 } else {
1352 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1353 new_stack = 0; /* avoid warning */
1354 esp = 0; /* avoid warning */
1355 }
1356
1357 PUSHQ(esp, env->segs[R_SS].selector);
1358 PUSHQ(esp, ESP);
1359 PUSHQ(esp, compute_eflags());
1360 PUSHQ(esp, env->segs[R_CS].selector);
1361 PUSHQ(esp, old_eip);
1362 if (has_error_code) {
1363 PUSHQ(esp, error_code);
1364 }
1365
1366 if (new_stack) {
1367 ss = 0 | dpl;
1368#ifndef VBOX
1369 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1370#else
1371 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
1372#endif
1373 }
1374 ESP = esp;
1375
1376 selector = (selector & ~3) | dpl;
1377 cpu_x86_load_seg_cache(env, R_CS, selector,
1378 get_seg_base(e1, e2),
1379 get_seg_limit(e1, e2),
1380 e2);
1381 cpu_x86_set_cpl(env, dpl);
1382 env->eip = offset;
1383
1384 /* interrupt gate clear IF mask */
1385 if ((type & 1) == 0) {
1386 env->eflags &= ~IF_MASK;
1387 }
1388#ifndef VBOX
1389 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1390#else /* VBOX */
1391 /*
1392 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1393 * gets confused by seemingly changed EFLAGS. See #3491 and
1394 * public bug #2341.
1395 */
1396 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1397#endif /* VBOX */
1398}
1399#endif
1400
1401#ifdef TARGET_X86_64
1402#if defined(CONFIG_USER_ONLY)
1403void helper_syscall(int next_eip_addend)
1404{
1405 env->exception_index = EXCP_SYSCALL;
1406 env->exception_next_eip = env->eip + next_eip_addend;
1407 cpu_loop_exit();
1408}
1409#else
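/* SYSCALL: the new CS selector comes from bits 47:32 of MSR_STAR (SS is
   CS + 8). In long mode the return RIP is saved in RCX and RFLAGS in R11,
   RFLAGS is masked with the SYSCALL flag mask (env->fmask) and execution
   continues at LSTAR (or CSTAR for compatibility-mode callers); in legacy
   mode ECX gets the return EIP, IF/VM/RF are cleared and EIP comes from the
   low 32 bits of MSR_STAR. */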
1410void helper_syscall(int next_eip_addend)
1411{
1412 int selector;
1413
1414 if (!(env->efer & MSR_EFER_SCE)) {
1415 raise_exception_err(EXCP06_ILLOP, 0);
1416 }
1417 selector = (env->star >> 32) & 0xffff;
1418 if (env->hflags & HF_LMA_MASK) {
1419 int code64;
1420
1421 ECX = env->eip + next_eip_addend;
1422 env->regs[11] = compute_eflags();
1423
1424 code64 = env->hflags & HF_CS64_MASK;
1425
1426 cpu_x86_set_cpl(env, 0);
1427 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1428 0, 0xffffffff,
1429 DESC_G_MASK | DESC_P_MASK |
1430 DESC_S_MASK |
1431 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1432 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1433 0, 0xffffffff,
1434 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1435 DESC_S_MASK |
1436 DESC_W_MASK | DESC_A_MASK);
1437 env->eflags &= ~env->fmask;
1438 load_eflags(env->eflags, 0);
1439 if (code64)
1440 env->eip = env->lstar;
1441 else
1442 env->eip = env->cstar;
1443 } else {
1444 ECX = (uint32_t)(env->eip + next_eip_addend);
1445
1446 cpu_x86_set_cpl(env, 0);
1447 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1448 0, 0xffffffff,
1449 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1450 DESC_S_MASK |
1451 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1452 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1453 0, 0xffffffff,
1454 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1455 DESC_S_MASK |
1456 DESC_W_MASK | DESC_A_MASK);
1457 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1458 env->eip = (uint32_t)env->star;
1459 }
1460}
1461#endif
1462#endif
1463
1464#ifdef TARGET_X86_64
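/* SYSRET: the user CS/SS selectors are derived from bits 63:48 of MSR_STAR.
   With dflag == 2 a 64-bit code segment (selector + 16) is loaded and RIP
   comes from RCX, otherwise a 32-bit one is loaded; in long mode EFLAGS is
   restored from R11, in legacy mode IF is simply set again. CPL becomes 3. */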
1465void helper_sysret(int dflag)
1466{
1467 int cpl, selector;
1468
1469 if (!(env->efer & MSR_EFER_SCE)) {
1470 raise_exception_err(EXCP06_ILLOP, 0);
1471 }
1472 cpl = env->hflags & HF_CPL_MASK;
1473 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1474 raise_exception_err(EXCP0D_GPF, 0);
1475 }
1476 selector = (env->star >> 48) & 0xffff;
1477 if (env->hflags & HF_LMA_MASK) {
1478 if (dflag == 2) {
1479 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1480 0, 0xffffffff,
1481 DESC_G_MASK | DESC_P_MASK |
1482 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1483 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1484 DESC_L_MASK);
1485 env->eip = ECX;
1486 } else {
1487 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1488 0, 0xffffffff,
1489 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1490 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1491 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1492 env->eip = (uint32_t)ECX;
1493 }
1494 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1495 0, 0xffffffff,
1496 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1497 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1498 DESC_W_MASK | DESC_A_MASK);
1499 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1500 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1501 cpu_x86_set_cpl(env, 3);
1502 } else {
1503 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1504 0, 0xffffffff,
1505 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1506 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1507 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1508 env->eip = (uint32_t)ECX;
1509 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1510 0, 0xffffffff,
1511 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1512 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1513 DESC_W_MASK | DESC_A_MASK);
1514 env->eflags |= IF_MASK;
1515 cpu_x86_set_cpl(env, 3);
1516 }
1517}
1518#endif
1519
1520#ifdef VBOX
1521
1522/**
1523 * Checks and processes external VMM events.
1524 * Called by op_check_external_event() when any of the flags is set and can be serviced.
1525 */
1526void helper_external_event(void)
1527{
1528# if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
1529 uintptr_t uSP;
1530# ifdef RT_ARCH_AMD64
1531 __asm__ __volatile__("movq %%rsp, %0" : "=r" (uSP));
1532# else
1533 __asm__ __volatile__("movl %%esp, %0" : "=r" (uSP));
1534# endif
1535 AssertMsg(!(uSP & 15), ("xSP=%#p\n", uSP));
1536# endif
1537 /* Keep in sync with flags checked by gen_check_external_event() */
1538 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
1539 {
1540 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1541 ~CPU_INTERRUPT_EXTERNAL_HARD);
1542 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1543 }
1544 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
1545 {
1546 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1547 ~CPU_INTERRUPT_EXTERNAL_EXIT);
1548 cpu_exit(env);
1549 }
1550 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
1551 {
1552 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1553 ~CPU_INTERRUPT_EXTERNAL_DMA);
1554 remR3DmaRun(env);
1555 }
1556 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
1557 {
1558 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1559 ~CPU_INTERRUPT_EXTERNAL_TIMER);
1560 remR3TimersRun(env);
1561 }
1562 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_FLUSH_TLB)
1563 {
1564 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1565 ~CPU_INTERRUPT_EXTERNAL_HARD);
1566 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1567 }
1568}
1569
1570/* helper for recording call instruction addresses for later scanning */
1571void helper_record_call()
1572{
1573 if ( !(env->state & CPU_RAW_RING0)
1574 && (env->cr[0] & CR0_PG_MASK)
1575 && !(env->eflags & X86_EFL_IF))
1576 remR3RecordCall(env);
1577}
1578
1579#endif /* VBOX */
1580
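/* Real-mode interrupt entry: the vector table is read through the IDT base
   (linear address 0 after reset), four bytes per vector (offset, then
   selector); FLAGS, CS and IP are pushed on the stack and IF/TF/AC/RF are
   cleared before jumping to the handler. */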
1581/* real mode interrupt */
1582static void do_interrupt_real(int intno, int is_int, int error_code,
1583 unsigned int next_eip)
1584{
1585 SegmentCache *dt;
1586 target_ulong ptr, ssp;
1587 int selector;
1588 uint32_t offset, esp;
1589 uint32_t old_cs, old_eip;
1590
1591 /* real mode (simpler !) */
1592 dt = &env->idt;
1593#ifndef VBOX
1594 if (intno * 4 + 3 > dt->limit)
1595#else
1596 if ((unsigned)intno * 4 + 3 > dt->limit)
1597#endif
1598 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1599 ptr = dt->base + intno * 4;
1600 offset = lduw_kernel(ptr);
1601 selector = lduw_kernel(ptr + 2);
1602 esp = ESP;
1603 ssp = env->segs[R_SS].base;
1604 if (is_int)
1605 old_eip = next_eip;
1606 else
1607 old_eip = env->eip;
1608 old_cs = env->segs[R_CS].selector;
1609 /* XXX: use SS segment size ? */
1610 PUSHW(ssp, esp, 0xffff, compute_eflags());
1611 PUSHW(ssp, esp, 0xffff, old_cs);
1612 PUSHW(ssp, esp, 0xffff, old_eip);
1613
1614 /* update processor state */
1615 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1616 env->eip = offset;
1617 env->segs[R_CS].selector = selector;
1618 env->segs[R_CS].base = (selector << 4);
1619 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1620}
1621
1622/* fake user mode interrupt */
1623void do_interrupt_user(int intno, int is_int, int error_code,
1624 target_ulong next_eip)
1625{
1626 SegmentCache *dt;
1627 target_ulong ptr;
1628 int dpl, cpl, shift;
1629 uint32_t e2;
1630
1631 dt = &env->idt;
1632 if (env->hflags & HF_LMA_MASK) {
1633 shift = 4;
1634 } else {
1635 shift = 3;
1636 }
1637 ptr = dt->base + (intno << shift);
1638 e2 = ldl_kernel(ptr + 4);
1639
1640 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1641 cpl = env->hflags & HF_CPL_MASK;
1642 /* check privilege if software int */
1643 if (is_int && dpl < cpl)
1644 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1645
1646 /* Since we emulate only user space, we cannot do more than exit the
1647 emulation with the appropriate exception and error
1648 code */
1649 if (is_int)
1650 EIP = next_eip;
1651}
1652
1653#if !defined(CONFIG_USER_ONLY)
1654static void handle_even_inj(int intno, int is_int, int error_code,
1655 int is_hw, int rm)
1656{
1657 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1658 if (!(event_inj & SVM_EVTINJ_VALID)) {
1659 int type;
1660 if (is_int)
1661 type = SVM_EVTINJ_TYPE_SOFT;
1662 else
1663 type = SVM_EVTINJ_TYPE_EXEPT;
1664 event_inj = intno | type | SVM_EVTINJ_VALID;
1665 if (!rm && exeption_has_error_code(intno)) {
1666 event_inj |= SVM_EVTINJ_VALID_ERR;
1667 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
1668 }
1669 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
1670 }
1671}
1672#endif
1673
1674/*
1675 * Begin execution of an interrupt. is_int is TRUE if coming from
1676 * the int instruction. next_eip is the EIP value AFTER the interrupt
1677 * instruction. It is only relevant if is_int is TRUE.
1678 */
1679void do_interrupt(int intno, int is_int, int error_code,
1680 target_ulong next_eip, int is_hw)
1681{
1682 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1683 if ((env->cr[0] & CR0_PE_MASK)) {
1684 static int count;
1685 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1686 count, intno, error_code, is_int,
1687 env->hflags & HF_CPL_MASK,
1688 env->segs[R_CS].selector, EIP,
1689 (int)env->segs[R_CS].base + EIP,
1690 env->segs[R_SS].selector, ESP);
1691 if (intno == 0x0e) {
1692 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1693 } else {
1694 qemu_log(" EAX=" TARGET_FMT_lx, EAX);
1695 }
1696 qemu_log("\n");
1697 log_cpu_state(env, X86_DUMP_CCOP);
1698#if 0
1699 {
1700 int i;
1701 uint8_t *ptr;
1702 qemu_log(" code=");
1703 ptr = env->segs[R_CS].base + env->eip;
1704 for(i = 0; i < 16; i++) {
1705 qemu_log(" %02x", ldub(ptr + i));
1706 }
1707 qemu_log("\n");
1708 }
1709#endif
1710 count++;
1711 }
1712 }
1713#ifdef VBOX
1714 if (RT_UNLIKELY(env->state & CPU_EMULATE_SINGLE_STEP)) {
1715 if (is_int) {
1716 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv%s\n",
1717 intno, error_code, (RTGCPTR)env->eip, is_hw ? " hw" : "");
1718 } else {
1719 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv next=%#RGv%s\n",
1720 intno, error_code, (RTGCPTR)env->eip, (RTGCPTR)next_eip, is_hw ? " hw" : "");
1721 }
1722 }
1723#endif
1724 if (env->cr[0] & CR0_PE_MASK) {
1725#if !defined(CONFIG_USER_ONLY)
1726 if (env->hflags & HF_SVMI_MASK)
1727 handle_even_inj(intno, is_int, error_code, is_hw, 0);
1728#endif
1729#ifdef TARGET_X86_64
1730 if (env->hflags & HF_LMA_MASK) {
1731 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1732 } else
1733#endif
1734 {
1735#ifdef VBOX
1736 /* int xx *, v86 code and VME enabled? */
1737 if ( (env->eflags & VM_MASK)
1738 && (env->cr[4] & CR4_VME_MASK)
1739 && is_int
1740 && !is_hw
1741 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
1742 )
1743 do_soft_interrupt_vme(intno, error_code, next_eip);
1744 else
1745#endif /* VBOX */
1746 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1747 }
1748 } else {
1749#if !defined(CONFIG_USER_ONLY)
1750 if (env->hflags & HF_SVMI_MASK)
1751 handle_even_inj(intno, is_int, error_code, is_hw, 1);
1752#endif
1753 do_interrupt_real(intno, is_int, error_code, next_eip);
1754 }
1755
1756#if !defined(CONFIG_USER_ONLY)
1757 if (env->hflags & HF_SVMI_MASK) {
1758 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1759 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
1760 }
1761#endif
1762}
1763
1764/* This should come from sysemu.h - if we could include it here... */
1765void qemu_system_reset_request(void);
1766
1767/*
1768 * Check nested exceptions and change to double or triple fault if
1769 * needed. It should only be called if this is not an interrupt.
1770 * Returns the new exception number.
1771 */
1772static int check_exception(int intno, int *error_code)
1773{
1774 int first_contributory = env->old_exception == 0 ||
1775 (env->old_exception >= 10 &&
1776 env->old_exception <= 13);
1777 int second_contributory = intno == 0 ||
1778 (intno >= 10 && intno <= 13);
1779
1780 qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
1781 env->old_exception, intno);
1782
1783#if !defined(CONFIG_USER_ONLY)
1784 if (env->old_exception == EXCP08_DBLE) {
1785 if (env->hflags & HF_SVMI_MASK)
1786 helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */
1787
1788 qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
1789
1790# ifndef VBOX
1791 qemu_system_reset_request();
1792# else
1793 remR3RaiseRC(env->pVM, VINF_EM_RESET); /** @todo test + improve triple fault handling. */
1794# endif
1795 return EXCP_HLT;
1796 }
1797#endif
1798
1799 if ((first_contributory && second_contributory)
1800 || (env->old_exception == EXCP0E_PAGE &&
1801 (second_contributory || (intno == EXCP0E_PAGE)))) {
1802 intno = EXCP08_DBLE;
1803 *error_code = 0;
1804 }
1805
1806 if (second_contributory || (intno == EXCP0E_PAGE) ||
1807 (intno == EXCP08_DBLE))
1808 env->old_exception = intno;
1809
1810 return intno;
1811}
1812
1813/*
1814 * Signal an interrupt. It is executed in the main CPU loop.
1815 * is_int is TRUE if coming from the int instruction. next_eip is the
1816 * EIP value AFTER the interrupt instruction. It is only relevant if
1817 * is_int is TRUE.
1818 */
1819static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
1820 int next_eip_addend)
1821{
1822#if defined(VBOX) && defined(DEBUG)
1823 Log2(("raise_interrupt: %x %x %x %RGv\n", intno, is_int, error_code, (RTGCPTR)env->eip + next_eip_addend));
1824#endif
1825 if (!is_int) {
1826 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1827 intno = check_exception(intno, &error_code);
1828 } else {
1829 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1830 }
1831
1832 env->exception_index = intno;
1833 env->error_code = error_code;
1834 env->exception_is_int = is_int;
1835 env->exception_next_eip = env->eip + next_eip_addend;
1836 cpu_loop_exit();
1837}
1838
1839/* shortcuts to generate exceptions */
1840
1841void raise_exception_err(int exception_index, int error_code)
1842{
1843 raise_interrupt(exception_index, 0, error_code, 0);
1844}
1845
1846void raise_exception(int exception_index)
1847{
1848 raise_interrupt(exception_index, 0, 0, 0);
1849}
1850
1851void raise_exception_env(int exception_index, CPUState *nenv)
1852{
1853 env = nenv;
1854 raise_exception(exception_index);
1855}
1856/* SMM support */
1857
1858#if defined(CONFIG_USER_ONLY)
1859
1860void do_smm_enter(void)
1861{
1862}
1863
1864void helper_rsm(void)
1865{
1866}
1867
1868#else
1869
1870#ifdef TARGET_X86_64
1871#define SMM_REVISION_ID 0x00020064
1872#else
1873#define SMM_REVISION_ID 0x00020000
1874#endif
1875
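/* Enter System Management Mode: the complete CPU state is saved into the
   SMRAM state save area at smbase + 0x8000 before the SMI handler is entered;
   the save-area layout differs between the 64-bit and legacy formats (see
   SMM_REVISION_ID above). */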
1876void do_smm_enter(void)
1877{
1878 target_ulong sm_state;
1879 SegmentCache *dt;
1880 int i, offset;
1881
1882 qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
1883 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1884
1885 env->hflags |= HF_SMM_MASK;
1886 cpu_smm_update(env);
1887
1888 sm_state = env->smbase + 0x8000;
1889
1890#ifdef TARGET_X86_64
1891 for(i = 0; i < 6; i++) {
1892 dt = &env->segs[i];
1893 offset = 0x7e00 + i * 16;
1894 stw_phys(sm_state + offset, dt->selector);
1895 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1896 stl_phys(sm_state + offset + 4, dt->limit);
1897 stq_phys(sm_state + offset + 8, dt->base);
1898 }
1899
1900 stq_phys(sm_state + 0x7e68, env->gdt.base);
1901 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1902
1903 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1904 stq_phys(sm_state + 0x7e78, env->ldt.base);
1905 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1906 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1907
1908 stq_phys(sm_state + 0x7e88, env->idt.base);
1909 stl_phys(sm_state + 0x7e84, env->idt.limit);
1910
1911 stw_phys(sm_state + 0x7e90, env->tr.selector);
1912 stq_phys(sm_state + 0x7e98, env->tr.base);
1913 stl_phys(sm_state + 0x7e94, env->tr.limit);
1914 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1915
1916 stq_phys(sm_state + 0x7ed0, env->efer);
1917
1918 stq_phys(sm_state + 0x7ff8, EAX);
1919 stq_phys(sm_state + 0x7ff0, ECX);
1920 stq_phys(sm_state + 0x7fe8, EDX);
1921 stq_phys(sm_state + 0x7fe0, EBX);
1922 stq_phys(sm_state + 0x7fd8, ESP);
1923 stq_phys(sm_state + 0x7fd0, EBP);
1924 stq_phys(sm_state + 0x7fc8, ESI);
1925 stq_phys(sm_state + 0x7fc0, EDI);
1926 for(i = 8; i < 16; i++)
1927 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1928 stq_phys(sm_state + 0x7f78, env->eip);
1929 stl_phys(sm_state + 0x7f70, compute_eflags());
1930 stl_phys(sm_state + 0x7f68, env->dr[6]);
1931 stl_phys(sm_state + 0x7f60, env->dr[7]);
1932
1933 stl_phys(sm_state + 0x7f48, env->cr[4]);
1934 stl_phys(sm_state + 0x7f50, env->cr[3]);
1935 stl_phys(sm_state + 0x7f58, env->cr[0]);
1936
1937 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1938 stl_phys(sm_state + 0x7f00, env->smbase);
1939#else
1940 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1941 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1942 stl_phys(sm_state + 0x7ff4, compute_eflags());
1943 stl_phys(sm_state + 0x7ff0, env->eip);
1944 stl_phys(sm_state + 0x7fec, EDI);
1945 stl_phys(sm_state + 0x7fe8, ESI);
1946 stl_phys(sm_state + 0x7fe4, EBP);
1947 stl_phys(sm_state + 0x7fe0, ESP);
1948 stl_phys(sm_state + 0x7fdc, EBX);
1949 stl_phys(sm_state + 0x7fd8, EDX);
1950 stl_phys(sm_state + 0x7fd4, ECX);
1951 stl_phys(sm_state + 0x7fd0, EAX);
1952 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1953 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1954
1955 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1956 stl_phys(sm_state + 0x7f64, env->tr.base);
1957 stl_phys(sm_state + 0x7f60, env->tr.limit);
1958 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1959
1960 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1961 stl_phys(sm_state + 0x7f80, env->ldt.base);
1962 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1963 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1964
1965 stl_phys(sm_state + 0x7f74, env->gdt.base);
1966 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1967
1968 stl_phys(sm_state + 0x7f58, env->idt.base);
1969 stl_phys(sm_state + 0x7f54, env->idt.limit);
1970
1971 for(i = 0; i < 6; i++) {
1972 dt = &env->segs[i];
1973 if (i < 3)
1974 offset = 0x7f84 + i * 12;
1975 else
1976 offset = 0x7f2c + (i - 3) * 12;
1977 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1978 stl_phys(sm_state + offset + 8, dt->base);
1979 stl_phys(sm_state + offset + 4, dt->limit);
1980 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1981 }
1982 stl_phys(sm_state + 0x7f14, env->cr[4]);
1983
1984 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1985 stl_phys(sm_state + 0x7ef8, env->smbase);
1986#endif
1987 /* init SMM cpu state */
1988
1989#ifdef TARGET_X86_64
1990 cpu_load_efer(env, 0);
1991#endif
1992 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1993 env->eip = 0x00008000;
1994 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1995 0xffffffff, 0);
1996 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1997 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1998 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1999 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
2000 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
2001
2002 cpu_x86_update_cr0(env,
2003 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
2004 cpu_x86_update_cr4(env, 0);
2005 env->dr[7] = 0x00000400;
2006 CC_OP = CC_OP_EFLAGS;
2007}
2008
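/*
 * RSM: restore the CPU state saved by do_smm_enter() from the SMRAM state
 * save area and leave SMM. The VirtualBox build never enters SMM, so this
 * path simply aborts there.
 */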
2009void helper_rsm(void)
2010{
2011#ifdef VBOX
2012 cpu_abort(env, "helper_rsm");
2013#else /* !VBOX */
2014 target_ulong sm_state;
2015 int i, offset;
2016 uint32_t val;
2017
2018 sm_state = env->smbase + 0x8000;
2019#ifdef TARGET_X86_64
2020 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
2021
2022 for(i = 0; i < 6; i++) {
2023 offset = 0x7e00 + i * 16;
2024 cpu_x86_load_seg_cache(env, i,
2025 lduw_phys(sm_state + offset),
2026 ldq_phys(sm_state + offset + 8),
2027 ldl_phys(sm_state + offset + 4),
2028 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
2029 }
2030
2031 env->gdt.base = ldq_phys(sm_state + 0x7e68);
2032 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
2033
2034 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
2035 env->ldt.base = ldq_phys(sm_state + 0x7e78);
2036 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
2037 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
2038#ifdef VBOX
2039 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2040 env->ldt.newselector = 0;
2041#endif
2042
2043 env->idt.base = ldq_phys(sm_state + 0x7e88);
2044 env->idt.limit = ldl_phys(sm_state + 0x7e84);
2045
2046 env->tr.selector = lduw_phys(sm_state + 0x7e90);
2047 env->tr.base = ldq_phys(sm_state + 0x7e98);
2048 env->tr.limit = ldl_phys(sm_state + 0x7e94);
2049 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
2050#ifdef VBOX
2051 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2052 env->tr.newselector = 0;
2053#endif
2054
2055 EAX = ldq_phys(sm_state + 0x7ff8);
2056 ECX = ldq_phys(sm_state + 0x7ff0);
2057 EDX = ldq_phys(sm_state + 0x7fe8);
2058 EBX = ldq_phys(sm_state + 0x7fe0);
2059 ESP = ldq_phys(sm_state + 0x7fd8);
2060 EBP = ldq_phys(sm_state + 0x7fd0);
2061 ESI = ldq_phys(sm_state + 0x7fc8);
2062 EDI = ldq_phys(sm_state + 0x7fc0);
2063 for(i = 8; i < 16; i++)
2064 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
2065 env->eip = ldq_phys(sm_state + 0x7f78);
2066 load_eflags(ldl_phys(sm_state + 0x7f70),
2067 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
2068 env->dr[6] = ldl_phys(sm_state + 0x7f68);
2069 env->dr[7] = ldl_phys(sm_state + 0x7f60);
2070
2071 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
2072 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
2073 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
2074
2075 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
2076 if (val & 0x20000) {
2077 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
2078 }
2079#else
2080 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
2081 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
2082 load_eflags(ldl_phys(sm_state + 0x7ff4),
2083 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
2084 env->eip = ldl_phys(sm_state + 0x7ff0);
2085 EDI = ldl_phys(sm_state + 0x7fec);
2086 ESI = ldl_phys(sm_state + 0x7fe8);
2087 EBP = ldl_phys(sm_state + 0x7fe4);
2088 ESP = ldl_phys(sm_state + 0x7fe0);
2089 EBX = ldl_phys(sm_state + 0x7fdc);
2090 EDX = ldl_phys(sm_state + 0x7fd8);
2091 ECX = ldl_phys(sm_state + 0x7fd4);
2092 EAX = ldl_phys(sm_state + 0x7fd0);
2093 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
2094 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
2095
2096 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
2097 env->tr.base = ldl_phys(sm_state + 0x7f64);
2098 env->tr.limit = ldl_phys(sm_state + 0x7f60);
2099 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
2100#ifdef VBOX
2101 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2102 env->tr.newselector = 0;
2103#endif
2104
2105 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
2106 env->ldt.base = ldl_phys(sm_state + 0x7f80);
2107 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
2108 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
2109#ifdef VBOX
2110 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2111 env->ldt.newselector = 0;
2112#endif
2113
2114 env->gdt.base = ldl_phys(sm_state + 0x7f74);
2115 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
2116
2117 env->idt.base = ldl_phys(sm_state + 0x7f58);
2118 env->idt.limit = ldl_phys(sm_state + 0x7f54);
2119
2120 for(i = 0; i < 6; i++) {
2121 if (i < 3)
2122 offset = 0x7f84 + i * 12;
2123 else
2124 offset = 0x7f2c + (i - 3) * 12;
2125 cpu_x86_load_seg_cache(env, i,
2126 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
2127 ldl_phys(sm_state + offset + 8),
2128 ldl_phys(sm_state + offset + 4),
2129 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
2130 }
2131 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
2132
2133 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
2134 if (val & 0x20000) {
2135 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
2136 }
2137#endif
2138 CC_OP = CC_OP_EFLAGS;
2139 env->hflags &= ~HF_SMM_MASK;
2140 cpu_smm_update(env);
2141
2142 qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
2143 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
2144#endif /* !VBOX */
2145}
2146
2147#endif /* !CONFIG_USER_ONLY */
2148
2149
2150/* division, flags are undefined */
2151
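/* DIV/IDIV helpers: the double-width dividend lives in AX, DX:AX or EDX:EAX,
   the quotient and remainder are written back there, and #DE is raised both
   for a zero divisor and for a quotient that does not fit the destination. */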
2152void helper_divb_AL(target_ulong t0)
2153{
2154 unsigned int num, den, q, r;
2155
2156 num = (EAX & 0xffff);
2157 den = (t0 & 0xff);
2158 if (den == 0) {
2159 raise_exception(EXCP00_DIVZ);
2160 }
2161 q = (num / den);
2162 if (q > 0xff)
2163 raise_exception(EXCP00_DIVZ);
2164 q &= 0xff;
2165 r = (num % den) & 0xff;
2166 EAX = (EAX & ~0xffff) | (r << 8) | q;
2167}
2168
2169void helper_idivb_AL(target_ulong t0)
2170{
2171 int num, den, q, r;
2172
2173 num = (int16_t)EAX;
2174 den = (int8_t)t0;
2175 if (den == 0) {
2176 raise_exception(EXCP00_DIVZ);
2177 }
2178 q = (num / den);
2179 if (q != (int8_t)q)
2180 raise_exception(EXCP00_DIVZ);
2181 q &= 0xff;
2182 r = (num % den) & 0xff;
2183 EAX = (EAX & ~0xffff) | (r << 8) | q;
2184}
2185
2186void helper_divw_AX(target_ulong t0)
2187{
2188 unsigned int num, den, q, r;
2189
2190 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2191 den = (t0 & 0xffff);
2192 if (den == 0) {
2193 raise_exception(EXCP00_DIVZ);
2194 }
2195 q = (num / den);
2196 if (q > 0xffff)
2197 raise_exception(EXCP00_DIVZ);
2198 q &= 0xffff;
2199 r = (num % den) & 0xffff;
2200 EAX = (EAX & ~0xffff) | q;
2201 EDX = (EDX & ~0xffff) | r;
2202}
2203
2204void helper_idivw_AX(target_ulong t0)
2205{
2206 int num, den, q, r;
2207
2208 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2209 den = (int16_t)t0;
2210 if (den == 0) {
2211 raise_exception(EXCP00_DIVZ);
2212 }
2213 q = (num / den);
2214 if (q != (int16_t)q)
2215 raise_exception(EXCP00_DIVZ);
2216 q &= 0xffff;
2217 r = (num % den) & 0xffff;
2218 EAX = (EAX & ~0xffff) | q;
2219 EDX = (EDX & ~0xffff) | r;
2220}
2221
2222void helper_divl_EAX(target_ulong t0)
2223{
2224 unsigned int den, r;
2225 uint64_t num, q;
2226
2227 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2228 den = t0;
2229 if (den == 0) {
2230 raise_exception(EXCP00_DIVZ);
2231 }
2232 q = (num / den);
2233 r = (num % den);
2234 if (q > 0xffffffff)
2235 raise_exception(EXCP00_DIVZ);
2236 EAX = (uint32_t)q;
2237 EDX = (uint32_t)r;
2238}
2239
2240void helper_idivl_EAX(target_ulong t0)
2241{
2242 int den, r;
2243 int64_t num, q;
2244
2245 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2246 den = t0;
2247 if (den == 0) {
2248 raise_exception(EXCP00_DIVZ);
2249 }
2250 q = (num / den);
2251 r = (num % den);
2252 if (q != (int32_t)q)
2253 raise_exception(EXCP00_DIVZ);
2254 EAX = (uint32_t)q;
2255 EDX = (uint32_t)r;
2256}
2257
2258/* bcd */
2259
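/* BCD/ASCII adjust helpers: AAM and AAD rewrite AL/AH and leave the result in
   CC_DST for flag computation; AAA, AAS, DAA and DAS adjust AL (and AH) and
   store the resulting flags in CC_SRC. */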
2260/* XXX: missing #DE when the immediate divisor (base) is zero */
2261void helper_aam(int base)
2262{
2263 int al, ah;
2264 al = EAX & 0xff;
2265 ah = al / base;
2266 al = al % base;
2267 EAX = (EAX & ~0xffff) | al | (ah << 8);
2268 CC_DST = al;
2269}
2270
2271void helper_aad(int base)
2272{
2273 int al, ah;
2274 al = EAX & 0xff;
2275 ah = (EAX >> 8) & 0xff;
2276 al = ((ah * base) + al) & 0xff;
2277 EAX = (EAX & ~0xffff) | al;
2278 CC_DST = al;
2279}
2280
2281void helper_aaa(void)
2282{
2283 int icarry;
2284 int al, ah, af;
2285 int eflags;
2286
2287 eflags = helper_cc_compute_all(CC_OP);
2288 af = eflags & CC_A;
2289 al = EAX & 0xff;
2290 ah = (EAX >> 8) & 0xff;
2291
2292 icarry = (al > 0xf9);
2293 if (((al & 0x0f) > 9 ) || af) {
2294 al = (al + 6) & 0x0f;
2295 ah = (ah + 1 + icarry) & 0xff;
2296 eflags |= CC_C | CC_A;
2297 } else {
2298 eflags &= ~(CC_C | CC_A);
2299 al &= 0x0f;
2300 }
2301 EAX = (EAX & ~0xffff) | al | (ah << 8);
2302 CC_SRC = eflags;
2303}
2304
2305void helper_aas(void)
2306{
2307 int icarry;
2308 int al, ah, af;
2309 int eflags;
2310
2311 eflags = helper_cc_compute_all(CC_OP);
2312 af = eflags & CC_A;
2313 al = EAX & 0xff;
2314 ah = (EAX >> 8) & 0xff;
2315
2316 icarry = (al < 6);
2317 if (((al & 0x0f) > 9 ) || af) {
2318 al = (al - 6) & 0x0f;
2319 ah = (ah - 1 - icarry) & 0xff;
2320 eflags |= CC_C | CC_A;
2321 } else {
2322 eflags &= ~(CC_C | CC_A);
2323 al &= 0x0f;
2324 }
2325 EAX = (EAX & ~0xffff) | al | (ah << 8);
2326 CC_SRC = eflags;
2327}
2328
2329void helper_daa(void)
2330{
2331 int al, af, cf;
2332 int eflags;
2333
2334 eflags = helper_cc_compute_all(CC_OP);
2335 cf = eflags & CC_C;
2336 af = eflags & CC_A;
2337 al = EAX & 0xff;
2338
2339 eflags = 0;
2340 if (((al & 0x0f) > 9 ) || af) {
2341 al = (al + 6) & 0xff;
2342 eflags |= CC_A;
2343 }
2344 if ((al > 0x9f) || cf) {
2345 al = (al + 0x60) & 0xff;
2346 eflags |= CC_C;
2347 }
2348 EAX = (EAX & ~0xff) | al;
2349 /* well, speed is not an issue here, so we compute the flags by hand */
2350 eflags |= (al == 0) << 6; /* zf */
2351 eflags |= parity_table[al]; /* pf */
2352 eflags |= (al & 0x80); /* sf */
2353 CC_SRC = eflags;
2354}
2355
2356void helper_das(void)
2357{
2358 int al, al1, af, cf;
2359 int eflags;
2360
2361 eflags = helper_cc_compute_all(CC_OP);
2362 cf = eflags & CC_C;
2363 af = eflags & CC_A;
2364 al = EAX & 0xff;
2365
2366 eflags = 0;
2367 al1 = al;
2368 if (((al & 0x0f) > 9 ) || af) {
2369 eflags |= CC_A;
2370 if (al < 6 || cf)
2371 eflags |= CC_C;
2372 al = (al - 6) & 0xff;
2373 }
2374 if ((al1 > 0x99) || cf) {
2375 al = (al - 0x60) & 0xff;
2376 eflags |= CC_C;
2377 }
2378 EAX = (EAX & ~0xff) | al;
2379 /* well, speed is not an issue here, so we compute the flags by hand */
2380 eflags |= (al == 0) << 6; /* zf */
2381 eflags |= parity_table[al]; /* pf */
2382 eflags |= (al & 0x80); /* sf */
2383 CC_SRC = eflags;
2384}
2385
2386void helper_into(int next_eip_addend)
2387{
2388 int eflags;
2389 eflags = helper_cc_compute_all(CC_OP);
2390 if (eflags & CC_O) {
2391 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
2392 }
2393}
2394
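/* CMPXCHG8B: compare EDX:EAX with the 64-bit memory operand; if equal, store
   ECX:EBX and set ZF, otherwise load the operand into EDX:EAX and clear ZF.
   The memory operand is always written back. */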
2395void helper_cmpxchg8b(target_ulong a0)
2396{
2397 uint64_t d;
2398 int eflags;
2399
2400 eflags = helper_cc_compute_all(CC_OP);
2401 d = ldq(a0);
2402 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
2403 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
2404 eflags |= CC_Z;
2405 } else {
2406 /* always do the store */
2407 stq(a0, d);
2408 EDX = (uint32_t)(d >> 32);
2409 EAX = (uint32_t)d;
2410 eflags &= ~CC_Z;
2411 }
2412 CC_SRC = eflags;
2413}
2414
2415#ifdef TARGET_X86_64
2416void helper_cmpxchg16b(target_ulong a0)
2417{
2418 uint64_t d0, d1;
2419 int eflags;
2420
2421 if ((a0 & 0xf) != 0)
2422 raise_exception(EXCP0D_GPF);
2423 eflags = helper_cc_compute_all(CC_OP);
2424 d0 = ldq(a0);
2425 d1 = ldq(a0 + 8);
2426 if (d0 == EAX && d1 == EDX) {
2427 stq(a0, EBX);
2428 stq(a0 + 8, ECX);
2429 eflags |= CC_Z;
2430 } else {
2431 /* always do the store */
2432 stq(a0, d0);
2433 stq(a0 + 8, d1);
2434 EDX = d1;
2435 EAX = d0;
2436 eflags &= ~CC_Z;
2437 }
2438 CC_SRC = eflags;
2439}
2440#endif
2441
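/* Raise a #DB for single stepping; on system emulation the hardware
   breakpoint state is re-checked and DR6.BS is set first. */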
2442void helper_single_step(void)
2443{
2444#ifndef CONFIG_USER_ONLY
2445 check_hw_breakpoints(env, 1);
2446 env->dr[6] |= DR6_BS;
2447#endif
2448 raise_exception(EXCP01_DB);
2449}
2450
2451void helper_cpuid(void)
2452{
2453 uint32_t eax, ebx, ecx, edx;
2454
2455 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
2456
2457 cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
2458 EAX = eax;
2459 EBX = ebx;
2460 ECX = ecx;
2461 EDX = edx;
2462}
2463
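/* ENTER with a non-zero nesting level: copy the enclosing frame pointers from
   the old frame onto the new one and push the new frame pointer (t1), using
   esp_mask to apply the stack segment's address-size mask. */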
2464void helper_enter_level(int level, int data32, target_ulong t1)
2465{
2466 target_ulong ssp;
2467 uint32_t esp_mask, esp, ebp;
2468
2469 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2470 ssp = env->segs[R_SS].base;
2471 ebp = EBP;
2472 esp = ESP;
2473 if (data32) {
2474 /* 32 bit */
2475 esp -= 4;
2476 while (--level) {
2477 esp -= 4;
2478 ebp -= 4;
2479 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2480 }
2481 esp -= 4;
2482 stl(ssp + (esp & esp_mask), t1);
2483 } else {
2484 /* 16 bit */
2485 esp -= 2;
2486 while (--level) {
2487 esp -= 2;
2488 ebp -= 2;
2489 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2490 }
2491 esp -= 2;
2492 stw(ssp + (esp & esp_mask), t1);
2493 }
2494}
2495
2496#ifdef TARGET_X86_64
2497void helper_enter64_level(int level, int data64, target_ulong t1)
2498{
2499 target_ulong esp, ebp;
2500 ebp = EBP;
2501 esp = ESP;
2502
2503 if (data64) {
2504 /* 64 bit */
2505 esp -= 8;
2506 while (--level) {
2507 esp -= 8;
2508 ebp -= 8;
2509 stq(esp, ldq(ebp));
2510 }
2511 esp -= 8;
2512 stq(esp, t1);
2513 } else {
2514 /* 16 bit */
2515 esp -= 2;
2516 while (--level) {
2517 esp -= 2;
2518 ebp -= 2;
2519 stw(esp, lduw(ebp));
2520 }
2521 esp -= 2;
2522 stw(esp, t1);
2523 }
2524}
2525#endif
2526
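/* LLDT: validate the selector against the GDT (16-byte descriptors in long
   mode) and load the LDTR descriptor cache; a null selector just marks the
   LDT unusable. */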
2527void helper_lldt(int selector)
2528{
2529 SegmentCache *dt;
2530 uint32_t e1, e2;
2531#ifndef VBOX
2532 int index, entry_limit;
2533#else
2534 unsigned int index, entry_limit;
2535#endif
2536 target_ulong ptr;
2537
2538#ifdef VBOX
2539 Log(("helper_lldt: old ldtr=%RTsel {.base=%RGv, .limit=%RGv} new=%RTsel\n",
2540 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(selector & 0xffff)));
2541#endif
2542
2543 selector &= 0xffff;
2544 if ((selector & 0xfffc) == 0) {
2545 /* XXX: NULL selector case: invalid LDT */
2546 env->ldt.base = 0;
2547 env->ldt.limit = 0;
2548#ifdef VBOX
2549 env->ldt.flags = DESC_INTEL_UNUSABLE;
2550 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2551 env->ldt.newselector = 0;
2552#endif
2553 } else {
2554 if (selector & 0x4)
2555 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2556 dt = &env->gdt;
2557 index = selector & ~7;
2558#ifdef TARGET_X86_64
2559 if (env->hflags & HF_LMA_MASK)
2560 entry_limit = 15;
2561 else
2562#endif
2563 entry_limit = 7;
2564 if ((index + entry_limit) > dt->limit)
2565 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2566 ptr = dt->base + index;
2567 e1 = ldl_kernel(ptr);
2568 e2 = ldl_kernel(ptr + 4);
2569 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2570 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2571 if (!(e2 & DESC_P_MASK))
2572 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2573#ifdef TARGET_X86_64
2574 if (env->hflags & HF_LMA_MASK) {
2575 uint32_t e3;
2576 e3 = ldl_kernel(ptr + 8);
2577 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2578 env->ldt.base |= (target_ulong)e3 << 32;
2579 } else
2580#endif
2581 {
2582 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2583 }
2584 }
2585 env->ldt.selector = selector;
2586#ifdef VBOX
2587 Log(("helper_lldt: new ldtr=%RTsel {.base=%RGv, .limit=%RGv}\n",
2588 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2589#endif
2590}
2591
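/* LTR: load the task register from a GDT descriptor (must be an available
   16- or 32-bit TSS) and mark that descriptor busy. */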
2592void helper_ltr(int selector)
2593{
2594 SegmentCache *dt;
2595 uint32_t e1, e2;
2596#ifndef VBOX
2597 int index, type, entry_limit;
2598#else
2599 unsigned int index;
2600 int type, entry_limit;
2601#endif
2602 target_ulong ptr;
2603
2604#ifdef VBOX
2605 Log(("helper_ltr: pc=%RGv old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2606 (RTGCPTR)env->eip, (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2607 env->tr.flags, (RTSEL)(selector & 0xffff)));
2608#endif
2609 selector &= 0xffff;
2610 if ((selector & 0xfffc) == 0) {
2611 /* NULL selector case: invalid TR */
2612#ifdef VBOX
2613 raise_exception_err(EXCP0A_TSS, 0);
2614#else
2615 env->tr.base = 0;
2616 env->tr.limit = 0;
2617 env->tr.flags = 0;
2618#endif
2619 } else {
2620 if (selector & 0x4)
2621 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2622 dt = &env->gdt;
2623 index = selector & ~7;
2624#ifdef TARGET_X86_64
2625 if (env->hflags & HF_LMA_MASK)
2626 entry_limit = 15;
2627 else
2628#endif
2629 entry_limit = 7;
2630 if ((index + entry_limit) > dt->limit)
2631 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2632 ptr = dt->base + index;
2633 e1 = ldl_kernel(ptr);
2634 e2 = ldl_kernel(ptr + 4);
2635 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2636 if ((e2 & DESC_S_MASK) ||
2637 (type != 1 && type != 9))
2638 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2639 if (!(e2 & DESC_P_MASK))
2640 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2641#ifdef TARGET_X86_64
2642 if (env->hflags & HF_LMA_MASK) {
2643 uint32_t e3, e4;
2644 e3 = ldl_kernel(ptr + 8);
2645 e4 = ldl_kernel(ptr + 12);
2646 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2647 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2648 load_seg_cache_raw_dt(&env->tr, e1, e2);
2649 env->tr.base |= (target_ulong)e3 << 32;
2650 } else
2651#endif
2652 {
2653 load_seg_cache_raw_dt(&env->tr, e1, e2);
2654 }
2655 e2 |= DESC_TSS_BUSY_MASK;
2656 stl_kernel(ptr + 4, e2);
2657 }
2658 env->tr.selector = selector;
2659#ifdef VBOX
2660 Log(("helper_ltr: new tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2661 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2662 env->tr.flags, (RTSEL)(selector & 0xffff)));
2663#endif
2664}
2665
2666/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2667void helper_load_seg(int seg_reg, int selector)
2668{
2669 uint32_t e1, e2;
2670 int cpl, dpl, rpl;
2671 SegmentCache *dt;
2672#ifndef VBOX
2673 int index;
2674#else
2675 unsigned int index;
2676#endif
2677 target_ulong ptr;
2678
2679 selector &= 0xffff;
2680 cpl = env->hflags & HF_CPL_MASK;
2681#ifdef VBOX
2682
2683 /* Trying to load a selector with RPL=1 while the guest is really in ring 0? */
2684 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2685 {
2686 Log(("RPL 1 -> sel %04X -> %04X (helper_load_seg)\n", selector, selector & 0xfffc));
2687 selector = selector & 0xfffc;
2688 }
2689#endif /* VBOX */
2690 if ((selector & 0xfffc) == 0) {
2691 /* null selector case */
2692#ifndef VBOX
2693 if (seg_reg == R_SS
2694#ifdef TARGET_X86_64
2695 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2696#endif
2697 )
2698 raise_exception_err(EXCP0D_GPF, 0);
2699 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2700#else
2701 if (seg_reg == R_SS) {
2702 if (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2703 raise_exception_err(EXCP0D_GPF, 0);
2704 e2 = (cpl << DESC_DPL_SHIFT) | DESC_INTEL_UNUSABLE;
2705 } else {
2706 e2 = DESC_INTEL_UNUSABLE;
2707 }
2708 cpu_x86_load_seg_cache_with_clean_flags(env, seg_reg, selector, 0, 0, e2);
2709#endif
2710 } else {
2711
2712 if (selector & 0x4)
2713 dt = &env->ldt;
2714 else
2715 dt = &env->gdt;
2716 index = selector & ~7;
2717 if ((index + 7) > dt->limit)
2718 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2719 ptr = dt->base + index;
2720 e1 = ldl_kernel(ptr);
2721 e2 = ldl_kernel(ptr + 4);
2722
2723 if (!(e2 & DESC_S_MASK))
2724 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2725 rpl = selector & 3;
2726 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2727 if (seg_reg == R_SS) {
2728 /* must be writable segment */
2729 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2730 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2731 if (rpl != cpl || dpl != cpl)
2732 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2733 } else {
2734 /* must be readable segment */
2735 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2736 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2737
2738 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2739 /* if not conforming code, test rights */
2740 if (dpl < cpl || dpl < rpl)
2741 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2742 }
2743 }
2744
2745 if (!(e2 & DESC_P_MASK)) {
2746 if (seg_reg == R_SS)
2747 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2748 else
2749 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2750 }
2751
2752 /* set the access bit if not already set */
2753 if (!(e2 & DESC_A_MASK)) {
2754 e2 |= DESC_A_MASK;
2755 stl_kernel(ptr + 4, e2);
2756 }
2757
2758 cpu_x86_load_seg_cache(env, seg_reg, selector,
2759 get_seg_base(e1, e2),
2760 get_seg_limit(e1, e2),
2761 e2);
2762#if 0
2763 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2764 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2765#endif
2766 }
2767}
2768
2769/* protected mode jump */
2770void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2771 int next_eip_addend)
2772{
2773 int gate_cs, type;
2774 uint32_t e1, e2, cpl, dpl, rpl, limit;
2775 target_ulong next_eip;
2776
2777#ifdef VBOX /** @todo Why do we do this? */
2778 e1 = e2 = 0;
2779#endif
2780 if ((new_cs & 0xfffc) == 0)
2781 raise_exception_err(EXCP0D_GPF, 0);
2782 if (load_segment(&e1, &e2, new_cs) != 0)
2783 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2784 cpl = env->hflags & HF_CPL_MASK;
2785 if (e2 & DESC_S_MASK) {
2786 if (!(e2 & DESC_CS_MASK))
2787 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2788 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2789 if (e2 & DESC_C_MASK) {
2790 /* conforming code segment */
2791 if (dpl > cpl)
2792 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2793 } else {
2794 /* non conforming code segment */
2795 rpl = new_cs & 3;
2796 if (rpl > cpl)
2797 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2798 if (dpl != cpl)
2799 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2800 }
2801 if (!(e2 & DESC_P_MASK))
2802 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2803 limit = get_seg_limit(e1, e2);
2804 if (new_eip > limit &&
2805 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2806 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2807#ifdef VBOX
2808 if (!(e2 & DESC_A_MASK))
2809 e2 = set_segment_accessed(new_cs, e2);
2810#endif
2811 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2812 get_seg_base(e1, e2), limit, e2);
2813 EIP = new_eip;
2814 } else {
2815 /* jump to call or task gate */
2816 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2817 rpl = new_cs & 3;
2818 cpl = env->hflags & HF_CPL_MASK;
2819 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2820 switch(type) {
2821 case 1: /* 286 TSS */
2822 case 9: /* 386 TSS */
2823 case 5: /* task gate */
2824 if (dpl < cpl || dpl < rpl)
2825 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2826 next_eip = env->eip + next_eip_addend;
2827 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2828 CC_OP = CC_OP_EFLAGS;
2829 break;
2830 case 4: /* 286 call gate */
2831 case 12: /* 386 call gate */
2832 if ((dpl < cpl) || (dpl < rpl))
2833 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2834 if (!(e2 & DESC_P_MASK))
2835 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2836 gate_cs = e1 >> 16;
2837 new_eip = (e1 & 0xffff);
2838 if (type == 12)
2839 new_eip |= (e2 & 0xffff0000);
2840 if (load_segment(&e1, &e2, gate_cs) != 0)
2841 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2842 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2843 /* must be code segment */
2844 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2845 (DESC_S_MASK | DESC_CS_MASK)))
2846 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2847 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2848 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2849 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2850 if (!(e2 & DESC_P_MASK))
2851#ifdef VBOX /* See page 3-514 of 253666.pdf */
2852 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2853#else
2854 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2855#endif
2856 limit = get_seg_limit(e1, e2);
2857 if (new_eip > limit)
2858 raise_exception_err(EXCP0D_GPF, 0);
2859 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2860 get_seg_base(e1, e2), limit, e2);
2861 EIP = new_eip;
2862 break;
2863 default:
2864 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2865 break;
2866 }
2867 }
2868}
2869
2870/* real mode call */
2871void helper_lcall_real(int new_cs, target_ulong new_eip1,
2872 int shift, int next_eip)
2873{
2874 int new_eip;
2875 uint32_t esp, esp_mask;
2876 target_ulong ssp;
2877
2878 new_eip = new_eip1;
2879 esp = ESP;
2880 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2881 ssp = env->segs[R_SS].base;
2882 if (shift) {
2883 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2884 PUSHL(ssp, esp, esp_mask, next_eip);
2885 } else {
2886 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2887 PUSHW(ssp, esp, esp_mask, next_eip);
2888 }
2889
2890 SET_ESP(esp, esp_mask);
2891 env->eip = new_eip;
2892 env->segs[R_CS].selector = new_cs;
2893 env->segs[R_CS].base = (new_cs << 4);
2894}
2895
2896/* protected mode call */
2897void helper_lcall_protected(int new_cs, target_ulong new_eip,
2898 int shift, int next_eip_addend)
2899{
2900 int new_stack, i;
2901 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2902 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
2903 uint32_t val, limit, old_sp_mask;
2904 target_ulong ssp, old_ssp, next_eip;
2905
2906#ifdef VBOX /** @todo Why do we do this? */
2907 e1 = e2 = 0;
2908#endif
2909 next_eip = env->eip + next_eip_addend;
2910 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2911 LOG_PCALL_STATE(env);
2912 if ((new_cs & 0xfffc) == 0)
2913 raise_exception_err(EXCP0D_GPF, 0);
2914 if (load_segment(&e1, &e2, new_cs) != 0)
2915 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2916 cpl = env->hflags & HF_CPL_MASK;
2917 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
2918 if (e2 & DESC_S_MASK) {
2919 if (!(e2 & DESC_CS_MASK))
2920 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2921 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2922 if (e2 & DESC_C_MASK) {
2923 /* conforming code segment */
2924 if (dpl > cpl)
2925 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2926 } else {
2927 /* non conforming code segment */
2928 rpl = new_cs & 3;
2929 if (rpl > cpl)
2930 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2931 if (dpl != cpl)
2932 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2933 }
2934 if (!(e2 & DESC_P_MASK))
2935 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2936#ifdef VBOX
2937 if (!(e2 & DESC_A_MASK))
2938 e2 = set_segment_accessed(new_cs, e2);
2939#endif
2940
2941#ifdef TARGET_X86_64
2942 /* XXX: check 16/32 bit cases in long mode */
2943 if (shift == 2) {
2944 target_ulong rsp;
2945 /* 64 bit case */
2946 rsp = ESP;
2947 PUSHQ(rsp, env->segs[R_CS].selector);
2948 PUSHQ(rsp, next_eip);
2949 /* from this point, not restartable */
2950 ESP = rsp;
2951 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2952 get_seg_base(e1, e2),
2953 get_seg_limit(e1, e2), e2);
2954 EIP = new_eip;
2955 } else
2956#endif
2957 {
2958 sp = ESP;
2959 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2960 ssp = env->segs[R_SS].base;
2961 if (shift) {
2962 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2963 PUSHL(ssp, sp, sp_mask, next_eip);
2964 } else {
2965 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2966 PUSHW(ssp, sp, sp_mask, next_eip);
2967 }
2968
2969 limit = get_seg_limit(e1, e2);
2970 if (new_eip > limit)
2971 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2972 /* from this point, not restartable */
2973 SET_ESP(sp, sp_mask);
2974 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2975 get_seg_base(e1, e2), limit, e2);
2976 EIP = new_eip;
2977 }
2978 } else {
2979 /* check gate type */
2980 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2981 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2982 rpl = new_cs & 3;
2983 switch(type) {
2984 case 1: /* available 286 TSS */
2985 case 9: /* available 386 TSS */
2986 case 5: /* task gate */
2987 if (dpl < cpl || dpl < rpl)
2988 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2989 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2990 CC_OP = CC_OP_EFLAGS;
2991 return;
2992 case 4: /* 286 call gate */
2993 case 12: /* 386 call gate */
2994 break;
2995 default:
2996 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2997 break;
2998 }
2999 shift = type >> 3;
3000
3001 if (dpl < cpl || dpl < rpl)
3002 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3003 /* check valid bit */
3004 if (!(e2 & DESC_P_MASK))
3005 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3006 selector = e1 >> 16;
3007 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
3008 param_count = e2 & 0x1f;
3009 if ((selector & 0xfffc) == 0)
3010 raise_exception_err(EXCP0D_GPF, 0);
3011
3012 if (load_segment(&e1, &e2, selector) != 0)
3013 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
3014 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
3015 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
3016 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3017 if (dpl > cpl)
3018 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
3019 if (!(e2 & DESC_P_MASK))
3020 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
3021
3022 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
3023 /* to inner privilege */
3024 get_ss_esp_from_tss(&ss, &sp, dpl);
3025 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
3026 ss, sp, param_count, ESP);
3027 if ((ss & 0xfffc) == 0)
3028 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3029 if ((ss & 3) != dpl)
3030 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3031 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
3032 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3033 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3034 if (ss_dpl != dpl)
3035 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3036 if (!(ss_e2 & DESC_S_MASK) ||
3037 (ss_e2 & DESC_CS_MASK) ||
3038 !(ss_e2 & DESC_W_MASK))
3039 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3040 if (!(ss_e2 & DESC_P_MASK))
3041#ifdef VBOX /* See page 3-99 of 253666.pdf */
3042 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
3043#else
3044 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3045#endif
3046
3047 // push_size = ((param_count * 2) + 8) << shift;
3048
3049 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
3050 old_ssp = env->segs[R_SS].base;
3051
3052 sp_mask = get_sp_mask(ss_e2);
3053 ssp = get_seg_base(ss_e1, ss_e2);
3054 if (shift) {
3055 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
3056 PUSHL(ssp, sp, sp_mask, ESP);
3057 for(i = param_count - 1; i >= 0; i--) {
3058 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
3059 PUSHL(ssp, sp, sp_mask, val);
3060 }
3061 } else {
3062 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
3063 PUSHW(ssp, sp, sp_mask, ESP);
3064 for(i = param_count - 1; i >= 0; i--) {
3065 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
3066 PUSHW(ssp, sp, sp_mask, val);
3067 }
3068 }
3069 new_stack = 1;
3070 } else {
3071 /* to same privilege */
3072 sp = ESP;
3073 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3074 ssp = env->segs[R_SS].base;
3075 // push_size = (4 << shift);
3076 new_stack = 0;
3077 }
3078
3079 if (shift) {
3080 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
3081 PUSHL(ssp, sp, sp_mask, next_eip);
3082 } else {
3083 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
3084 PUSHW(ssp, sp, sp_mask, next_eip);
3085 }
3086
3087 /* from this point, not restartable */
3088
3089 if (new_stack) {
3090 ss = (ss & ~3) | dpl;
3091 cpu_x86_load_seg_cache(env, R_SS, ss,
3092 ssp,
3093 get_seg_limit(ss_e1, ss_e2),
3094 ss_e2);
3095 }
3096
3097 selector = (selector & ~3) | dpl;
3098 cpu_x86_load_seg_cache(env, R_CS, selector,
3099 get_seg_base(e1, e2),
3100 get_seg_limit(e1, e2),
3101 e2);
3102 cpu_x86_set_cpl(env, dpl);
3103 SET_ESP(sp, sp_mask);
3104 EIP = offset;
3105 }
3106}
3107
3108/* real and vm86 mode iret */
3109void helper_iret_real(int shift)
3110{
3111 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
3112 target_ulong ssp;
3113 int eflags_mask;
3114#ifdef VBOX
3115 bool fVME = false;
3116
3117 remR3TrapClear(env->pVM);
3118#endif /* VBOX */
3119
3120 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
3121 sp = ESP;
3122 ssp = env->segs[R_SS].base;
3123 if (shift == 1) {
3124 /* 32 bits */
3125 POPL(ssp, sp, sp_mask, new_eip);
3126 POPL(ssp, sp, sp_mask, new_cs);
3127 new_cs &= 0xffff;
3128 POPL(ssp, sp, sp_mask, new_eflags);
3129 } else {
3130 /* 16 bits */
3131 POPW(ssp, sp, sp_mask, new_eip);
3132 POPW(ssp, sp, sp_mask, new_cs);
3133 POPW(ssp, sp, sp_mask, new_eflags);
3134 }
3135#ifdef VBOX
3136 if ( (env->eflags & VM_MASK)
3137 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
3138 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
3139 {
3140 fVME = true;
3141 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
3142 /* if TF will be set -> #GP */
3143 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
3144 || (new_eflags & TF_MASK))
3145 raise_exception(EXCP0D_GPF);
3146 }
3147#endif /* VBOX */
3148 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
3149 env->segs[R_CS].selector = new_cs;
3150 env->segs[R_CS].base = (new_cs << 4);
3151 env->eip = new_eip;
3152#ifdef VBOX
3153 if (fVME)
3154 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3155 else
3156#endif
3157 if (env->eflags & VM_MASK)
3158 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
3159 else
3160 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
3161 if (shift == 0)
3162 eflags_mask &= 0xffff;
3163 load_eflags(new_eflags, eflags_mask);
3164 env->hflags2 &= ~HF2_NMI_MASK;
3165#ifdef VBOX
3166 if (fVME)
3167 {
3168 if (new_eflags & IF_MASK)
3169 env->eflags |= VIF_MASK;
3170 else
3171 env->eflags &= ~VIF_MASK;
3172 }
3173#endif /* VBOX */
3174}
3175
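/* When returning to an outer privilege level, data and non-conforming code
   segment registers whose DPL is below the new CPL must be invalidated; FS
   and GS with a null selector are left alone (see the note below). */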
3176static inline void validate_seg(int seg_reg, int cpl)
3177{
3178 int dpl;
3179 uint32_t e2;
3180
3181 /* XXX: on x86_64, we do not want to nullify FS and GS because
3182 they may still contain a valid base. I would be interested to
3183 know how a real x86_64 CPU behaves */
3184 if ((seg_reg == R_FS || seg_reg == R_GS) &&
3185 (env->segs[seg_reg].selector & 0xfffc) == 0)
3186 return;
3187
3188 e2 = env->segs[seg_reg].flags;
3189 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3190 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
3191 /* data or non conforming code segment */
3192 if (dpl < cpl) {
3193 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
3194 }
3195 }
3196}
3197
3198/* protected mode return (far RET and IRET) */
3199static inline void helper_ret_protected(int shift, int is_iret, int addend)
3200{
3201 uint32_t new_cs, new_eflags, new_ss;
3202 uint32_t new_es, new_ds, new_fs, new_gs;
3203 uint32_t e1, e2, ss_e1, ss_e2;
3204 int cpl, dpl, rpl, eflags_mask, iopl;
3205 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
3206
3207#ifdef VBOX /** @todo Why do we do this? */
3208 ss_e1 = ss_e2 = e1 = e2 = 0;
3209#endif
3210
3211#ifdef TARGET_X86_64
3212 if (shift == 2)
3213 sp_mask = -1;
3214 else
3215#endif
3216 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3217 sp = ESP;
3218 ssp = env->segs[R_SS].base;
3219 new_eflags = 0; /* avoid warning */
3220#ifdef TARGET_X86_64
3221 if (shift == 2) {
3222 POPQ(sp, new_eip);
3223 POPQ(sp, new_cs);
3224 new_cs &= 0xffff;
3225 if (is_iret) {
3226 POPQ(sp, new_eflags);
3227 }
3228 } else
3229#endif
3230 if (shift == 1) {
3231 /* 32 bits */
3232 POPL(ssp, sp, sp_mask, new_eip);
3233 POPL(ssp, sp, sp_mask, new_cs);
3234 new_cs &= 0xffff;
3235 if (is_iret) {
3236 POPL(ssp, sp, sp_mask, new_eflags);
3237#define LOG_GROUP LOG_GROUP_REM
3238#if defined(VBOX) && defined(DEBUG)
3239 Log(("iret: new CS %04X (old=%x)\n", new_cs, env->segs[R_CS].selector));
3240 Log(("iret: new EIP %08X\n", (uint32_t)new_eip));
3241 Log(("iret: new EFLAGS %08X\n", new_eflags));
3242 Log(("iret: EAX=%08x\n", (uint32_t)EAX));
3243#endif
3244 if (new_eflags & VM_MASK)
3245 goto return_to_vm86;
3246 }
3247#ifdef VBOX
3248 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
3249 {
3250 if ( !EMIsRawRing1Enabled(env->pVM)
3251 || env->segs[R_CS].selector == (new_cs & 0xfffc))
3252 {
3253 Log(("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc));
3254 new_cs = new_cs & 0xfffc;
3255 }
3256 else
3257 {
3258 /* Ugly assumption: assume a genuine switch to ring-1. */
3259 Log(("Genuine switch to ring-1 (iret)\n"));
3260 }
3261 }
3262 else if ((new_cs & 0x3) == 2 && (env->state & CPU_RAW_RING0) && EMIsRawRing1Enabled(env->pVM))
3263 {
3264 Log(("RPL 2 -> new_cs %04X -> %04X\n", new_cs, (new_cs & 0xfffc) | 1));
3265 new_cs = (new_cs & 0xfffc) | 1;
3266 }
3267#endif
3268 } else {
3269 /* 16 bits */
3270 POPW(ssp, sp, sp_mask, new_eip);
3271 POPW(ssp, sp, sp_mask, new_cs);
3272 if (is_iret)
3273 POPW(ssp, sp, sp_mask, new_eflags);
3274 }
3275 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
3276 new_cs, new_eip, shift, addend);
3277 LOG_PCALL_STATE(env);
3278 if ((new_cs & 0xfffc) == 0)
3279 {
3280#if defined(VBOX) && defined(DEBUG)
3281 Log(("(new_cs & 0xfffc) == 0\n"));
3282#endif
3283 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3284 }
3285 if (load_segment(&e1, &e2, new_cs) != 0)
3286 {
3287#if defined(VBOX) && defined(DEBUG)
3288 Log(("load_segment failed\n"));
3289#endif
3290 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3291 }
3292 if (!(e2 & DESC_S_MASK) ||
3293 !(e2 & DESC_CS_MASK))
3294 {
3295#if defined(VBOX) && defined(DEBUG)
3296 Log(("e2 mask %08x\n", e2));
3297#endif
3298 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3299 }
3300 cpl = env->hflags & HF_CPL_MASK;
3301 rpl = new_cs & 3;
3302 if (rpl < cpl)
3303 {
3304#if defined(VBOX) && defined(DEBUG)
3305 Log(("rpl < cpl (%d vs %d)\n", rpl, cpl));
3306#endif
3307 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3308 }
3309 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3310
3311 if (e2 & DESC_C_MASK) {
3312 if (dpl > rpl)
3313 {
3314#if defined(VBOX) && defined(DEBUG)
3315 Log(("dpl > rpl (%d vs %d)\n", dpl, rpl));
3316#endif
3317 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3318 }
3319 } else {
3320 if (dpl != rpl)
3321 {
3322#if defined(VBOX) && defined(DEBUG)
3323 Log(("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2));
3324#endif
3325 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3326 }
3327 }
3328 if (!(e2 & DESC_P_MASK))
3329 {
3330#if defined(VBOX) && defined(DEBUG)
3331 Log(("DESC_P_MASK e2=%08x\n", e2));
3332#endif
3333 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3334 }
3335
3336 sp += addend;
3337 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
3338 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
3339 /* return to same privilege level */
3340#ifdef VBOX
3341 if (!(e2 & DESC_A_MASK))
3342 e2 = set_segment_accessed(new_cs, e2);
3343#endif
3344 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3345 get_seg_base(e1, e2),
3346 get_seg_limit(e1, e2),
3347 e2);
3348 } else {
3349 /* return to different privilege level */
3350#ifdef TARGET_X86_64
3351 if (shift == 2) {
3352 POPQ(sp, new_esp);
3353 POPQ(sp, new_ss);
3354 new_ss &= 0xffff;
3355 } else
3356#endif
3357 if (shift == 1) {
3358 /* 32 bits */
3359 POPL(ssp, sp, sp_mask, new_esp);
3360 POPL(ssp, sp, sp_mask, new_ss);
3361 new_ss &= 0xffff;
3362 } else {
3363 /* 16 bits */
3364 POPW(ssp, sp, sp_mask, new_esp);
3365 POPW(ssp, sp, sp_mask, new_ss);
3366 }
3367 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
3368 new_ss, new_esp);
3369 if ((new_ss & 0xfffc) == 0) {
3370#ifdef TARGET_X86_64
3371 /* NULL ss is allowed in long mode if cpl != 3 */
3372# ifndef VBOX
3373 /* XXX: test CS64 ? */
3374 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
3375 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3376 0, 0xffffffff,
3377 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3378 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
3379 DESC_W_MASK | DESC_A_MASK);
3380 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
3381 } else
3382# else /* VBOX */
3383 if ((env->hflags & HF_LMA_MASK) && rpl != 3 && (e2 & DESC_L_MASK)) {
3384 if (!(e2 & DESC_A_MASK))
3385 e2 = set_segment_accessed(new_cs, e2);
3386 cpu_x86_load_seg_cache_with_clean_flags(env, R_SS, new_ss,
3387 0, 0xffffffff,
3388 DESC_INTEL_UNUSABLE | (rpl << DESC_DPL_SHIFT) );
3389 ss_e2 = DESC_B_MASK; /* not really used */
3390 } else
3391# endif
3392#endif
3393 {
3394#if defined(VBOX) && defined(DEBUG)
3395 Log(("NULL ss, rpl=%d\n", rpl));
3396#endif
3397 raise_exception_err(EXCP0D_GPF, 0);
3398 }
3399 } else {
3400 if ((new_ss & 3) != rpl)
3401 {
3402#if defined(VBOX) && defined(DEBUG)
3403 Log(("new_ss=%x != rpl=%d\n", new_ss, rpl));
3404#endif
3405 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3406 }
3407 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
3408 {
3409#if defined(VBOX) && defined(DEBUG)
3410 Log(("new_ss=%x load error\n", new_ss));
3411#endif
3412 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3413 }
3414 if (!(ss_e2 & DESC_S_MASK) ||
3415 (ss_e2 & DESC_CS_MASK) ||
3416 !(ss_e2 & DESC_W_MASK))
3417 {
3418#if defined(VBOX) && defined(DEBUG)
3419 Log(("new_ss=%x ss_e2=%#x bad type\n", new_ss, ss_e2));
3420#endif
3421 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3422 }
3423 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3424 if (dpl != rpl)
3425 {
3426#if defined(VBOX) && defined(DEBUG)
3427 Log(("SS.dpl=%u != rpl=%u\n", dpl, rpl));
3428#endif
3429 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3430 }
3431 if (!(ss_e2 & DESC_P_MASK))
3432 {
3433#if defined(VBOX) && defined(DEBUG)
3434 Log(("new_ss=%#x #NP\n", new_ss));
3435#endif
3436 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
3437 }
3438#ifdef VBOX
3439 if (!(e2 & DESC_A_MASK))
3440 e2 = set_segment_accessed(new_cs, e2);
3441 if (!(ss_e2 & DESC_A_MASK))
3442 ss_e2 = set_segment_accessed(new_ss, ss_e2);
3443#endif
3444 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3445 get_seg_base(ss_e1, ss_e2),
3446 get_seg_limit(ss_e1, ss_e2),
3447 ss_e2);
3448 }
3449
3450 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3451 get_seg_base(e1, e2),
3452 get_seg_limit(e1, e2),
3453 e2);
3454 cpu_x86_set_cpl(env, rpl);
3455 sp = new_esp;
3456#ifdef TARGET_X86_64
3457 if (env->hflags & HF_CS64_MASK)
3458 sp_mask = -1;
3459 else
3460#endif
3461 sp_mask = get_sp_mask(ss_e2);
3462
3463 /* validate data segments */
3464 validate_seg(R_ES, rpl);
3465 validate_seg(R_DS, rpl);
3466 validate_seg(R_FS, rpl);
3467 validate_seg(R_GS, rpl);
3468
3469 sp += addend;
3470 }
3471 SET_ESP(sp, sp_mask);
3472 env->eip = new_eip;
3473 if (is_iret) {
3474 /* NOTE: 'cpl' is the _old_ CPL */
3475 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3476 if (cpl == 0)
3477#ifdef VBOX
3478 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
3479#else
3480 eflags_mask |= IOPL_MASK;
3481#endif
3482 iopl = (env->eflags >> IOPL_SHIFT) & 3;
3483 if (cpl <= iopl)
3484 eflags_mask |= IF_MASK;
3485 if (shift == 0)
3486 eflags_mask &= 0xffff;
3487 load_eflags(new_eflags, eflags_mask);
3488 }
3489 return;
3490
3491 return_to_vm86:
3492 POPL(ssp, sp, sp_mask, new_esp);
3493 POPL(ssp, sp, sp_mask, new_ss);
3494 POPL(ssp, sp, sp_mask, new_es);
3495 POPL(ssp, sp, sp_mask, new_ds);
3496 POPL(ssp, sp, sp_mask, new_fs);
3497 POPL(ssp, sp, sp_mask, new_gs);
3498
3499 /* modify processor state */
3500 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
3501 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
3502 load_seg_vm(R_CS, new_cs & 0xffff);
3503 cpu_x86_set_cpl(env, 3);
3504 load_seg_vm(R_SS, new_ss & 0xffff);
3505 load_seg_vm(R_ES, new_es & 0xffff);
3506 load_seg_vm(R_DS, new_ds & 0xffff);
3507 load_seg_vm(R_FS, new_fs & 0xffff);
3508 load_seg_vm(R_GS, new_gs & 0xffff);
3509
3510 env->eip = new_eip & 0xffff;
3511 ESP = new_esp;
3512}
3513
3514void helper_iret_protected(int shift, int next_eip)
3515{
3516 int tss_selector, type;
3517 uint32_t e1, e2;
3518
3519#ifdef VBOX
3520 Log(("iret (shift=%d new_eip=%#x)\n", shift, next_eip));
3521 e1 = e2 = 0; /** @todo Why do we do this? */
3522 remR3TrapClear(env->pVM);
3523#endif
3524
3525 /* specific case for TSS */
3526 if (env->eflags & NT_MASK) {
3527#ifdef TARGET_X86_64
3528 if (env->hflags & HF_LMA_MASK)
3529 {
3530#if defined(VBOX) && defined(DEBUG)
3531 Log(("eflags.NT=1 on iret in long mode\n"));
3532#endif
3533 raise_exception_err(EXCP0D_GPF, 0);
3534 }
3535#endif
3536 tss_selector = lduw_kernel(env->tr.base + 0);
3537 if (tss_selector & 4)
3538 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3539 if (load_segment(&e1, &e2, tss_selector) != 0)
3540 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3541 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
3542 /* NOTE: we check both segment and busy TSS */
3543 if (type != 3)
3544 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3545 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
3546 } else {
3547 helper_ret_protected(shift, 1, 0);
3548 }
3549 env->hflags2 &= ~HF2_NMI_MASK;
3550}
3551
3552void helper_lret_protected(int shift, int addend)
3553{
3554 helper_ret_protected(shift, 0, addend);
3555}
3556
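/* SYSENTER: #GP(0) if IA32_SYSENTER_CS is zero; otherwise drop to CPL 0 with
   flat code and stack segments derived from SYSENTER_CS and load ESP/EIP from
   the SYSENTER_ESP/SYSENTER_EIP MSRs. */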
3557void helper_sysenter(void)
3558{
3559 if (env->sysenter_cs == 0) {
3560 raise_exception_err(EXCP0D_GPF, 0);
3561 }
3562 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
3563 cpu_x86_set_cpl(env, 0);
3564
3565#ifdef TARGET_X86_64
3566 if (env->hflags & HF_LMA_MASK) {
3567 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3568 0, 0xffffffff,
3569 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3570 DESC_S_MASK |
3571 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3572 } else
3573#endif
3574 {
3575 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3576 0, 0xffffffff,
3577 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3578 DESC_S_MASK |
3579 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3580 }
3581 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
3582 0, 0xffffffff,
3583 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3584 DESC_S_MASK |
3585 DESC_W_MASK | DESC_A_MASK);
3586 ESP = env->sysenter_esp;
3587 EIP = env->sysenter_eip;
3588}
3589
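/* SYSEXIT: only valid at CPL 0 with IA32_SYSENTER_CS set up; return to CPL 3
   with flat segments derived from SYSENTER_CS, taking ESP from ECX and EIP
   from EDX. */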
3590void helper_sysexit(int dflag)
3591{
3592 int cpl;
3593
3594 cpl = env->hflags & HF_CPL_MASK;
3595 if (env->sysenter_cs == 0 || cpl != 0) {
3596 raise_exception_err(EXCP0D_GPF, 0);
3597 }
3598 cpu_x86_set_cpl(env, 3);
3599#ifdef TARGET_X86_64
3600 if (dflag == 2) {
3601 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
3602 0, 0xffffffff,
3603 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3604 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3605 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3606 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
3607 0, 0xffffffff,
3608 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3609 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3610 DESC_W_MASK | DESC_A_MASK);
3611 } else
3612#endif
3613 {
3614 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
3615 0, 0xffffffff,
3616 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3617 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3618 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3619 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3620 0, 0xffffffff,
3621 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3622 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3623 DESC_W_MASK | DESC_A_MASK);
3624 }
3625 ESP = ECX;
3626 EIP = EDX;
3627}
3628
3629#if defined(CONFIG_USER_ONLY)
3630target_ulong helper_read_crN(int reg)
3631{
3632 return 0;
3633}
3634
3635void helper_write_crN(int reg, target_ulong t0)
3636{
3637}
3638
3639void helper_movl_drN_T0(int reg, target_ulong t0)
3640{
3641}
3642#else
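/* Read a control register. CR8 reads return the task priority from the local
   APIC, or the shadow v_tpr while SVM virtual interrupt masking is active. */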
3643target_ulong helper_read_crN(int reg)
3644{
3645 target_ulong val;
3646
3647 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3648 switch(reg) {
3649 default:
3650 val = env->cr[reg];
3651 break;
3652 case 8:
3653 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3654#ifndef VBOX
3655 val = cpu_get_apic_tpr(env->apic_state);
3656#else /* VBOX */
3657 val = cpu_get_apic_tpr(env);
3658#endif /* VBOX */
3659 } else {
3660 val = env->v_tpr;
3661 }
3662 break;
3663 }
3664 return val;
3665}
3666
3667void helper_write_crN(int reg, target_ulong t0)
3668{
3669 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3670 switch(reg) {
3671 case 0:
3672 cpu_x86_update_cr0(env, t0);
3673 break;
3674 case 3:
3675 cpu_x86_update_cr3(env, t0);
3676 break;
3677 case 4:
3678 cpu_x86_update_cr4(env, t0);
3679 break;
3680 case 8:
3681 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3682#ifndef VBOX
3683 cpu_set_apic_tpr(env->apic_state, t0);
3684#else /* VBOX */
3685 cpu_set_apic_tpr(env, t0);
3686#endif /* VBOX */
3687 }
3688 env->v_tpr = t0 & 0x0f;
3689 break;
3690 default:
3691 env->cr[reg] = t0;
3692 break;
3693 }
3694}
3695
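/* MOV to DRx: update the register and re-install the affected hardware
   breakpoints. The VirtualBox build additionally checks the must-be-zero
   bits and treats DR4/DR5 as aliases of DR6/DR7. */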
3696void helper_movl_drN_T0(int reg, target_ulong t0)
3697{
3698 int i;
3699
3700 if (reg < 4) {
3701 hw_breakpoint_remove(env, reg);
3702 env->dr[reg] = t0;
3703 hw_breakpoint_insert(env, reg);
3704# ifndef VBOX
3705 } else if (reg == 7) {
3706# else
3707 } else if (reg == 7 || reg == 5) { /* (DR5 is an alias for DR7.) */
3708 if (t0 & X86_DR7_MBZ_MASK)
3709 raise_exception_err(EXCP0D_GPF, 0);
3710 t0 |= X86_DR7_RA1_MASK;
3711 t0 &= ~X86_DR7_RAZ_MASK;
3712# endif
3713 for (i = 0; i < 4; i++)
3714 hw_breakpoint_remove(env, i);
3715 env->dr[7] = t0;
3716 for (i = 0; i < 4; i++)
3717 hw_breakpoint_insert(env, i);
3718 } else {
3719# ifndef VBOX
3720 env->dr[reg] = t0;
3721# else
3722 if (t0 & X86_DR6_MBZ_MASK)
3723 raise_exception_err(EXCP0D_GPF, 0);
3724 t0 |= X86_DR6_RA1_MASK;
3725 t0 &= ~X86_DR6_RAZ_MASK;
3726 env->dr[6] = t0; /* (DR4 is an alias for DR6.) */
3727# endif
3728 }
3729}
3730#endif
3731
3732void helper_lmsw(target_ulong t0)
3733{
3734 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
3735 if already set to one. */
3736 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3737 helper_write_crN(0, t0);
3738}
3739
3740void helper_clts(void)
3741{
3742 env->cr[0] &= ~CR0_TS_MASK;
3743 env->hflags &= ~HF_TS_MASK;
3744}
3745
3746void helper_invlpg(target_ulong addr)
3747{
3748 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3749 tlb_flush_page(env, addr);
3750}
3751
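/* RDTSC: #GP(0) when CR4.TSD is set outside ring 0; otherwise return the
   (offset-adjusted) time stamp counter in EDX:EAX. */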
3752void helper_rdtsc(void)
3753{
3754 uint64_t val;
3755
3756 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3757 raise_exception(EXCP0D_GPF);
3758 }
3759 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3760
3761 val = cpu_get_tsc(env) + env->tsc_offset;
3762 EAX = (uint32_t)(val);
3763 EDX = (uint32_t)(val >> 32);
3764}
3765
3766void helper_rdtscp(void)
3767{
3768 helper_rdtsc();
3769#ifndef VBOX
3770 ECX = (uint32_t)(env->tsc_aux);
3771#else /* VBOX */
3772 uint64_t val;
3773 if (cpu_rdmsr(env, MSR_K8_TSC_AUX, &val) == 0)
3774 ECX = (uint32_t)(val);
3775 else
3776 ECX = 0;
3777#endif /* VBOX */
3778}
3779
3780void helper_rdpmc(void)
3781{
3782#ifdef VBOX
3783 /* If X86_CR4_PCE is *not* set, then CPL must be zero. */
3784 if (!(env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3785 raise_exception(EXCP0D_GPF);
3786 }
3787 /* Just return zero here; rather tricky to properly emulate this, especially as the specs are a mess. */
3788 EAX = 0;
3789 EDX = 0;
3790#else /* !VBOX */
3791 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3792 raise_exception(EXCP0D_GPF);
3793 }
3794 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3795
3796 /* currently unimplemented */
3797 raise_exception_err(EXCP06_ILLOP, 0);
3798#endif /* !VBOX */
3799}
3800
3801#if defined(CONFIG_USER_ONLY)
3802void helper_wrmsr(void)
3803{
3804}
3805
3806void helper_rdmsr(void)
3807{
3808}
3809#else
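/* WRMSR: write EDX:EAX to the MSR selected by ECX. Unknown MSRs are silently
   ignored here; the VirtualBox build forwards the write to CPUM afterwards. */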
3810void helper_wrmsr(void)
3811{
3812 uint64_t val;
3813
3814 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3815
3816 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3817
3818 switch((uint32_t)ECX) {
3819 case MSR_IA32_SYSENTER_CS:
3820 env->sysenter_cs = val & 0xffff;
3821 break;
3822 case MSR_IA32_SYSENTER_ESP:
3823 env->sysenter_esp = val;
3824 break;
3825 case MSR_IA32_SYSENTER_EIP:
3826 env->sysenter_eip = val;
3827 break;
3828 case MSR_IA32_APICBASE:
3829# ifndef VBOX /* The CPUMSetGuestMsr call below does this now. */
3830 cpu_set_apic_base(env->apic_state, val);
3831# endif
3832 break;
3833 case MSR_EFER:
3834 {
3835 uint64_t update_mask;
3836 update_mask = 0;
3837 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3838 update_mask |= MSR_EFER_SCE;
3839 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3840 update_mask |= MSR_EFER_LME;
3841 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3842 update_mask |= MSR_EFER_FFXSR;
3843 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3844 update_mask |= MSR_EFER_NXE;
3845 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3846 update_mask |= MSR_EFER_SVME;
3847 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3848 update_mask |= MSR_EFER_FFXSR;
3849 cpu_load_efer(env, (env->efer & ~update_mask) |
3850 (val & update_mask));
3851 }
3852 break;
3853 case MSR_STAR:
3854 env->star = val;
3855 break;
3856 case MSR_PAT:
3857 env->pat = val;
3858 break;
3859 case MSR_VM_HSAVE_PA:
3860 env->vm_hsave = val;
3861 break;
3862#ifdef TARGET_X86_64
3863 case MSR_LSTAR:
3864 env->lstar = val;
3865 break;
3866 case MSR_CSTAR:
3867 env->cstar = val;
3868 break;
3869 case MSR_FMASK:
3870 env->fmask = val;
3871 break;
3872 case MSR_FSBASE:
3873 env->segs[R_FS].base = val;
3874 break;
3875 case MSR_GSBASE:
3876 env->segs[R_GS].base = val;
3877 break;
3878 case MSR_KERNELGSBASE:
3879 env->kernelgsbase = val;
3880 break;
3881#endif
3882# ifndef VBOX
3883 case MSR_MTRRphysBase(0):
3884 case MSR_MTRRphysBase(1):
3885 case MSR_MTRRphysBase(2):
3886 case MSR_MTRRphysBase(3):
3887 case MSR_MTRRphysBase(4):
3888 case MSR_MTRRphysBase(5):
3889 case MSR_MTRRphysBase(6):
3890 case MSR_MTRRphysBase(7):
3891 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3892 break;
3893 case MSR_MTRRphysMask(0):
3894 case MSR_MTRRphysMask(1):
3895 case MSR_MTRRphysMask(2):
3896 case MSR_MTRRphysMask(3):
3897 case MSR_MTRRphysMask(4):
3898 case MSR_MTRRphysMask(5):
3899 case MSR_MTRRphysMask(6):
3900 case MSR_MTRRphysMask(7):
3901 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3902 break;
3903 case MSR_MTRRfix64K_00000:
3904 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3905 break;
3906 case MSR_MTRRfix16K_80000:
3907 case MSR_MTRRfix16K_A0000:
3908 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3909 break;
3910 case MSR_MTRRfix4K_C0000:
3911 case MSR_MTRRfix4K_C8000:
3912 case MSR_MTRRfix4K_D0000:
3913 case MSR_MTRRfix4K_D8000:
3914 case MSR_MTRRfix4K_E0000:
3915 case MSR_MTRRfix4K_E8000:
3916 case MSR_MTRRfix4K_F0000:
3917 case MSR_MTRRfix4K_F8000:
3918 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3919 break;
3920 case MSR_MTRRdefType:
3921 env->mtrr_deftype = val;
3922 break;
3923 case MSR_MCG_STATUS:
3924 env->mcg_status = val;
3925 break;
3926 case MSR_MCG_CTL:
3927 if ((env->mcg_cap & MCG_CTL_P)
3928 && (val == 0 || val == ~(uint64_t)0))
3929 env->mcg_ctl = val;
3930 break;
3931 case MSR_TSC_AUX:
3932 env->tsc_aux = val;
3933 break;
3934# endif /* !VBOX */
3935 default:
3936# ifndef VBOX
3937 if ((uint32_t)ECX >= MSR_MC0_CTL
3938 && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
3939 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3940 if ((offset & 0x3) != 0
3941 || (val == 0 || val == ~(uint64_t)0))
3942 env->mce_banks[offset] = val;
3943 break;
3944 }
3945 /* XXX: exception ? */
3946# endif
3947 break;
3948 }
3949
3950# ifdef VBOX
3951 /* call CPUM. */
3952 if (cpu_wrmsr(env, (uint32_t)ECX, val) != 0)
3953 {
3954 /** @todo be a brave man and raise a \#GP(0) here as we should... */
3955 }
3956# endif
3957}
3958
3959void helper_rdmsr(void)
3960{
3961 uint64_t val;
3962
3963 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3964
3965 switch((uint32_t)ECX) {
3966 case MSR_IA32_SYSENTER_CS:
3967 val = env->sysenter_cs;
3968 break;
3969 case MSR_IA32_SYSENTER_ESP:
3970 val = env->sysenter_esp;
3971 break;
3972 case MSR_IA32_SYSENTER_EIP:
3973 val = env->sysenter_eip;
3974 break;
3975 case MSR_IA32_APICBASE:
3976#ifndef VBOX
3977 val = cpu_get_apic_base(env->apic_state);
3978#else /* VBOX */
3979 val = cpu_get_apic_base(env);
3980#endif /* VBOX */
3981 break;
3982 case MSR_EFER:
3983 val = env->efer;
3984 break;
3985 case MSR_STAR:
3986 val = env->star;
3987 break;
3988 case MSR_PAT:
3989 val = env->pat;
3990 break;
3991 case MSR_VM_HSAVE_PA:
3992 val = env->vm_hsave;
3993 break;
3994# ifndef VBOX /* forward to CPUMQueryGuestMsr. */
3995 case MSR_IA32_PERF_STATUS:
3996 /* tsc_increment_by_tick */
3997 val = 1000ULL;
3998 /* CPU multiplier */
3999 val |= (((uint64_t)4ULL) << 40);
4000 break;
4001# endif /* !VBOX */
4002#ifdef TARGET_X86_64
4003 case MSR_LSTAR:
4004 val = env->lstar;
4005 break;
4006 case MSR_CSTAR:
4007 val = env->cstar;
4008 break;
4009 case MSR_FMASK:
4010 val = env->fmask;
4011 break;
4012 case MSR_FSBASE:
4013 val = env->segs[R_FS].base;
4014 break;
4015 case MSR_GSBASE:
4016 val = env->segs[R_GS].base;
4017 break;
4018 case MSR_KERNELGSBASE:
4019 val = env->kernelgsbase;
4020 break;
4021# ifndef VBOX
4022 case MSR_TSC_AUX:
4023 val = env->tsc_aux;
4024 break;
4025# endif /*!VBOX*/
4026#endif
4027# ifndef VBOX
4028 case MSR_MTRRphysBase(0):
4029 case MSR_MTRRphysBase(1):
4030 case MSR_MTRRphysBase(2):
4031 case MSR_MTRRphysBase(3):
4032 case MSR_MTRRphysBase(4):
4033 case MSR_MTRRphysBase(5):
4034 case MSR_MTRRphysBase(6):
4035 case MSR_MTRRphysBase(7):
4036 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
4037 break;
4038 case MSR_MTRRphysMask(0):
4039 case MSR_MTRRphysMask(1):
4040 case MSR_MTRRphysMask(2):
4041 case MSR_MTRRphysMask(3):
4042 case MSR_MTRRphysMask(4):
4043 case MSR_MTRRphysMask(5):
4044 case MSR_MTRRphysMask(6):
4045 case MSR_MTRRphysMask(7):
4046 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
4047 break;
4048 case MSR_MTRRfix64K_00000:
4049 val = env->mtrr_fixed[0];
4050 break;
4051 case MSR_MTRRfix16K_80000:
4052 case MSR_MTRRfix16K_A0000:
4053 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
4054 break;
4055 case MSR_MTRRfix4K_C0000:
4056 case MSR_MTRRfix4K_C8000:
4057 case MSR_MTRRfix4K_D0000:
4058 case MSR_MTRRfix4K_D8000:
4059 case MSR_MTRRfix4K_E0000:
4060 case MSR_MTRRfix4K_E8000:
4061 case MSR_MTRRfix4K_F0000:
4062 case MSR_MTRRfix4K_F8000:
4063 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
4064 break;
4065 case MSR_MTRRdefType:
4066 val = env->mtrr_deftype;
4067 break;
4068 case MSR_MTRRcap:
4069 if (env->cpuid_features & CPUID_MTRR)
4070 val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
4071 else
4072 /* XXX: exception ? */
4073 val = 0;
4074 break;
4075 case MSR_MCG_CAP:
4076 val = env->mcg_cap;
4077 break;
4078 case MSR_MCG_CTL:
4079 if (env->mcg_cap & MCG_CTL_P)
4080 val = env->mcg_ctl;
4081 else
4082 val = 0;
4083 break;
4084 case MSR_MCG_STATUS:
4085 val = env->mcg_status;
4086 break;
4087# endif /* !VBOX */
4088 default:
4089# ifndef VBOX
4090 if ((uint32_t)ECX >= MSR_MC0_CTL
4091 && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
4092 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
4093 val = env->mce_banks[offset];
4094 break;
4095 }
4096 /* XXX: exception ? */
4097 val = 0;
4098# else /* VBOX */
4099 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
4100 {
4101 /** @todo be a brave man and raise a \#GP(0) here as we should... */
4102 val = 0;
4103 }
4104# endif /* VBOX */
4105 break;
4106 }
4107 EAX = (uint32_t)(val);
4108 EDX = (uint32_t)(val >> 32);
4109
4110# ifdef VBOX_STRICT
4111 if ((uint32_t)ECX != MSR_IA32_TSC) {
4112 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
4113 val = 0;
4114 AssertMsg(val == RT_MAKE_U64(EAX, EDX), ("idMsr=%#x val=%#llx eax:edx=%#llx\n", (uint32_t)ECX, val, RT_MAKE_U64(EAX, EDX)));
4115 }
4116# endif
4117}
4118#endif
4119
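/* LSL, LAR, VERR and VERW below share a pattern: they never fault on a bad
   selector; instead they report success or failure through ZF alone, which is
   why CC_SRC is rewritten with the computed eflags in every path. LSL returns
   the expanded segment limit, LAR the access-rights dword. */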
4120target_ulong helper_lsl(target_ulong selector1)
4121{
4122 unsigned int limit;
4123 uint32_t e1, e2, eflags, selector;
4124 int rpl, dpl, cpl, type;
4125
4126 selector = selector1 & 0xffff;
4127 eflags = helper_cc_compute_all(CC_OP);
4128 if ((selector & 0xfffc) == 0)
4129 goto fail;
4130 if (load_segment(&e1, &e2, selector) != 0)
4131 goto fail;
4132 rpl = selector & 3;
4133 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4134 cpl = env->hflags & HF_CPL_MASK;
4135 if (e2 & DESC_S_MASK) {
4136 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
4137 /* conforming */
4138 } else {
4139 if (dpl < cpl || dpl < rpl)
4140 goto fail;
4141 }
4142 } else {
4143 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
4144 switch(type) {
4145 case 1:
4146 case 2:
4147 case 3:
4148 case 9:
4149 case 11:
4150 break;
4151 default:
4152 goto fail;
4153 }
4154 if (dpl < cpl || dpl < rpl) {
4155 fail:
4156 CC_SRC = eflags & ~CC_Z;
4157 return 0;
4158 }
4159 }
4160 limit = get_seg_limit(e1, e2);
4161 CC_SRC = eflags | CC_Z;
4162 return limit;
4163}
4164
4165target_ulong helper_lar(target_ulong selector1)
4166{
4167 uint32_t e1, e2, eflags, selector;
4168 int rpl, dpl, cpl, type;
4169
4170 selector = selector1 & 0xffff;
4171 eflags = helper_cc_compute_all(CC_OP);
4172 if ((selector & 0xfffc) == 0)
4173 goto fail;
4174 if (load_segment(&e1, &e2, selector) != 0)
4175 goto fail;
4176 rpl = selector & 3;
4177 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4178 cpl = env->hflags & HF_CPL_MASK;
4179 if (e2 & DESC_S_MASK) {
4180 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
4181 /* conforming */
4182 } else {
4183 if (dpl < cpl || dpl < rpl)
4184 goto fail;
4185 }
4186 } else {
4187 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
4188 switch(type) {
4189 case 1:
4190 case 2:
4191 case 3:
4192 case 4:
4193 case 5:
4194 case 9:
4195 case 11:
4196 case 12:
4197 break;
4198 default:
4199 goto fail;
4200 }
4201 if (dpl < cpl || dpl < rpl) {
4202 fail:
4203 CC_SRC = eflags & ~CC_Z;
4204 return 0;
4205 }
4206 }
4207 CC_SRC = eflags | CC_Z;
4208#ifdef VBOX /* AMD says 0x00ffff00, while Intel says 0x00fxff00. Bochs and IEM do what AMD says (x=f). */
4209 return e2 & 0x00ffff00;
4210#else
4211 return e2 & 0x00f0ff00;
4212#endif
4213}
4214
4215void helper_verr(target_ulong selector1)
4216{
4217 uint32_t e1, e2, eflags, selector;
4218 int rpl, dpl, cpl;
4219
4220 selector = selector1 & 0xffff;
4221 eflags = helper_cc_compute_all(CC_OP);
4222 if ((selector & 0xfffc) == 0)
4223 goto fail;
4224 if (load_segment(&e1, &e2, selector) != 0)
4225 goto fail;
4226 if (!(e2 & DESC_S_MASK))
4227 goto fail;
4228 rpl = selector & 3;
4229 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4230 cpl = env->hflags & HF_CPL_MASK;
4231 if (e2 & DESC_CS_MASK) {
4232 if (!(e2 & DESC_R_MASK))
4233 goto fail;
4234 if (!(e2 & DESC_C_MASK)) {
4235 if (dpl < cpl || dpl < rpl)
4236 goto fail;
4237 }
4238 } else {
4239 if (dpl < cpl || dpl < rpl) {
4240 fail:
4241 CC_SRC = eflags & ~CC_Z;
4242 return;
4243 }
4244 }
4245 CC_SRC = eflags | CC_Z;
4246}
4247
4248void helper_verw(target_ulong selector1)
4249{
4250 uint32_t e1, e2, eflags, selector;
4251 int rpl, dpl, cpl;
4252
4253 selector = selector1 & 0xffff;
4254 eflags = helper_cc_compute_all(CC_OP);
4255 if ((selector & 0xfffc) == 0)
4256 goto fail;
4257 if (load_segment(&e1, &e2, selector) != 0)
4258 goto fail;
4259 if (!(e2 & DESC_S_MASK))
4260 goto fail;
4261 rpl = selector & 3;
4262 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4263 cpl = env->hflags & HF_CPL_MASK;
4264 if (e2 & DESC_CS_MASK) {
4265 goto fail;
4266 } else {
4267 if (dpl < cpl || dpl < rpl)
4268 goto fail;
4269 if (!(e2 & DESC_W_MASK)) {
4270 fail:
4271 CC_SRC = eflags & ~CC_Z;
4272 return;
4273 }
4274 }
4275 CC_SRC = eflags | CC_Z;
4276}
4277
4278/* x87 FPU helpers */
4279
4280static void fpu_set_exception(int mask)
4281{
4282 env->fpus |= mask;
4283 if (env->fpus & (~env->fpuc & FPUC_EM))
4284 env->fpus |= FPUS_SE | FPUS_B;
4285}
4286
4287static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4288{
4289 if (b == 0.0)
4290 fpu_set_exception(FPUS_ZE);
4291 return a / b;
4292}
4293
4294static void fpu_raise_exception(void)
4295{
4296 if (env->cr[0] & CR0_NE_MASK) {
4297 raise_exception(EXCP10_COPR);
4298 }
4299#if !defined(CONFIG_USER_ONLY)
4300 else {
4301 cpu_set_ferr(env);
4302 }
4303#endif
4304}
4305
4306void helper_flds_FT0(uint32_t val)
4307{
4308 union {
4309 float32 f;
4310 uint32_t i;
4311 } u;
4312 u.i = val;
4313 FT0 = float32_to_floatx(u.f, &env->fp_status);
4314}
4315
4316void helper_fldl_FT0(uint64_t val)
4317{
4318 union {
4319 float64 f;
4320 uint64_t i;
4321 } u;
4322 u.i = val;
4323 FT0 = float64_to_floatx(u.f, &env->fp_status);
4324}
4325
4326void helper_fildl_FT0(int32_t val)
4327{
4328 FT0 = int32_to_floatx(val, &env->fp_status);
4329}
4330
4331void helper_flds_ST0(uint32_t val)
4332{
4333 int new_fpstt;
4334 union {
4335 float32 f;
4336 uint32_t i;
4337 } u;
4338 new_fpstt = (env->fpstt - 1) & 7;
4339 u.i = val;
4340 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
4341 env->fpstt = new_fpstt;
4342 env->fptags[new_fpstt] = 0; /* validate stack entry */
4343}
4344
4345void helper_fldl_ST0(uint64_t val)
4346{
4347 int new_fpstt;
4348 union {
4349 float64 f;
4350 uint64_t i;
4351 } u;
4352 new_fpstt = (env->fpstt - 1) & 7;
4353 u.i = val;
4354 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
4355 env->fpstt = new_fpstt;
4356 env->fptags[new_fpstt] = 0; /* validate stack entry */
4357}
4358
4359void helper_fildl_ST0(int32_t val)
4360{
4361 int new_fpstt;
4362 new_fpstt = (env->fpstt - 1) & 7;
4363 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
4364 env->fpstt = new_fpstt;
4365 env->fptags[new_fpstt] = 0; /* validate stack entry */
4366}
4367
4368void helper_fildll_ST0(int64_t val)
4369{
4370 int new_fpstt;
4371 new_fpstt = (env->fpstt - 1) & 7;
4372 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
4373 env->fpstt = new_fpstt;
4374 env->fptags[new_fpstt] = 0; /* validate stack entry */
4375}
4376
4377#ifndef VBOX
4378uint32_t helper_fsts_ST0(void)
4379#else
4380RTCCUINTREG helper_fsts_ST0(void)
4381#endif
4382{
4383 union {
4384 float32 f;
4385 uint32_t i;
4386 } u;
4387 u.f = floatx_to_float32(ST0, &env->fp_status);
4388 return u.i;
4389}
4390
4391uint64_t helper_fstl_ST0(void)
4392{
4393 union {
4394 float64 f;
4395 uint64_t i;
4396 } u;
4397 u.f = floatx_to_float64(ST0, &env->fp_status);
4398 return u.i;
4399}
4400
4401#ifndef VBOX
4402int32_t helper_fist_ST0(void)
4403#else
4404RTCCINTREG helper_fist_ST0(void)
4405#endif
4406{
4407 int32_t val;
4408 val = floatx_to_int32(ST0, &env->fp_status);
4409 if (val != (int16_t)val)
4410 val = -32768;
4411 return val;
4412}
4413
4414#ifndef VBOX
4415int32_t helper_fistl_ST0(void)
4416#else
4417RTCCINTREG helper_fistl_ST0(void)
4418#endif
4419{
4420 int32_t val;
4421 val = floatx_to_int32(ST0, &env->fp_status);
4422 return val;
4423}
4424
4425int64_t helper_fistll_ST0(void)
4426{
4427 int64_t val;
4428 val = floatx_to_int64(ST0, &env->fp_status);
4429 return val;
4430}
4431
4432#ifndef VBOX
4433int32_t helper_fistt_ST0(void)
4434#else
4435RTCCINTREG helper_fistt_ST0(void)
4436#endif
4437{
4438 int32_t val;
4439 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4440 if (val != (int16_t)val)
4441 val = -32768;
4442 return val;
4443}
4444
4445#ifndef VBOX
4446int32_t helper_fisttl_ST0(void)
4447#else
4448RTCCINTREG helper_fisttl_ST0(void)
4449#endif
4450{
4451 int32_t val;
4452 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4453 return val;
4454}
4455
4456int64_t helper_fisttll_ST0(void)
4457{
4458 int64_t val;
4459 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
4460 return val;
4461}
4462
4463void helper_fldt_ST0(target_ulong ptr)
4464{
4465 int new_fpstt;
4466 new_fpstt = (env->fpstt - 1) & 7;
4467 env->fpregs[new_fpstt].d = helper_fldt(ptr);
4468 env->fpstt = new_fpstt;
4469 env->fptags[new_fpstt] = 0; /* validate stack entry */
4470}
4471
4472void helper_fstt_ST0(target_ulong ptr)
4473{
4474 helper_fstt(ST0, ptr);
4475}
4476
4477void helper_fpush(void)
4478{
4479 fpush();
4480}
4481
4482void helper_fpop(void)
4483{
4484 fpop();
4485}
4486
4487void helper_fdecstp(void)
4488{
4489 env->fpstt = (env->fpstt - 1) & 7;
4490 env->fpus &= (~0x4700);
4491}
4492
4493void helper_fincstp(void)
4494{
4495 env->fpstt = (env->fpstt + 1) & 7;
4496 env->fpus &= (~0x4700);
4497}
4498
4499/* FPU move */
4500
4501void helper_ffree_STN(int st_index)
4502{
4503 env->fptags[(env->fpstt + st_index) & 7] = 1;
4504}
4505
4506void helper_fmov_ST0_FT0(void)
4507{
4508 ST0 = FT0;
4509}
4510
4511void helper_fmov_FT0_STN(int st_index)
4512{
4513 FT0 = ST(st_index);
4514}
4515
4516void helper_fmov_ST0_STN(int st_index)
4517{
4518 ST0 = ST(st_index);
4519}
4520
4521void helper_fmov_STN_ST0(int st_index)
4522{
4523 ST(st_index) = ST0;
4524}
4525
4526void helper_fxchg_ST0_STN(int st_index)
4527{
4528 CPU86_LDouble tmp;
4529 tmp = ST(st_index);
4530 ST(st_index) = ST0;
4531 ST0 = tmp;
4532}
4533
4534/* FPU operations */
4535
4536static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
4537
4538void helper_fcom_ST0_FT0(void)
4539{
4540 int ret;
4541
4542 ret = floatx_compare(ST0, FT0, &env->fp_status);
4543 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4544}
4545
4546void helper_fucom_ST0_FT0(void)
4547{
4548 int ret;
4549
4550 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4551 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4552}
4553
4554static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
4555
4556void helper_fcomi_ST0_FT0(void)
4557{
4558 int eflags;
4559 int ret;
4560
4561 ret = floatx_compare(ST0, FT0, &env->fp_status);
4562 eflags = helper_cc_compute_all(CC_OP);
4563 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4564 CC_SRC = eflags;
4565}
4566
4567void helper_fucomi_ST0_FT0(void)
4568{
4569 int eflags;
4570 int ret;
4571
4572 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4573 eflags = helper_cc_compute_all(CC_OP);
4574 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4575 CC_SRC = eflags;
4576}
4577
4578void helper_fadd_ST0_FT0(void)
4579{
4580 ST0 += FT0;
4581}
4582
4583void helper_fmul_ST0_FT0(void)
4584{
4585 ST0 *= FT0;
4586}
4587
4588void helper_fsub_ST0_FT0(void)
4589{
4590 ST0 -= FT0;
4591}
4592
4593void helper_fsubr_ST0_FT0(void)
4594{
4595 ST0 = FT0 - ST0;
4596}
4597
4598void helper_fdiv_ST0_FT0(void)
4599{
4600 ST0 = helper_fdiv(ST0, FT0);
4601}
4602
4603void helper_fdivr_ST0_FT0(void)
4604{
4605 ST0 = helper_fdiv(FT0, ST0);
4606}
4607
4608/* fp operations between STN and ST0 */
4609
4610void helper_fadd_STN_ST0(int st_index)
4611{
4612 ST(st_index) += ST0;
4613}
4614
4615void helper_fmul_STN_ST0(int st_index)
4616{
4617 ST(st_index) *= ST0;
4618}
4619
4620void helper_fsub_STN_ST0(int st_index)
4621{
4622 ST(st_index) -= ST0;
4623}
4624
4625void helper_fsubr_STN_ST0(int st_index)
4626{
4627 CPU86_LDouble *p;
4628 p = &ST(st_index);
4629 *p = ST0 - *p;
4630}
4631
4632void helper_fdiv_STN_ST0(int st_index)
4633{
4634 CPU86_LDouble *p;
4635 p = &ST(st_index);
4636 *p = helper_fdiv(*p, ST0);
4637}
4638
4639void helper_fdivr_STN_ST0(int st_index)
4640{
4641 CPU86_LDouble *p;
4642 p = &ST(st_index);
4643 *p = helper_fdiv(ST0, *p);
4644}
4645
4646/* misc FPU operations */
4647void helper_fchs_ST0(void)
4648{
4649 ST0 = floatx_chs(ST0);
4650}
4651
4652void helper_fabs_ST0(void)
4653{
4654 ST0 = floatx_abs(ST0);
4655}
4656
4657void helper_fld1_ST0(void)
4658{
4659 ST0 = f15rk[1];
4660}
4661
4662void helper_fldl2t_ST0(void)
4663{
4664 ST0 = f15rk[6];
4665}
4666
4667void helper_fldl2e_ST0(void)
4668{
4669 ST0 = f15rk[5];
4670}
4671
4672void helper_fldpi_ST0(void)
4673{
4674 ST0 = f15rk[2];
4675}
4676
4677void helper_fldlg2_ST0(void)
4678{
4679 ST0 = f15rk[3];
4680}
4681
4682void helper_fldln2_ST0(void)
4683{
4684 ST0 = f15rk[4];
4685}
4686
4687void helper_fldz_ST0(void)
4688{
4689 ST0 = f15rk[0];
4690}
4691
4692void helper_fldz_FT0(void)
4693{
4694 FT0 = f15rk[0];
4695}
4696
4697#ifndef VBOX
4698uint32_t helper_fnstsw(void)
4699#else
4700RTCCUINTREG helper_fnstsw(void)
4701#endif
4702{
4703 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4704}
4705
4706#ifndef VBOX
4707uint32_t helper_fnstcw(void)
4708#else
4709RTCCUINTREG helper_fnstcw(void)
4710#endif
4711{
4712 return env->fpuc;
4713}
4714
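/* update_fp_status() propagates the FPU control word into softfloat: the RC
   field selects the rounding mode, and (when FLOATX80 is available) the PC
   field selects 32-, 64- or 80-bit rounding precision. */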
4715static void update_fp_status(void)
4716{
4717 int rnd_type;
4718
4719 /* set rounding mode */
4720 switch(env->fpuc & RC_MASK) {
4721 default:
4722 case RC_NEAR:
4723 rnd_type = float_round_nearest_even;
4724 break;
4725 case RC_DOWN:
4726 rnd_type = float_round_down;
4727 break;
4728 case RC_UP:
4729 rnd_type = float_round_up;
4730 break;
4731 case RC_CHOP:
4732 rnd_type = float_round_to_zero;
4733 break;
4734 }
4735 set_float_rounding_mode(rnd_type, &env->fp_status);
4736#ifdef FLOATX80
4737 switch((env->fpuc >> 8) & 3) {
4738 case 0:
4739 rnd_type = 32;
4740 break;
4741 case 2:
4742 rnd_type = 64;
4743 break;
4744 case 3:
4745 default:
4746 rnd_type = 80;
4747 break;
4748 }
4749 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4750#endif
4751}
4752
4753void helper_fldcw(uint32_t val)
4754{
4755 env->fpuc = val;
4756 update_fp_status();
4757}
4758
4759void helper_fclex(void)
4760{
4761 env->fpus &= 0x7f00;
4762}
4763
4764void helper_fwait(void)
4765{
4766 if (env->fpus & FPUS_SE)
4767 fpu_raise_exception();
4768}
4769
4770void helper_fninit(void)
4771{
4772 env->fpus = 0;
4773 env->fpstt = 0;
4774 env->fpuc = 0x37f;
4775 env->fptags[0] = 1;
4776 env->fptags[1] = 1;
4777 env->fptags[2] = 1;
4778 env->fptags[3] = 1;
4779 env->fptags[4] = 1;
4780 env->fptags[5] = 1;
4781 env->fptags[6] = 1;
4782 env->fptags[7] = 1;
4783}
4784
4785/* BCD ops */
4786
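/* FBLD/FBSTP operate on the x87 80-bit packed BCD format: bytes 0..8 hold 18
   decimal digits, two per byte with the low digit in the low nibble and byte 0
   least significant; bit 7 of byte 9 is the sign. For example, +1234 is stored
   as 34 12 00 00 00 00 00 00 00 with a sign byte of 00. */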
4787void helper_fbld_ST0(target_ulong ptr)
4788{
4789 CPU86_LDouble tmp;
4790 uint64_t val;
4791 unsigned int v;
4792 int i;
4793
4794 val = 0;
4795 for(i = 8; i >= 0; i--) {
4796 v = ldub(ptr + i);
4797 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
4798 }
4799 tmp = val;
4800 if (ldub(ptr + 9) & 0x80)
4801 tmp = -tmp;
4802 fpush();
4803 ST0 = tmp;
4804}
4805
4806void helper_fbst_ST0(target_ulong ptr)
4807{
4808 int v;
4809 target_ulong mem_ref, mem_end;
4810 int64_t val;
4811
4812 val = floatx_to_int64(ST0, &env->fp_status);
4813 mem_ref = ptr;
4814 mem_end = mem_ref + 9;
4815 if (val < 0) {
4816 stb(mem_end, 0x80);
4817 val = -val;
4818 } else {
4819 stb(mem_end, 0x00);
4820 }
4821 while (mem_ref < mem_end) {
4822 if (val == 0)
4823 break;
4824 v = val % 100;
4825 val = val / 100;
4826 v = ((v / 10) << 4) | (v % 10);
4827 stb(mem_ref++, v);
4828 }
4829 while (mem_ref < mem_end) {
4830 stb(mem_ref++, 0);
4831 }
4832}
4833
4834void helper_f2xm1(void)
4835{
4836 ST0 = pow(2.0,ST0) - 1.0;
4837}
4838
4839void helper_fyl2x(void)
4840{
4841 CPU86_LDouble fptemp;
4842
4843 fptemp = ST0;
4844 if (fptemp>0.0){
4845 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
4846 ST1 *= fptemp;
4847 fpop();
4848 } else {
4849 env->fpus &= (~0x4700);
4850 env->fpus |= 0x400;
4851 }
4852}
4853
4854void helper_fptan(void)
4855{
4856 CPU86_LDouble fptemp;
4857
4858 fptemp = ST0;
4859 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4860 env->fpus |= 0x400;
4861 } else {
4862 ST0 = tan(fptemp);
4863 fpush();
4864 ST0 = 1.0;
4865 env->fpus &= (~0x400); /* C2 <-- 0 */
4866 /* the above code is for |arg| < 2**52 only */
4867 }
4868}
4869
4870void helper_fpatan(void)
4871{
4872 CPU86_LDouble fptemp, fpsrcop;
4873
4874 fpsrcop = ST1;
4875 fptemp = ST0;
4876 ST1 = atan2(fpsrcop,fptemp);
4877 fpop();
4878}
4879
4880void helper_fxtract(void)
4881{
4882 CPU86_LDoubleU temp;
4883 unsigned int expdif;
4884
4885 temp.d = ST0;
4886 expdif = EXPD(temp) - EXPBIAS;
4887 /*DP exponent bias*/
4888 ST0 = expdif;
4889 fpush();
4890 BIASEXPONENT(temp);
4891 ST0 = temp.d;
4892}
4893
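/* FPREM1 and FPREM below follow the same scheme: when the exponent difference
   between ST0 and ST1 is below 53 the remainder is computed in one step, C2 is
   cleared and the low three quotient bits are reported in C0/C3/C1; otherwise
   only a partial reduction is performed and C2 is set, so callers are expected
   to re-execute the instruction until C2 becomes clear. */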
4894void helper_fprem1(void)
4895{
4896 CPU86_LDouble dblq, fpsrcop, fptemp;
4897 CPU86_LDoubleU fpsrcop1, fptemp1;
4898 int expdif;
4899 signed long long int q;
4900
4901#ifndef VBOX /* Unfortunately, we cannot handle isinf/isnan easily in wrapper */
4902 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4903#else
4904 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4905#endif
4906 ST0 = 0.0 / 0.0; /* NaN */
4907 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4908 return;
4909 }
4910
4911 fpsrcop = ST0;
4912 fptemp = ST1;
4913 fpsrcop1.d = fpsrcop;
4914 fptemp1.d = fptemp;
4915 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4916
4917 if (expdif < 0) {
4918 /* optimisation? taken from the AMD docs */
4919 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4920 /* ST0 is unchanged */
4921 return;
4922 }
4923
4924 if (expdif < 53) {
4925 dblq = fpsrcop / fptemp;
4926 /* round dblq towards nearest integer */
4927 dblq = rint(dblq);
4928 ST0 = fpsrcop - fptemp * dblq;
4929
4930 /* convert dblq to q by truncating towards zero */
4931 if (dblq < 0.0)
4932 q = (signed long long int)(-dblq);
4933 else
4934 q = (signed long long int)dblq;
4935
4936 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4937 /* (C0,C3,C1) <-- (q2,q1,q0) */
4938 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4939 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4940 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4941 } else {
4942 env->fpus |= 0x400; /* C2 <-- 1 */
4943 fptemp = pow(2.0, expdif - 50);
4944 fpsrcop = (ST0 / ST1) / fptemp;
4945 /* fpsrcop = integer obtained by chopping */
4946 fpsrcop = (fpsrcop < 0.0) ?
4947 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4948 ST0 -= (ST1 * fpsrcop * fptemp);
4949 }
4950}
4951
4952void helper_fprem(void)
4953{
4954 CPU86_LDouble dblq, fpsrcop, fptemp;
4955 CPU86_LDoubleU fpsrcop1, fptemp1;
4956 int expdif;
4957 signed long long int q;
4958
4959#ifndef VBOX /* Unfortunately, we cannot easily handle isinf/isnan in wrapper */
4960 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4961#else
4962 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4963#endif
4964 ST0 = 0.0 / 0.0; /* NaN */
4965 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4966 return;
4967 }
4968
4969 fpsrcop = (CPU86_LDouble)ST0;
4970 fptemp = (CPU86_LDouble)ST1;
4971 fpsrcop1.d = fpsrcop;
4972 fptemp1.d = fptemp;
4973 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4974
4975 if (expdif < 0) {
4976 /* optimisation? taken from the AMD docs */
4977 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4978 /* ST0 is unchanged */
4979 return;
4980 }
4981
4982 if ( expdif < 53 ) {
4983 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4984 /* round dblq towards zero */
4985 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4986 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4987
4988 /* convert dblq to q by truncating towards zero */
4989 if (dblq < 0.0)
4990 q = (signed long long int)(-dblq);
4991 else
4992 q = (signed long long int)dblq;
4993
4994 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4995 /* (C0,C3,C1) <-- (q2,q1,q0) */
4996 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4997 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4998 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4999 } else {
5000 int N = 32 + (expdif % 32); /* as per AMD docs */
5001 env->fpus |= 0x400; /* C2 <-- 1 */
5002 fptemp = pow(2.0, (double)(expdif - N));
5003 fpsrcop = (ST0 / ST1) / fptemp;
5004 /* fpsrcop = integer obtained by chopping */
5005 fpsrcop = (fpsrcop < 0.0) ?
5006 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
5007 ST0 -= (ST1 * fpsrcop * fptemp);
5008 }
5009}
5010
5011void helper_fyl2xp1(void)
5012{
5013 CPU86_LDouble fptemp;
5014
5015 fptemp = ST0;
5016 if ((fptemp+1.0)>0.0) {
5017 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
5018 ST1 *= fptemp;
5019 fpop();
5020 } else {
5021 env->fpus &= (~0x4700);
5022 env->fpus |= 0x400;
5023 }
5024}
5025
5026void helper_fsqrt(void)
5027{
5028 CPU86_LDouble fptemp;
5029
5030 fptemp = ST0;
5031 if (fptemp<0.0) {
5032 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
5033 env->fpus |= 0x400;
5034 }
5035 ST0 = sqrt(fptemp);
5036}
5037
5038void helper_fsincos(void)
5039{
5040 CPU86_LDouble fptemp;
5041
5042 fptemp = ST0;
5043 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
5044 env->fpus |= 0x400;
5045 } else {
5046 ST0 = sin(fptemp);
5047 fpush();
5048 ST0 = cos(fptemp);
5049 env->fpus &= (~0x400); /* C2 <-- 0 */
5050 /* the above code is for |arg| < 2**63 only */
5051 }
5052}
5053
5054void helper_frndint(void)
5055{
5056 ST0 = floatx_round_to_int(ST0, &env->fp_status);
5057}
5058
5059void helper_fscale(void)
5060{
5061 ST0 = ldexp (ST0, (int)(ST1));
5062}
5063
5064void helper_fsin(void)
5065{
5066 CPU86_LDouble fptemp;
5067
5068 fptemp = ST0;
5069 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
5070 env->fpus |= 0x400;
5071 } else {
5072 ST0 = sin(fptemp);
5073 env->fpus &= (~0x400); /* C2 <-- 0 */
5074 /* the above code is for |arg| < 2**53 only */
5075 }
5076}
5077
5078void helper_fcos(void)
5079{
5080 CPU86_LDouble fptemp;
5081
5082 fptemp = ST0;
5083 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
5084 env->fpus |= 0x400;
5085 } else {
5086 ST0 = cos(fptemp);
5087 env->fpus &= (~0x400); /* C2 <-- 0 */
5088 /* the above code is for |arg| < 2**63 only */
5089 }
5090}
5091
5092void helper_fxam_ST0(void)
5093{
5094 CPU86_LDoubleU temp;
5095 int expdif;
5096
5097 temp.d = ST0;
5098
5099 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
5100 if (SIGND(temp))
5101 env->fpus |= 0x200; /* C1 <-- 1 */
5102
5103 /* XXX: test fptags too */
5104 expdif = EXPD(temp);
5105 if (expdif == MAXEXPD) {
5106#ifdef USE_X86LDOUBLE
5107 if (MANTD(temp) == 0x8000000000000000ULL)
5108#else
5109 if (MANTD(temp) == 0)
5110#endif
5111 env->fpus |= 0x500 /*Infinity*/;
5112 else
5113 env->fpus |= 0x100 /*NaN*/;
5114 } else if (expdif == 0) {
5115 if (MANTD(temp) == 0)
5116 env->fpus |= 0x4000 /*Zero*/;
5117 else
5118 env->fpus |= 0x4400 /*Denormal*/;
5119 } else {
5120 env->fpus |= 0x400;
5121 }
5122}
5123
5124void helper_fstenv(target_ulong ptr, int data32)
5125{
5126 int fpus, fptag, exp, i;
5127 uint64_t mant;
5128 CPU86_LDoubleU tmp;
5129
5130 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5131 fptag = 0;
5132 for (i=7; i>=0; i--) {
5133 fptag <<= 2;
5134 if (env->fptags[i]) {
5135 fptag |= 3;
5136 } else {
5137 tmp.d = env->fpregs[i].d;
5138 exp = EXPD(tmp);
5139 mant = MANTD(tmp);
5140 if (exp == 0 && mant == 0) {
5141 /* zero */
5142 fptag |= 1;
5143 } else if (exp == 0 || exp == MAXEXPD
5144#ifdef USE_X86LDOUBLE
5145 || (mant & (1LL << 63)) == 0
5146#endif
5147 ) {
5148 /* NaNs, infinity, denormal */
5149 fptag |= 2;
5150 }
5151 }
5152 }
5153 if (data32) {
5154 /* 32 bit */
5155 stl(ptr, env->fpuc);
5156 stl(ptr + 4, fpus);
5157 stl(ptr + 8, fptag);
5158 stl(ptr + 12, 0); /* fpip */
5159 stl(ptr + 16, 0); /* fpcs */
5160 stl(ptr + 20, 0); /* fpoo */
5161 stl(ptr + 24, 0); /* fpos */
5162 } else {
5163 /* 16 bit */
5164 stw(ptr, env->fpuc);
5165 stw(ptr + 2, fpus);
5166 stw(ptr + 4, fptag);
5167 stw(ptr + 6, 0);
5168 stw(ptr + 8, 0);
5169 stw(ptr + 10, 0);
5170 stw(ptr + 12, 0);
5171 }
5172}
5173
5174void helper_fldenv(target_ulong ptr, int data32)
5175{
5176 int i, fpus, fptag;
5177
5178 if (data32) {
5179 env->fpuc = lduw(ptr);
5180 fpus = lduw(ptr + 4);
5181 fptag = lduw(ptr + 8);
5182 }
5183 else {
5184 env->fpuc = lduw(ptr);
5185 fpus = lduw(ptr + 2);
5186 fptag = lduw(ptr + 4);
5187 }
5188 env->fpstt = (fpus >> 11) & 7;
5189 env->fpus = fpus & ~0x3800;
5190 for(i = 0;i < 8; i++) {
5191 env->fptags[i] = ((fptag & 3) == 3);
5192 fptag >>= 2;
5193 }
5194}
5195
5196void helper_fsave(target_ulong ptr, int data32)
5197{
5198 CPU86_LDouble tmp;
5199 int i;
5200
5201 helper_fstenv(ptr, data32);
5202
5203 ptr += (14 << data32);
5204 for(i = 0;i < 8; i++) {
5205 tmp = ST(i);
5206 helper_fstt(tmp, ptr);
5207 ptr += 10;
5208 }
5209
5210 /* fninit */
5211 env->fpus = 0;
5212 env->fpstt = 0;
5213 env->fpuc = 0x37f;
5214 env->fptags[0] = 1;
5215 env->fptags[1] = 1;
5216 env->fptags[2] = 1;
5217 env->fptags[3] = 1;
5218 env->fptags[4] = 1;
5219 env->fptags[5] = 1;
5220 env->fptags[6] = 1;
5221 env->fptags[7] = 1;
5222}
5223
5224void helper_frstor(target_ulong ptr, int data32)
5225{
5226 CPU86_LDouble tmp;
5227 int i;
5228
5229 helper_fldenv(ptr, data32);
5230 ptr += (14 << data32);
5231
5232 for(i = 0;i < 8; i++) {
5233 tmp = helper_fldt(ptr);
5234 ST(i) = tmp;
5235 ptr += 10;
5236 }
5237}
5238
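/* Layout of the FXSAVE/FXRSTOR image as used below: FCW at 0x00, FSW at 0x02,
   the abridged tag word at 0x04 (stored inverted so that 1 means a valid
   register), MXCSR at 0x18, MXCSR_MASK at 0x1c, ST0-ST7 in 16-byte slots from
   0x20, and the XMM registers from 0xa0. With EFER.FFXSR set, ring 0 in long
   mode skips the XMM area ("fast" FXSAVE/FXRSTOR). */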
5239void helper_fxsave(target_ulong ptr, int data64)
5240{
5241 int fpus, fptag, i, nb_xmm_regs;
5242 CPU86_LDouble tmp;
5243 target_ulong addr;
5244
5245 /* The operand must be 16 byte aligned */
5246 if (ptr & 0xf) {
5247 raise_exception(EXCP0D_GPF);
5248 }
5249
5250 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5251 fptag = 0;
5252 for(i = 0; i < 8; i++) {
5253 fptag |= (env->fptags[i] << i);
5254 }
5255 stw(ptr, env->fpuc);
5256 stw(ptr + 2, fpus);
5257 stw(ptr + 4, fptag ^ 0xff);
5258#ifdef TARGET_X86_64
5259 if (data64) {
5260 stq(ptr + 0x08, 0); /* rip */
5261 stq(ptr + 0x10, 0); /* rdp */
5262 } else
5263#endif
5264 {
5265 stl(ptr + 0x08, 0); /* eip */
5266 stl(ptr + 0x0c, 0); /* sel */
5267 stl(ptr + 0x10, 0); /* dp */
5268 stl(ptr + 0x14, 0); /* sel */
5269 }
5270
5271 addr = ptr + 0x20;
5272 for(i = 0;i < 8; i++) {
5273 tmp = ST(i);
5274 helper_fstt(tmp, addr);
5275 addr += 16;
5276 }
5277
5278 if (env->cr[4] & CR4_OSFXSR_MASK) {
5279 /* XXX: finish it */
5280 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5281 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5282 if (env->hflags & HF_CS64_MASK)
5283 nb_xmm_regs = 16;
5284 else
5285 nb_xmm_regs = 8;
5286 addr = ptr + 0xa0;
5287 /* Fast FXSAVE leaves out the XMM registers */
5288 if (!(env->efer & MSR_EFER_FFXSR)
5289 || (env->hflags & HF_CPL_MASK)
5290 || !(env->hflags & HF_LMA_MASK)) {
5291 for(i = 0; i < nb_xmm_regs; i++) {
5292 stq(addr, env->xmm_regs[i].XMM_Q(0));
5293 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5294 addr += 16;
5295 }
5296 }
5297 }
5298}
5299
5300void helper_fxrstor(target_ulong ptr, int data64)
5301{
5302 int i, fpus, fptag, nb_xmm_regs;
5303 CPU86_LDouble tmp;
5304 target_ulong addr;
5305
5306 /* The operand must be 16 byte aligned */
5307 if (ptr & 0xf) {
5308 raise_exception(EXCP0D_GPF);
5309 }
5310
5311 env->fpuc = lduw(ptr);
5312 fpus = lduw(ptr + 2);
5313 fptag = lduw(ptr + 4);
5314 env->fpstt = (fpus >> 11) & 7;
5315 env->fpus = fpus & ~0x3800;
5316 fptag ^= 0xff;
5317 for(i = 0;i < 8; i++) {
5318 env->fptags[i] = ((fptag >> i) & 1);
5319 }
5320
5321 addr = ptr + 0x20;
5322 for(i = 0;i < 8; i++) {
5323 tmp = helper_fldt(addr);
5324 ST(i) = tmp;
5325 addr += 16;
5326 }
5327
5328 if (env->cr[4] & CR4_OSFXSR_MASK) {
5329 /* XXX: finish it */
5330 env->mxcsr = ldl(ptr + 0x18);
5331 //ldl(ptr + 0x1c);
5332 if (env->hflags & HF_CS64_MASK)
5333 nb_xmm_regs = 16;
5334 else
5335 nb_xmm_regs = 8;
5336 addr = ptr + 0xa0;
5337 /* Fast FXRSTOR leaves out the XMM registers */
5338 if (!(env->efer & MSR_EFER_FFXSR)
5339 || (env->hflags & HF_CPL_MASK)
5340 || !(env->hflags & HF_LMA_MASK)) {
5341 for(i = 0; i < nb_xmm_regs; i++) {
5342#if !defined(VBOX) || __GNUC__ < 4
5343 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
5344 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
5345#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5346# if 1
5347 env->xmm_regs[i].XMM_L(0) = ldl(addr);
5348 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
5349 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
5350 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
5351# else
5352 /* this works fine on Mac OS X, gcc 4.0.1 */
5353 uint64_t u64 = ldq(addr);
5354 env->xmm_regs[i].XMM_Q(0) = u64;
5355 u64 = ldq(addr + 8);
5356 env->xmm_regs[i].XMM_Q(1) = u64;
5357# endif
5358#endif
5359 addr += 16;
5360 }
5361 }
5362 }
5363}
5364
5365#ifndef USE_X86LDOUBLE
5366
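/* Without USE_X86LDOUBLE the guest's 80-bit registers are backed by host
   doubles, so these two helpers convert between the double layout and the
   (mantissa, sign/exponent) pair stored in 80-bit memory images; note how
   cpu_get_fp80() re-inserts the explicit integer bit (1LL << 63). */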
5367void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5368{
5369 CPU86_LDoubleU temp;
5370 int e;
5371
5372 temp.d = f;
5373 /* mantissa */
5374 *pmant = (MANTD(temp) << 11) | (1LL << 63);
5375 /* exponent + sign */
5376 e = EXPD(temp) - EXPBIAS + 16383;
5377 e |= SIGND(temp) >> 16;
5378 *pexp = e;
5379}
5380
5381CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5382{
5383 CPU86_LDoubleU temp;
5384 int e;
5385 uint64_t ll;
5386
5387 /* XXX: handle overflow ? */
5388 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
5389 e |= (upper >> 4) & 0x800; /* sign */
5390 ll = (mant >> 11) & ((1LL << 52) - 1);
5391#ifdef __arm__
5392 temp.l.upper = (e << 20) | (ll >> 32);
5393 temp.l.lower = ll;
5394#else
5395 temp.ll = ll | ((uint64_t)e << 52);
5396#endif
5397 return temp.d;
5398}
5399
5400#else
5401
5402void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5403{
5404 CPU86_LDoubleU temp;
5405
5406 temp.d = f;
5407 *pmant = temp.l.lower;
5408 *pexp = temp.l.upper;
5409}
5410
5411CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5412{
5413 CPU86_LDoubleU temp;
5414
5415 temp.l.upper = upper;
5416 temp.l.lower = mant;
5417 return temp.d;
5418}
5419#endif
5420
5421#ifdef TARGET_X86_64
5422
5423//#define DEBUG_MULDIV
5424
5425static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
5426{
5427 *plow += a;
5428 /* carry test */
5429 if (*plow < a)
5430 (*phigh)++;
5431 *phigh += b;
5432}
5433
5434static void neg128(uint64_t *plow, uint64_t *phigh)
5435{
5436 *plow = ~ *plow;
5437 *phigh = ~ *phigh;
5438 add128(plow, phigh, 1, 0);
5439}
5440
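/* div64() divides the 128-bit value (*phigh:*plow) by b. If the high half is
   already >= b the quotient cannot fit in 64 bits and overflow is reported;
   otherwise a simple 64-iteration shift-and-subtract loop leaves the quotient
   in *plow and the remainder in *phigh, which is what DIV expects in RAX/RDX. */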
5441/* return TRUE if overflow */
5442static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
5443{
5444 uint64_t q, r, a1, a0;
5445 int i, qb, ab;
5446
5447 a0 = *plow;
5448 a1 = *phigh;
5449 if (a1 == 0) {
5450 q = a0 / b;
5451 r = a0 % b;
5452 *plow = q;
5453 *phigh = r;
5454 } else {
5455 if (a1 >= b)
5456 return 1;
5457 /* XXX: use a better algorithm */
5458 for(i = 0; i < 64; i++) {
5459 ab = a1 >> 63;
5460 a1 = (a1 << 1) | (a0 >> 63);
5461 if (ab || a1 >= b) {
5462 a1 -= b;
5463 qb = 1;
5464 } else {
5465 qb = 0;
5466 }
5467 a0 = (a0 << 1) | qb;
5468 }
5469#if defined(DEBUG_MULDIV)
5470 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
5471 *phigh, *plow, b, a0, a1);
5472#endif
5473 *plow = a0;
5474 *phigh = a1;
5475 }
5476 return 0;
5477}
5478
5479/* return TRUE if overflow */
5480static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
5481{
5482 int sa, sb;
5483 sa = ((int64_t)*phigh < 0);
5484 if (sa)
5485 neg128(plow, phigh);
5486 sb = (b < 0);
5487 if (sb)
5488 b = -b;
5489 if (div64(plow, phigh, b) != 0)
5490 return 1;
5491 if (sa ^ sb) {
5492 if (*plow > (1ULL << 63))
5493 return 1;
5494 *plow = - *plow;
5495 } else {
5496 if (*plow >= (1ULL << 63))
5497 return 1;
5498 }
5499 if (sa)
5500 *phigh = - *phigh;
5501 return 0;
5502}
5503
5504void helper_mulq_EAX_T0(target_ulong t0)
5505{
5506 uint64_t r0, r1;
5507
5508 mulu64(&r0, &r1, EAX, t0);
5509 EAX = r0;
5510 EDX = r1;
5511 CC_DST = r0;
5512 CC_SRC = r1;
5513}
5514
5515void helper_imulq_EAX_T0(target_ulong t0)
5516{
5517 uint64_t r0, r1;
5518
5519 muls64(&r0, &r1, EAX, t0);
5520 EAX = r0;
5521 EDX = r1;
5522 CC_DST = r0;
5523 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5524}
5525
5526target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
5527{
5528 uint64_t r0, r1;
5529
5530 muls64(&r0, &r1, t0, t1);
5531 CC_DST = r0;
5532 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5533 return r0;
5534}
5535
5536void helper_divq_EAX(target_ulong t0)
5537{
5538 uint64_t r0, r1;
5539 if (t0 == 0) {
5540 raise_exception(EXCP00_DIVZ);
5541 }
5542 r0 = EAX;
5543 r1 = EDX;
5544 if (div64(&r0, &r1, t0))
5545 raise_exception(EXCP00_DIVZ);
5546 EAX = r0;
5547 EDX = r1;
5548}
5549
5550void helper_idivq_EAX(target_ulong t0)
5551{
5552 uint64_t r0, r1;
5553 if (t0 == 0) {
5554 raise_exception(EXCP00_DIVZ);
5555 }
5556 r0 = EAX;
5557 r1 = EDX;
5558 if (idiv64(&r0, &r1, t0))
5559 raise_exception(EXCP00_DIVZ);
5560 EAX = r0;
5561 EDX = r1;
5562}
5563#endif
5564
5565static void do_hlt(void)
5566{
5567 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
5568 env->halted = 1;
5569 env->exception_index = EXCP_HLT;
5570 cpu_loop_exit();
5571}
5572
5573void helper_hlt(int next_eip_addend)
5574{
5575 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
5576 EIP += next_eip_addend;
5577
5578 do_hlt();
5579}
5580
5581void helper_monitor(target_ulong ptr)
5582{
5583#ifdef VBOX
5584 if ((uint32_t)ECX > 1)
5585 raise_exception(EXCP0D_GPF);
5586#else /* !VBOX */
5587 if ((uint32_t)ECX != 0)
5588 raise_exception(EXCP0D_GPF);
5589#endif /* !VBOX */
5590 /* XXX: store address ? */
5591 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
5592}
5593
5594void helper_mwait(int next_eip_addend)
5595{
5596 if ((uint32_t)ECX != 0)
5597 raise_exception(EXCP0D_GPF);
5598#ifdef VBOX
5599 helper_hlt(next_eip_addend);
5600#else /* !VBOX */
5601 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
5602 EIP += next_eip_addend;
5603
5604 /* XXX: not complete but not completely erroneous */
5605 if (env->cpu_index != 0 || env->next_cpu != NULL) {
5606 /* more than one CPU: do not sleep because another CPU may
5607 wake this one */
5608 } else {
5609 do_hlt();
5610 }
5611#endif /* !VBOX */
5612}
5613
5614void helper_debug(void)
5615{
5616 env->exception_index = EXCP_DEBUG;
5617 cpu_loop_exit();
5618}
5619
5620void helper_reset_rf(void)
5621{
5622 env->eflags &= ~RF_MASK;
5623}
5624
5625void helper_raise_interrupt(int intno, int next_eip_addend)
5626{
5627 raise_interrupt(intno, 1, 0, next_eip_addend);
5628}
5629
5630void helper_raise_exception(int exception_index)
5631{
5632 raise_exception(exception_index);
5633}
5634
5635void helper_cli(void)
5636{
5637 env->eflags &= ~IF_MASK;
5638}
5639
5640void helper_sti(void)
5641{
5642 env->eflags |= IF_MASK;
5643}
5644
5645#ifdef VBOX
5646void helper_cli_vme(void)
5647{
5648 env->eflags &= ~VIF_MASK;
5649}
5650
5651void helper_sti_vme(void)
5652{
5653 /* First check, then change eflags according to the AMD manual */
5654 if (env->eflags & VIP_MASK) {
5655 raise_exception(EXCP0D_GPF);
5656 }
5657 env->eflags |= VIF_MASK;
5658}
5659#endif /* VBOX */
5660
5661#if 0
5662/* vm86plus instructions */
5663void helper_cli_vm(void)
5664{
5665 env->eflags &= ~VIF_MASK;
5666}
5667
5668void helper_sti_vm(void)
5669{
5670 env->eflags |= VIF_MASK;
5671 if (env->eflags & VIP_MASK) {
5672 raise_exception(EXCP0D_GPF);
5673 }
5674}
5675#endif
5676
5677void helper_set_inhibit_irq(void)
5678{
5679 env->hflags |= HF_INHIBIT_IRQ_MASK;
5680}
5681
5682void helper_reset_inhibit_irq(void)
5683{
5684 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5685}
5686
5687void helper_boundw(target_ulong a0, int v)
5688{
5689 int low, high;
5690 low = ldsw(a0);
5691 high = ldsw(a0 + 2);
5692 v = (int16_t)v;
5693 if (v < low || v > high) {
5694 raise_exception(EXCP05_BOUND);
5695 }
5696}
5697
5698void helper_boundl(target_ulong a0, int v)
5699{
5700 int low, high;
5701 low = ldl(a0);
5702 high = ldl(a0 + 4);
5703 if (v < low || v > high) {
5704 raise_exception(EXCP05_BOUND);
5705 }
5706}
5707
5708static float approx_rsqrt(float a)
5709{
5710 return 1.0 / sqrt(a);
5711}
5712
5713static float approx_rcp(float a)
5714{
5715 return 1.0 / a;
5716}
5717
5718#if !defined(CONFIG_USER_ONLY)
5719
5720#define MMUSUFFIX _mmu
5721
5722#define SHIFT 0
5723#include "softmmu_template.h"
5724
5725#define SHIFT 1
5726#include "softmmu_template.h"
5727
5728#define SHIFT 2
5729#include "softmmu_template.h"
5730
5731#define SHIFT 3
5732#include "softmmu_template.h"
5733
5734#endif
5735
5736#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
5737/* This code assumes that a real physical address always fits into a host CPU
5738 register, which is wrong in general but true for our current use cases. */
5739RTCCUINTREG REGPARM __ldb_vbox_phys(RTCCUINTREG addr)
5740{
5741 return remR3PhysReadS8(addr);
5742}
5743RTCCUINTREG REGPARM __ldub_vbox_phys(RTCCUINTREG addr)
5744{
5745 return remR3PhysReadU8(addr);
5746}
5747void REGPARM __stb_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5748{
5749 remR3PhysWriteU8(addr, val);
5750}
5751RTCCUINTREG REGPARM __ldw_vbox_phys(RTCCUINTREG addr)
5752{
5753 return remR3PhysReadS16(addr);
5754}
5755RTCCUINTREG REGPARM __lduw_vbox_phys(RTCCUINTREG addr)
5756{
5757 return remR3PhysReadU16(addr);
5758}
5759void REGPARM __stw_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5760{
5761 remR3PhysWriteU16(addr, val);
5762}
5763RTCCUINTREG REGPARM __ldl_vbox_phys(RTCCUINTREG addr)
5764{
5765 return remR3PhysReadS32(addr);
5766}
5767RTCCUINTREG REGPARM __ldul_vbox_phys(RTCCUINTREG addr)
5768{
5769 return remR3PhysReadU32(addr);
5770}
5771void REGPARM __stl_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5772{
5773 remR3PhysWriteU32(addr, val);
5774}
5775uint64_t REGPARM __ldq_vbox_phys(RTCCUINTREG addr)
5776{
5777 return remR3PhysReadU64(addr);
5778}
5779void REGPARM __stq_vbox_phys(RTCCUINTREG addr, uint64_t val)
5780{
5781 remR3PhysWriteU64(addr, val);
5782}
5783#endif /* VBOX */
5784
5785#if !defined(CONFIG_USER_ONLY)
5786/* Try to fill the TLB and raise an exception on error. If retaddr is
5787 NULL, it means that the function was called from C code (i.e. not
5788 from generated code or from helper.c). */
5789/* XXX: fix it to restore all registers */
5790void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
5791{
5792 TranslationBlock *tb;
5793 int ret;
5794 uintptr_t pc;
5795 CPUX86State *saved_env;
5796
5797 /* XXX: hack to restore env in all cases, even if not called from
5798 generated code */
5799 saved_env = env;
5800 env = cpu_single_env;
5801
5802 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
5803 if (ret) {
5804 if (retaddr) {
5805 /* now we have a real cpu fault */
5806 pc = (uintptr_t)retaddr;
5807 tb = tb_find_pc(pc);
5808 if (tb) {
5809 /* the PC is inside the translated code. It means that we have
5810 a virtual CPU fault */
5811 cpu_restore_state(tb, env, pc, NULL);
5812 }
5813 }
5814 raise_exception_err(env->exception_index, env->error_code);
5815 }
5816 env = saved_env;
5817}
5818#endif
5819
5820#ifdef VBOX
5821
5822/**
5823 * Correctly computes the eflags.
5824 * @returns eflags.
5825 * @param env1 CPU environment.
5826 */
5827uint32_t raw_compute_eflags(CPUX86State *env1)
5828{
5829 CPUX86State *savedenv = env;
5830 uint32_t efl;
5831 env = env1;
5832 efl = compute_eflags();
5833 env = savedenv;
5834 return efl;
5835}
5836
5837/**
5838 * Reads a byte from a virtual address in the guest memory area.
5839 * XXX: does this work for all addresses, including swapped-out pages?
5840 * @returns read data byte.
5841 * @param env1 CPU environment.
5842 * @param addr GC virtual address.
5843 */
5844uint8_t read_byte(CPUX86State *env1, target_ulong addr)
5845{
5846 CPUX86State *savedenv = env;
5847 uint8_t u8;
5848 env = env1;
5849 u8 = ldub_kernel(addr);
5850 env = savedenv;
5851 return u8;
5852}
5853
5854/**
5855 * Reads a 16-bit word from a virtual address in the guest memory area.
5856 * XXX: does this work for all addresses, including swapped-out pages?
5857 * @returns read data word.
5858 * @param env1 CPU environment.
5859 * @param addr GC virtual address.
5860 */
5861uint16_t read_word(CPUX86State *env1, target_ulong addr)
5862{
5863 CPUX86State *savedenv = env;
5864 uint16_t u16;
5865 env = env1;
5866 u16 = lduw_kernel(addr);
5867 env = savedenv;
5868 return u16;
5869}
5870
5871/**
5872 * Reads a 32-bit dword from a virtual address in the guest memory area.
5873 * XXX: does this work for all addresses, including swapped-out pages?
5874 * @returns read data dword.
5875 * @param env1 CPU environment.
5876 * @param addr GC virtual address.
5877 */
5878uint32_t read_dword(CPUX86State *env1, target_ulong addr)
5879{
5880 CPUX86State *savedenv = env;
5881 uint32_t u32;
5882 env = env1;
5883 u32 = ldl_kernel(addr);
5884 env = savedenv;
5885 return u32;
5886}
5887
5888/**
5889 * Writes a byte to a virtual address in the guest memory area.
5890 * XXX: does this work for all addresses, including swapped-out pages?
5892 * @param env1 CPU environment.
5893 * @param addr GC virtual address.
5894 * @param val Byte value to write.
5895 */
5896void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
5897{
5898 CPUX86State *savedenv = env;
5899 env = env1;
5900 stb(addr, val);
5901 env = savedenv;
5902}
5903
5904void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
5905{
5906 CPUX86State *savedenv = env;
5907 env = env1;
5908 stw(addr, val);
5909 env = savedenv;
5910}
5911
5912void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
5913{
5914 CPUX86State *savedenv = env;
5915 env = env1;
5916 stl(addr, val);
5917 env = savedenv;
5918}
5919
5920/**
5921 * Correctly loads selector into segment register with updating internal
5922 * qemu data/caches.
5923 * @param env1 CPU environment.
5924 * @param seg_reg Segment register.
5925 * @param selector Selector to load.
5926 */
5927void sync_seg(CPUX86State *env1, int seg_reg, int selector)
5928{
5929 CPUX86State *savedenv = env;
5930#ifdef FORCE_SEGMENT_SYNC
5931 jmp_buf old_buf;
5932#endif
5933
5934 env = env1;
5935
5936 if ( env->eflags & X86_EFL_VM
5937 || !(env->cr[0] & X86_CR0_PE))
5938 {
5939 load_seg_vm(seg_reg, selector);
5940
5941 env = savedenv;
5942
5943 /* Successful sync. */
5944 Assert(env1->segs[seg_reg].newselector == 0);
5945 }
5946 else
5947 {
5948 /* For some reason this works even without saving/restoring the jump buffer,
5949 and since this code is time critical we don't do that. */
5950#ifdef FORCE_SEGMENT_SYNC
5951 memcpy(&old_buf, &env1->jmp_env, sizeof(old_buf));
5952#endif
5953 if (setjmp(env1->jmp_env) == 0)
5954 {
5955 if (seg_reg == R_CS)
5956 {
5957 uint32_t e1, e2;
5958 e1 = e2 = 0;
5959 load_segment(&e1, &e2, selector);
5960 cpu_x86_load_seg_cache(env, R_CS, selector,
5961 get_seg_base(e1, e2),
5962 get_seg_limit(e1, e2),
5963 e2);
5964 }
5965 else
5966 helper_load_seg(seg_reg, selector);
5967 /* We used to use tss_load_seg(seg_reg, selector); which, for some reason, ignored
5968 loading 0 selectors, which in turn led to subtle problems like #3588 */
5969
5970 env = savedenv;
5971
5972 /* Successful sync. */
5973 Assert(env1->segs[seg_reg].newselector == 0);
5974 }
5975 else
5976 {
5977 env = savedenv;
5978
5979 /* Postpone sync until the guest uses the selector. */
5980 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
5981 env1->segs[seg_reg].newselector = selector;
5982 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
5983 env1->exception_index = -1;
5984 env1->error_code = 0;
5985 env1->old_exception = -1;
5986 }
5987#ifdef FORCE_SEGMENT_SYNC
5988 memcpy(&env1->jmp_env, &old_buf, sizeof(old_buf));
5989#endif
5990 }
5991
5992}
5993
5994DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
5995{
5996 tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
5997}
5998
5999
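/* emulate_single_instr() generates a translation block restricted to a single
   guest instruction (CPU_EMULATE_SINGLE_INSTR), unlinks its jump targets,
   executes it repeatedly while EIP stays the same, breaking out when an
   external interrupt request must be serviced, and then invalidates and frees
   the block. If the instruction left HF_INHIBIT_IRQ_MASK set (interrupt
   shadow, e.g. after STI or MOV SS), the following instruction is emulated as
   well. */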
6000int emulate_single_instr(CPUX86State *env1)
6001{
6002 TranslationBlock *tb;
6003 TranslationBlock *current;
6004 int flags;
6005 uint8_t *tc_ptr;
6006 target_ulong old_eip;
6007
6008 /* ensures env is loaded! */
6009 CPUX86State *savedenv = env;
6010 env = env1;
6011
6012 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);
6013
6014 current = env->current_tb;
6015 env->current_tb = NULL;
6016 flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
6017
6018 /*
6019 * Translate only one instruction.
6020 */
6021 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
6022 tb = tb_gen_code(env, env->eip + env->segs[R_CS].base,
6023 env->segs[R_CS].base, flags, 0);
6024
6025 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
6026
6027
6028 /* tb_link_phys: */
6029 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
6030 tb->jmp_next[0] = NULL;
6031 tb->jmp_next[1] = NULL;
6032 Assert(tb->jmp_next[0] == NULL);
6033 Assert(tb->jmp_next[1] == NULL);
6034 if (tb->tb_next_offset[0] != 0xffff)
6035 tb_reset_jump(tb, 0);
6036 if (tb->tb_next_offset[1] != 0xffff)
6037 tb_reset_jump(tb, 1);
6038
6039 /*
6040 * Execute it using emulation
6041 */
6042 old_eip = env->eip;
6043 env->current_tb = tb;
6044
6045 /*
6046 * eip remains the same for repeated instructions; it is unclear why qemu doesn't do a jump inside the generated code.
6047 * This is perhaps not a very safe hack.
6048 */
6049 while (old_eip == env->eip)
6050 {
6051 tc_ptr = tb->tc_ptr;
6052
6053#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
6054 int fake_ret;
6055 tcg_qemu_tb_exec(tc_ptr, fake_ret);
6056#else
6057 tcg_qemu_tb_exec(tc_ptr);
6058#endif
6059
6060 /*
6061 * Exit once we detect an external interrupt and interrupts are enabled
6062 */
6063 if ( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER))
6064 || ( (env->eflags & IF_MASK)
6065 && !(env->hflags & HF_INHIBIT_IRQ_MASK)
6066 && (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) )
6067 )
6068 {
6069 break;
6070 }
6071 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_FLUSH_TLB) {
6072 tlb_flush(env, true);
6073 }
6074 }
6075 env->current_tb = current;
6076
6077 tb_phys_invalidate(tb, -1);
6078 tb_free(tb);
6079/*
6080 Assert(tb->tb_next_offset[0] == 0xffff);
6081 Assert(tb->tb_next_offset[1] == 0xffff);
6082 Assert(tb->tb_next[0] == 0xffff);
6083 Assert(tb->tb_next[1] == 0xffff);
6084 Assert(tb->jmp_next[0] == NULL);
6085 Assert(tb->jmp_next[1] == NULL);
6086 Assert(tb->jmp_first == NULL); */
6087
6088 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
6089
6090 /*
6091 * Execute the next instruction when we encounter instruction fusing.
6092 */
6093 if (env->hflags & HF_INHIBIT_IRQ_MASK)
6094 {
6095 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %RGv\n", env->eip));
6096 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6097 emulate_single_instr(env);
6098 }
6099
6100 env = savedenv;
6101 return 0;
6102}
6103
6104/**
6105 * Correctly loads a new ldtr selector.
6106 *
6107 * @param env1 CPU environment.
6108 * @param selector Selector to load.
6109 */
6110void sync_ldtr(CPUX86State *env1, int selector)
6111{
6112 CPUX86State *saved_env = env;
6113 if (setjmp(env1->jmp_env) == 0)
6114 {
6115 env = env1;
6116 helper_lldt(selector);
6117 env = saved_env;
6118 }
6119 else
6120 {
6121 env = saved_env;
6122#ifdef VBOX_STRICT
6123 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
6124#endif
6125 }
6126}
6127
6128int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
6129 uint32_t *esp_ptr, int dpl)
6130{
6131 int type, index, shift;
6132
6133 CPUX86State *savedenv = env;
6134 env = env1;
6135
6136 if (!(env->tr.flags & DESC_P_MASK))
6137 cpu_abort(env, "invalid tss");
6138 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
6139 if ((type & 7) != 1)
6140 cpu_abort(env, "invalid tss type %d", type);
6141 shift = type >> 3;
6142 index = (dpl * 4 + 2) << shift;
6143 if (index + (4 << shift) - 1 > env->tr.limit)
6144 {
6145 env = savedenv;
6146 return 0;
6147 }
6148 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
6149
6150 if (shift == 0) {
6151 *esp_ptr = lduw_kernel(env->tr.base + index);
6152 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
6153 } else {
6154 *esp_ptr = ldl_kernel(env->tr.base + index);
6155 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
6156 }
6157
6158 env = savedenv;
6159 return 1;
6160}
6161
6162//*****************************************************************************
6163// Needs to be at the bottom of the file (overriding macros)
6164
6165static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
6166{
6167#ifdef USE_X86LDOUBLE
6168 CPU86_LDoubleU tmp;
6169 tmp.l.lower = *(uint64_t const *)ptr;
6170 tmp.l.upper = *(uint16_t const *)(ptr + 8);
6171 return tmp.d;
6172#else
6173# error "Busted FPU saving/restoring!"
6174 return *(CPU86_LDouble *)ptr;
6175#endif
6176}
6177
6178static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
6179{
6180#ifdef USE_X86LDOUBLE
6181 CPU86_LDoubleU tmp;
6182 tmp.d = f;
6183 *(uint64_t *)(ptr + 0) = tmp.l.lower;
6184 *(uint16_t *)(ptr + 8) = tmp.l.upper;
6185 *(uint16_t *)(ptr + 10) = 0;
6186 *(uint32_t *)(ptr + 12) = 0;
6187 AssertCompile(sizeof(long double) > 8);
6188#else
6189# error "Busted FPU saving/restoring!"
6190 *(CPU86_LDouble *)ptr = f;
6191#endif
6192}
6193
6194#undef stw
6195#undef stl
6196#undef stq
6197#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
6198#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
6199#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
6200
6201//*****************************************************************************
6202void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6203{
6204 int fpus, fptag, i, nb_xmm_regs;
6205 CPU86_LDouble tmp;
6206 uint8_t *addr;
6207 int data64 = !!(env->hflags & HF_LMA_MASK);
6208
6209 if (env->cpuid_features & CPUID_FXSR)
6210 {
6211 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6212 fptag = 0;
6213 for(i = 0; i < 8; i++) {
6214 fptag |= (env->fptags[i] << i);
6215 }
6216 stw(ptr, env->fpuc);
6217 stw(ptr + 2, fpus);
6218 stw(ptr + 4, fptag ^ 0xff);
6219
6220 addr = ptr + 0x20;
6221 for(i = 0;i < 8; i++) {
6222 tmp = ST(i);
6223 helper_fstt_raw(tmp, addr);
6224 addr += 16;
6225 }
6226
6227 if (env->cr[4] & CR4_OSFXSR_MASK) {
6228 /* XXX: finish it */
6229 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
6230 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
6231 nb_xmm_regs = 8 << data64;
6232 addr = ptr + 0xa0;
6233 for(i = 0; i < nb_xmm_regs; i++) {
6234#if __GNUC__ < 4
6235 stq(addr, env->xmm_regs[i].XMM_Q(0));
6236 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
6237#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
6238 stl(addr, env->xmm_regs[i].XMM_L(0));
6239 stl(addr + 4, env->xmm_regs[i].XMM_L(1));
6240 stl(addr + 8, env->xmm_regs[i].XMM_L(2));
6241 stl(addr + 12, env->xmm_regs[i].XMM_L(3));
6242#endif
6243 addr += 16;
6244 }
6245 }
6246 }
6247 else
6248 {
6249 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6250 int fptag;
6251
6252 fp->FCW = env->fpuc;
6253 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6254 fptag = 0;
6255 for (i=7; i>=0; i--) {
6256 fptag <<= 2;
6257 if (env->fptags[i]) {
6258 fptag |= 3;
6259 } else {
6260 /* the FPU automatically computes it */
6261 }
6262 }
6263 fp->FTW = fptag;
6264
6265 for(i = 0;i < 8; i++) {
6266 tmp = ST(i);
6267 helper_fstt_raw(tmp, &fp->regs[i].au8[0]);
6268 }
6269 }
6270}
6271
6272//*****************************************************************************
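/* Likewise redirect the load macros to direct host pointer reads for
   save_raw_fp_state() below. */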
6273#undef lduw
6274#undef ldl
6275#undef ldq
6276#define lduw(a) *(uint16_t *)(a)
6277#define ldl(a) *(uint32_t *)(a)
6278#define ldq(a) *(uint64_t *)(a)
6279//*****************************************************************************
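/* Loads the emulated FPU/SSE state in *env from a raw FXSAVE-format image at
   ptr (legacy FPU-state layout when the guest CPU does not report FXSR). */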
6280void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6281{
6282 int i, fpus, fptag, nb_xmm_regs;
6283 CPU86_LDouble tmp;
6284 uint8_t *addr;
6285 int data64 = !!(env->hflags & HF_LMA_MASK); /* don't use HF_CS64_MASK here as cs hasn't been synced when this function is called. */
6286
6287 if (env->cpuid_features & CPUID_FXSR)
6288 {
6289 env->fpuc = lduw(ptr);
6290 fpus = lduw(ptr + 2);
6291 fptag = lduw(ptr + 4);
6292 env->fpstt = (fpus >> 11) & 7;
6293 env->fpus = fpus & ~0x3800;
6294 fptag ^= 0xff;
6295 for(i = 0;i < 8; i++) {
6296 env->fptags[i] = ((fptag >> i) & 1);
6297 }
6298
6299 addr = ptr + 0x20;
6300 for(i = 0;i < 8; i++) {
6301 tmp = helper_fldt_raw(addr);
6302 ST(i) = tmp;
6303 addr += 16;
6304 }
6305
6306 if (env->cr[4] & CR4_OSFXSR_MASK) {
6307 /* XXX: finish it, endianness */
6308 env->mxcsr = ldl(ptr + 0x18);
6309 //ldl(ptr + 0x1c);
6310 nb_xmm_regs = 8 << data64;
6311 addr = ptr + 0xa0;
6312 for(i = 0; i < nb_xmm_regs; i++) {
6313#if HC_ARCH_BITS == 32
6314 /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
6315 env->xmm_regs[i].XMM_L(0) = ldl(addr);
6316 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
6317 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
6318 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
6319#else
6320 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
6321 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
6322#endif
6323 addr += 16;
6324 }
6325 }
6326 }
6327 else
6328 {
6329 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6330 int fptag, j;
6331
6332 env->fpuc = fp->FCW;
6333 env->fpstt = (fp->FSW >> 11) & 7;
6334 env->fpus = fp->FSW & ~0x3800;
6335 fptag = fp->FTW;
6336 for(i = 0;i < 8; i++) {
6337 env->fptags[i] = ((fptag & 3) == 3);
6338 fptag >>= 2;
6339 }
6340 j = env->fpstt;
6341 for(i = 0;i < 8; i++) {
6342 tmp = helper_fldt_raw(&fp->regs[i].au8[0]);
6343 ST(i) = tmp;
6344 }
6345 }
6346}
6347//*****************************************************************************
6348//*****************************************************************************
6349
6350#endif /* VBOX */
6351
6352/* Secure Virtual Machine helpers */
6353
6354#if defined(CONFIG_USER_ONLY)
6355
6356void helper_vmrun(int aflag, int next_eip_addend)
6357{
6358}
6359void helper_vmmcall(void)
6360{
6361}
6362void helper_vmload(int aflag)
6363{
6364}
6365void helper_vmsave(int aflag)
6366{
6367}
6368void helper_stgi(void)
6369{
6370}
6371void helper_clgi(void)
6372{
6373}
6374void helper_skinit(void)
6375{
6376}
6377void helper_invlpga(int aflag)
6378{
6379}
6380void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6381{
6382}
6383void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6384{
6385}
6386
6387void helper_svm_check_io(uint32_t port, uint32_t param,
6388 uint32_t next_eip_addend)
6389{
6390}
6391#else
6392
6393static inline void svm_save_seg(target_phys_addr_t addr,
6394 const SegmentCache *sc)
6395{
6396 stw_phys(addr + offsetof(struct vmcb_seg, selector),
6397 sc->selector);
6398 stq_phys(addr + offsetof(struct vmcb_seg, base),
6399 sc->base);
6400 stl_phys(addr + offsetof(struct vmcb_seg, limit),
6401 sc->limit);
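 /* The VMCB attrib field packs the access-rights byte (flags bits 8..15) into
    bits 0..7 and the G/D/L/AVL nibble (flags bits 20..23) into bits 8..11,
    dropping the limit bits in between; svm_load_seg() does the inverse. */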
6402 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
6403 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
6404}
6405
6406static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6407{
6408 unsigned int flags;
6409
6410 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
6411 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
6412 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
6413 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
6414 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
6415}
6416
6417static inline void svm_load_seg_cache(target_phys_addr_t addr,
6418 CPUState *env, int seg_reg)
6419{
6420 SegmentCache sc1, *sc = &sc1;
6421 svm_load_seg(addr, sc);
6422 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
6423 sc->base, sc->limit, sc->flags);
6424}
6425
6426void helper_vmrun(int aflag, int next_eip_addend)
6427{
6428 target_ulong addr;
6429 uint32_t event_inj;
6430 uint32_t int_ctl;
6431
6432 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
6433
6434 if (aflag == 2)
6435 addr = EAX;
6436 else
6437 addr = (uint32_t)EAX;
6438
6439 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
6440
6441 env->vm_vmcb = addr;
6442
6443 /* save the current CPU state in the hsave page */
6444 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6445 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6446
6447 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6448 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6449
6450 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
6451 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
6452 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
6453 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
6454 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
6455 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
6456
6457 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
6458 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
6459
6460 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
6461 &env->segs[R_ES]);
6462 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
6463 &env->segs[R_CS]);
6464 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
6465 &env->segs[R_SS]);
6466 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
6467 &env->segs[R_DS]);
6468
6469 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
6470 EIP + next_eip_addend);
6471 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
6472 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
6473
6474 /* load the interception bitmaps so we do not need to access the
6475 vmcb in svm mode */
6476 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
6477 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
6478 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
6479 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
6480 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
6481 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
6482
6483 /* enable intercepts */
6484 env->hflags |= HF_SVMI_MASK;
6485
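 /* load the guest state from the vmcb */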
6486 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
6487
6488 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
6489 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
6490
6491 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
6492 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
6493
6494 /* clear exit_info_2 so we behave like the real hardware */
6495 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
6496
6497 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
6498 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
6499 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
6500 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
6501 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6502 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6503 if (int_ctl & V_INTR_MASKING_MASK) {
6504 env->v_tpr = int_ctl & V_TPR_MASK;
6505 env->hflags2 |= HF2_VINTR_MASK;
6506 if (env->eflags & IF_MASK)
6507 env->hflags2 |= HF2_HIF_MASK;
6508 }
6509
6510 cpu_load_efer(env,
6511 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
6512 env->eflags = 0;
6513 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
6514 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6515 CC_OP = CC_OP_EFLAGS;
6516
6517 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
6518 env, R_ES);
6519 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6520 env, R_CS);
6521 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6522 env, R_SS);
6523 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6524 env, R_DS);
6525
6526 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
6527 env->eip = EIP;
6528 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
6529 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
6530 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
6531 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
6532 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
6533
6534 /* FIXME: guest state consistency checks */
6535
6536 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
6537 case TLB_CONTROL_DO_NOTHING:
6538 break;
6539 case TLB_CONTROL_FLUSH_ALL_ASID:
6540 /* FIXME: this is not 100% correct but should work for now */
6541 tlb_flush(env, 1);
6542 break;
6543 }
6544
6545 env->hflags2 |= HF2_GIF_MASK;
6546
6547 if (int_ctl & V_IRQ_MASK) {
6548 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
6549 }
6550
6551 /* maybe we need to inject an event */
6552 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
6553 if (event_inj & SVM_EVTINJ_VALID) {
6554 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
6555 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
6556 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
6557
6558 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
6559 /* FIXME: need to implement valid_err */
6560 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
6561 case SVM_EVTINJ_TYPE_INTR:
6562 env->exception_index = vector;
6563 env->error_code = event_inj_err;
6564 env->exception_is_int = 0;
6565 env->exception_next_eip = -1;
6566 qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
6567 /* XXX: is this always correct? */
6568 do_interrupt(vector, 0, 0, 0, 1);
6569 break;
6570 case SVM_EVTINJ_TYPE_NMI:
6571 env->exception_index = EXCP02_NMI;
6572 env->error_code = event_inj_err;
6573 env->exception_is_int = 0;
6574 env->exception_next_eip = EIP;
6575 qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
6576 cpu_loop_exit();
6577 break;
6578 case SVM_EVTINJ_TYPE_EXEPT:
6579 env->exception_index = vector;
6580 env->error_code = event_inj_err;
6581 env->exception_is_int = 0;
6582 env->exception_next_eip = -1;
6583 qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
6584 cpu_loop_exit();
6585 break;
6586 case SVM_EVTINJ_TYPE_SOFT:
6587 env->exception_index = vector;
6588 env->error_code = event_inj_err;
6589 env->exception_is_int = 1;
6590 env->exception_next_eip = EIP;
6591 qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
6592 cpu_loop_exit();
6593 break;
6594 }
6595 qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
6596 }
6597}
6598
6599void helper_vmmcall(void)
6600{
6601 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
6602 raise_exception(EXCP06_ILLOP);
6603}
6604
6605void helper_vmload(int aflag)
6606{
6607 target_ulong addr;
6608 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
6609
6610 if (aflag == 2)
6611 addr = EAX;
6612 else
6613 addr = (uint32_t)EAX;
6614
6615 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6616 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6617 env->segs[R_FS].base);
6618
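 /* VMLOAD loads the additional guest state that VMRUN does not:
    FS/GS/TR/LDTR including their hidden parts, plus the syscall/sysenter MSRs. */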
6619 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
6620 env, R_FS);
6621 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
6622 env, R_GS);
6623 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
6624 &env->tr);
6625 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
6626 &env->ldt);
6627
6628#ifdef TARGET_X86_64
6629 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
6630 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
6631 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
6632 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
6633#endif
6634 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
6635 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
6636 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
6637 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
6638}
6639
6640void helper_vmsave(int aflag)
6641{
6642 target_ulong addr;
6643 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
6644
6645 if (aflag == 2)
6646 addr = EAX;
6647 else
6648 addr = (uint32_t)EAX;
6649
6650 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6651 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6652 env->segs[R_FS].base);
6653
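 /* VMSAVE is the inverse of VMLOAD: it stores the same register set back into
    the VMCB at the given address. */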
6654 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
6655 &env->segs[R_FS]);
6656 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
6657 &env->segs[R_GS]);
6658 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
6659 &env->tr);
6660 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
6661 &env->ldt);
6662
6663#ifdef TARGET_X86_64
6664 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
6665 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
6666 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
6667 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
6668#endif
6669 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
6670 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
6671 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
6672 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
6673}
6674
6675void helper_stgi(void)
6676{
6677 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
6678 env->hflags2 |= HF2_GIF_MASK;
6679}
6680
6681void helper_clgi(void)
6682{
6683 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
6684 env->hflags2 &= ~HF2_GIF_MASK;
6685}
6686
6687void helper_skinit(void)
6688{
6689 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
6690 /* XXX: not implemented */
6691 raise_exception(EXCP06_ILLOP);
6692}
6693
6694void helper_invlpga(int aflag)
6695{
6696 target_ulong addr;
6697 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
6698
6699 if (aflag == 2)
6700 addr = EAX;
6701 else
6702 addr = (uint32_t)EAX;
6703
6704 /* XXX: could use the ASID to determine whether the flush is
6705 actually needed */
6706 tlb_flush_page(env, addr);
6707}
6708
6709void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6710{
6711 if (likely(!(env->hflags & HF_SVMI_MASK)))
6712 return;
6713#ifndef VBOX
6714 switch(type) {
6715 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
6716 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
6717 helper_vmexit(type, param);
6718 }
6719 break;
6720 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
6721 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
6722 helper_vmexit(type, param);
6723 }
6724 break;
6725 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
6726 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
6727 helper_vmexit(type, param);
6728 }
6729 break;
6730 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
6731 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
6732 helper_vmexit(type, param);
6733 }
6734 break;
6735 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
6736 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
6737 helper_vmexit(type, param);
6738 }
6739 break;
6740 case SVM_EXIT_MSR:
6741 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
6742 /* FIXME: this should be read in at vmrun (faster this way?) */
6743 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
6744 uint32_t t0, t1;
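 /* The MSR permission map uses two bits per MSR (read intercept in the lower
    bit, write intercept in the upper); the three architectural MSR ranges
    occupy consecutive 2 KB regions of the map. t1 is the byte offset into the
    map and t0 the bit offset within that byte, e.g. EFER (ECX = 0xC0000080)
    lands at byte 0x820, bits 0/1. */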
6745 switch((uint32_t)ECX) {
6746 case 0 ... 0x1fff:
6747 t0 = (ECX * 2) % 8;
6748 t1 = (ECX * 2) / 8; /* two bits per MSR: the byte offset scales with ECX * 2 */
6749 break;
6750 case 0xc0000000 ... 0xc0001fff:
6751 t0 = (8192 + ECX - 0xc0000000) * 2;
6752 t1 = (t0 / 8);
6753 t0 %= 8;
6754 break;
6755 case 0xc0010000 ... 0xc0011fff:
6756 t0 = (16384 + ECX - 0xc0010000) * 2;
6757 t1 = (t0 / 8);
6758 t0 %= 8;
6759 break;
6760 default:
6761 helper_vmexit(type, param);
6762 t0 = 0;
6763 t1 = 0;
6764 break;
6765 }
6766 if (ldub_phys(addr + t1) & ((1 << param) << t0))
6767 helper_vmexit(type, param);
6768 }
6769 break;
6770 default:
6771 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
6772 helper_vmexit(type, param);
6773 }
6774 break;
6775 }
6776#else /* VBOX */
6777 AssertMsgFailed(("We shouldn't be here, HM supported differently!"));
6778#endif /* VBOX */
6779}
6780
6781void helper_svm_check_io(uint32_t port, uint32_t param,
6782 uint32_t next_eip_addend)
6783{
6784 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
6785 /* FIXME: this should be read in at vmrun (faster this way?) */
6786 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
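 /* One intercept bit per I/O port; bits 4..6 of param hold the access size in
    bytes, so the mask covers every port touched by the access (e.g. a 2-byte
    access to port 0x70 tests bits 0 and 1 of the 16-bit word read at
    byte 0x70 / 8). */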
6787 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
6788 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
6789 /* next EIP */
6790 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
6791 env->eip + next_eip_addend);
6792 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
6793 }
6794 }
6795}
6796
6797/* Note: currently only 32 bits of exit_code are used */
6798void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6799{
6800 uint32_t int_ctl;
6801
6802 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
6803 exit_code, exit_info_1,
6804 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
6805 EIP);
6806
6807 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
6808 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
6809 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6810 } else {
6811 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
6812 }
6813
6814 /* Save the VM state in the vmcb */
6815 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
6816 &env->segs[R_ES]);
6817 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6818 &env->segs[R_CS]);
6819 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6820 &env->segs[R_SS]);
6821 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6822 &env->segs[R_DS]);
6823
6824 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6825 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6826
6827 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6828 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6829
6830 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
6831 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
6832 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
6833 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
6834 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
6835
6836 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6837 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
6838 int_ctl |= env->v_tpr & V_TPR_MASK;
6839 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
6840 int_ctl |= V_IRQ_MASK;
6841 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
6842
6843 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
6844 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
6845 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
6846 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
6847 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
6848 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
6849 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
6850
6851 /* Reload the host state from vm_hsave */
6852 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6853 env->hflags &= ~HF_SVMI_MASK;
6854 env->intercept = 0;
6855 env->intercept_exceptions = 0;
6856 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
6857 env->tsc_offset = 0;
6858
6859 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
6860 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
6861
6862 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
6863 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
6864
6865 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
6866 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
6867 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
6868 /* we need to set the efer after the crs so the hidden flags get
6869 set properly */
6870 cpu_load_efer(env,
6871 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
6872 env->eflags = 0;
6873 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
6874 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6875 CC_OP = CC_OP_EFLAGS;
6876
6877 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
6878 env, R_ES);
6879 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
6880 env, R_CS);
6881 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
6882 env, R_SS);
6883 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
6884 env, R_DS);
6885
6886 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
6887 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
6888 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
6889
6890 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
6891 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
6892
6893 /* other setups */
6894 cpu_x86_set_cpl(env, 0);
6895 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
6896 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
6897
6898 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
6899 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
6900 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
6901 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
6902 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);
6903
6904 env->hflags2 &= ~HF2_GIF_MASK;
6905 /* FIXME: Resets the current ASID register to zero (host ASID). */
6906
6907 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
6908
6909 /* Clears the TSC_OFFSET inside the processor. */
6910
6911 /* If the host is in PAE mode, the processor reloads the host's PDPEs
6912 from the page table indicated by the host's CR3. If the PDPEs contain
6913 illegal state, the processor causes a shutdown. */
6914
6915 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
6916 env->cr[0] |= CR0_PE_MASK;
6917 env->eflags &= ~VM_MASK;
6918
6919 /* Disables all breakpoints in the host DR7 register. */
6920
6921 /* Checks the reloaded host state for consistency. */
6922
6923 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
6924 host's code segment or non-canonical (in the case of long mode), a
6925 #GP fault is delivered inside the host. */
6926
6927 /* remove any pending exception */
6928 env->exception_index = -1;
6929 env->error_code = 0;
6930 env->old_exception = -1;
6931
6932 cpu_loop_exit();
6933}
6934
6935#endif
6936
6937/* MMX/SSE */
6938/* XXX: optimize by storing fpstt and fptags in the static cpu state */
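/* Any MMX instruction resets the FPU stack top and marks all eight tags as
   valid (0); EMMS marks them all empty (1) again. */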
6939void helper_enter_mmx(void)
6940{
6941 env->fpstt = 0;
6942 *(uint32_t *)(env->fptags) = 0;
6943 *(uint32_t *)(env->fptags + 4) = 0;
6944}
6945
6946void helper_emms(void)
6947{
6948 /* set to empty state */
6949 *(uint32_t *)(env->fptags) = 0x01010101;
6950 *(uint32_t *)(env->fptags + 4) = 0x01010101;
6951}
6952
6953/* XXX: suppress */
6954void helper_movq(void *d, void *s)
6955{
6956 *(uint64_t *)d = *(uint64_t *)s;
6957}
6958
6959#define SHIFT 0
6960#include "ops_sse.h"
6961
6962#define SHIFT 1
6963#include "ops_sse.h"
6964
6965#define SHIFT 0
6966#include "helper_template.h"
6967#undef SHIFT
6968
6969#define SHIFT 1
6970#include "helper_template.h"
6971#undef SHIFT
6972
6973#define SHIFT 2
6974#include "helper_template.h"
6975#undef SHIFT
6976
6977#ifdef TARGET_X86_64
6978
6979#define SHIFT 3
6980#include "helper_template.h"
6981#undef SHIFT
6982
6983#endif
6984
6985/* bit operations */
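/* Bit scan forward; t0 is expected to be non-zero here (the translator handles
   the zero case, which only sets ZF, without calling the helper). */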
6986target_ulong helper_bsf(target_ulong t0)
6987{
6988 int count;
6989 target_ulong res;
6990
6991 res = t0;
6992 count = 0;
6993 while ((res & 1) == 0) {
6994 count++;
6995 res >>= 1;
6996 }
6997 return count;
6998}
6999
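/* With wordsize == 0 this returns the bit index of the highest set bit (BSR
   semantics); with a non-zero wordsize it returns the number of leading zeros
   within that word, which is wordsize itself for a zero operand (LZCNT
   semantics), e.g. helper_lzcnt(0x10, 16) == 11. */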
7000target_ulong helper_lzcnt(target_ulong t0, int wordsize)
7001{
7002 int count;
7003 target_ulong res, mask;
7004
7005 if (wordsize > 0 && t0 == 0) {
7006 return wordsize;
7007 }
7008 res = t0;
7009 count = TARGET_LONG_BITS - 1;
7010 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
7011 while ((res & mask) == 0) {
7012 count--;
7013 res <<= 1;
7014 }
7015 if (wordsize > 0) {
7016 return wordsize - 1 - count;
7017 }
7018 return count;
7019}
7020
7021target_ulong helper_bsr(target_ulong t0)
7022{
7023 return helper_lzcnt(t0, 0);
7024}
7025
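/* Lazy condition codes: CC_OP records which operation last set the flags and
   CC_SRC/CC_DST hold its operands/result, so helper_cc_compute_all() and
   helper_cc_compute_c() can reconstruct EFLAGS (or just CF) on demand.
   CC_OP_EFLAGS means the flags are already materialized in CC_SRC. */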
7026static int compute_all_eflags(void)
7027{
7028 return CC_SRC;
7029}
7030
7031static int compute_c_eflags(void)
7032{
7033 return CC_SRC & CC_C;
7034}
7035
7036uint32_t helper_cc_compute_all(int op)
7037{
7038 switch (op) {
7039 default: /* should never happen */ return 0;
7040
7041 case CC_OP_EFLAGS: return compute_all_eflags();
7042
7043 case CC_OP_MULB: return compute_all_mulb();
7044 case CC_OP_MULW: return compute_all_mulw();
7045 case CC_OP_MULL: return compute_all_mull();
7046
7047 case CC_OP_ADDB: return compute_all_addb();
7048 case CC_OP_ADDW: return compute_all_addw();
7049 case CC_OP_ADDL: return compute_all_addl();
7050
7051 case CC_OP_ADCB: return compute_all_adcb();
7052 case CC_OP_ADCW: return compute_all_adcw();
7053 case CC_OP_ADCL: return compute_all_adcl();
7054
7055 case CC_OP_SUBB: return compute_all_subb();
7056 case CC_OP_SUBW: return compute_all_subw();
7057 case CC_OP_SUBL: return compute_all_subl();
7058
7059 case CC_OP_SBBB: return compute_all_sbbb();
7060 case CC_OP_SBBW: return compute_all_sbbw();
7061 case CC_OP_SBBL: return compute_all_sbbl();
7062
7063 case CC_OP_LOGICB: return compute_all_logicb();
7064 case CC_OP_LOGICW: return compute_all_logicw();
7065 case CC_OP_LOGICL: return compute_all_logicl();
7066
7067 case CC_OP_INCB: return compute_all_incb();
7068 case CC_OP_INCW: return compute_all_incw();
7069 case CC_OP_INCL: return compute_all_incl();
7070
7071 case CC_OP_DECB: return compute_all_decb();
7072 case CC_OP_DECW: return compute_all_decw();
7073 case CC_OP_DECL: return compute_all_decl();
7074
7075 case CC_OP_SHLB: return compute_all_shlb();
7076 case CC_OP_SHLW: return compute_all_shlw();
7077 case CC_OP_SHLL: return compute_all_shll();
7078
7079 case CC_OP_SARB: return compute_all_sarb();
7080 case CC_OP_SARW: return compute_all_sarw();
7081 case CC_OP_SARL: return compute_all_sarl();
7082
7083#ifdef TARGET_X86_64
7084 case CC_OP_MULQ: return compute_all_mulq();
7085
7086 case CC_OP_ADDQ: return compute_all_addq();
7087
7088 case CC_OP_ADCQ: return compute_all_adcq();
7089
7090 case CC_OP_SUBQ: return compute_all_subq();
7091
7092 case CC_OP_SBBQ: return compute_all_sbbq();
7093
7094 case CC_OP_LOGICQ: return compute_all_logicq();
7095
7096 case CC_OP_INCQ: return compute_all_incq();
7097
7098 case CC_OP_DECQ: return compute_all_decq();
7099
7100 case CC_OP_SHLQ: return compute_all_shlq();
7101
7102 case CC_OP_SARQ: return compute_all_sarq();
7103#endif
7104 }
7105}
7106
7107uint32_t helper_cc_compute_c(int op)
7108{
7109 switch (op) {
7110 default: /* should never happen */ return 0;
7111
7112 case CC_OP_EFLAGS: return compute_c_eflags();
7113
7114 case CC_OP_MULB: return compute_c_mull();
7115 case CC_OP_MULW: return compute_c_mull();
7116 case CC_OP_MULL: return compute_c_mull();
7117
7118 case CC_OP_ADDB: return compute_c_addb();
7119 case CC_OP_ADDW: return compute_c_addw();
7120 case CC_OP_ADDL: return compute_c_addl();
7121
7122 case CC_OP_ADCB: return compute_c_adcb();
7123 case CC_OP_ADCW: return compute_c_adcw();
7124 case CC_OP_ADCL: return compute_c_adcl();
7125
7126 case CC_OP_SUBB: return compute_c_subb();
7127 case CC_OP_SUBW: return compute_c_subw();
7128 case CC_OP_SUBL: return compute_c_subl();
7129
7130 case CC_OP_SBBB: return compute_c_sbbb();
7131 case CC_OP_SBBW: return compute_c_sbbw();
7132 case CC_OP_SBBL: return compute_c_sbbl();
7133
7134 case CC_OP_LOGICB: return compute_c_logicb();
7135 case CC_OP_LOGICW: return compute_c_logicw();
7136 case CC_OP_LOGICL: return compute_c_logicl();
7137
7138 case CC_OP_INCB: return compute_c_incl();
7139 case CC_OP_INCW: return compute_c_incl();
7140 case CC_OP_INCL: return compute_c_incl();
7141
7142 case CC_OP_DECB: return compute_c_incl();
7143 case CC_OP_DECW: return compute_c_incl();
7144 case CC_OP_DECL: return compute_c_incl();
7145
7146 case CC_OP_SHLB: return compute_c_shlb();
7147 case CC_OP_SHLW: return compute_c_shlw();
7148 case CC_OP_SHLL: return compute_c_shll();
7149
7150 case CC_OP_SARB: return compute_c_sarl();
7151 case CC_OP_SARW: return compute_c_sarl();
7152 case CC_OP_SARL: return compute_c_sarl();
7153
7154#ifdef TARGET_X86_64
7155 case CC_OP_MULQ: return compute_c_mull();
7156
7157 case CC_OP_ADDQ: return compute_c_addq();
7158
7159 case CC_OP_ADCQ: return compute_c_adcq();
7160
7161 case CC_OP_SUBQ: return compute_c_subq();
7162
7163 case CC_OP_SBBQ: return compute_c_sbbq();
7164
7165 case CC_OP_LOGICQ: return compute_c_logicq();
7166
7167 case CC_OP_INCQ: return compute_c_incl();
7168
7169 case CC_OP_DECQ: return compute_c_incl();
7170
7171 case CC_OP_SHLQ: return compute_c_shlq();
7172
7173 case CC_OP_SARQ: return compute_c_sarl();
7174#endif
7175 }
7176}