VirtualBox

source: vbox/trunk/src/recompiler/target-i386/op_helper.c @ 46178

Last change on this file since 46178 was 45494, checked in by vboxsync, 12 years ago

op_helper.c: Removed debug statement that was accidentally committed.

  • Property svn:eol-style set to native
File size: 197.6 KB
1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20/*
21 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
22 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
23 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
24 * a choice of LGPL license versions is made available with the language indicating
25 * that LGPLv2 or any later version may be used, or where a choice of which version
26 * of the LGPL is applied is otherwise unspecified.
27 */
28
29#include "exec.h"
30#include "exec-all.h"
31#include "host-utils.h"
32#include "ioport.h"
33
34#ifdef VBOX
35# include "qemu-common.h"
36# include <math.h>
37# include "tcg.h"
38#endif /* VBOX */
39
40//#define DEBUG_PCALL
41
42
43#ifdef DEBUG_PCALL
44# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
45# define LOG_PCALL_STATE(env) \
46 log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
47#else
48# define LOG_PCALL(...) do { } while (0)
49# define LOG_PCALL_STATE(env) do { } while (0)
50#endif
51
52
53#if 0
54#define raise_exception_err(a, b)\
55do {\
56 qemu_log("raise_exception line=%d\n", __LINE__);\
57 (raise_exception_err)(a, b);\
58} while (0)
59#endif
60
61static const uint8_t parity_table[256] = {
62 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
63 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
67 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
68 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
69 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
70 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
71 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
73 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
74 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
75 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
76 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
77 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
78 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
79 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
80 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
81 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
82 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
83 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
84 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
85 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
86 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
87 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
88 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
89 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
90 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
91 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
92 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
93 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
94};
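/* Each entry is CC_P when the index has an even number of set bits, matching
   how the hardware derives PF from the low 8 bits of a result.  For
   illustration only, an equivalent (slower) runtime check using a GCC/Clang
   builtin would be: (__builtin_parity(res & 0xff) ? 0 : CC_P). */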
95
96/* modulo 17 table */
97static const uint8_t rclw_table[32] = {
98 0, 1, 2, 3, 4, 5, 6, 7,
99 8, 9,10,11,12,13,14,15,
100 16, 0, 1, 2, 3, 4, 5, 6,
101 7, 8, 9,10,11,12,13,14,
102};
103
104/* modulo 9 table */
105static const uint8_t rclb_table[32] = {
106 0, 1, 2, 3, 4, 5, 6, 7,
107 8, 0, 1, 2, 3, 4, 5, 6,
108 7, 8, 0, 1, 2, 3, 4, 5,
109 6, 7, 8, 0, 1, 2, 3, 4,
110};
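/* RCL/RCR rotate through the operand plus CF, i.e. 17 bit positions for a
   16-bit operand and 9 for an 8-bit one, so the raw count 0..31 is reduced
   modulo 17 (rclw_table[i] == i % 17) or modulo 9 (rclb_table[i] == i % 9). */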
111
112static const CPU86_LDouble f15rk[7] =
113{
114 0.00000000000000000000L,
115 1.00000000000000000000L,
116 3.14159265358979323851L, /*pi*/
117 0.30102999566398119523L, /*lg2*/
118 0.69314718055994530943L, /*ln2*/
119 1.44269504088896340739L, /*l2e*/
120 3.32192809488736234781L, /*l2t*/
121};
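/* Constants used by the x87 constant-load instructions: indexes 0..6
   correspond to FLDZ, FLD1, FLDPI, FLDLG2 (log10(2)), FLDLN2 (ln 2),
   FLDL2E (log2(e)) and FLDL2T (log2(10)). */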
122
123/* broken thread support */
124
125static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
126
127void helper_lock(void)
128{
129 spin_lock(&global_cpu_lock);
130}
131
132void helper_unlock(void)
133{
134 spin_unlock(&global_cpu_lock);
135}
136
137void helper_write_eflags(target_ulong t0, uint32_t update_mask)
138{
139 load_eflags(t0, update_mask);
140}
141
142target_ulong helper_read_eflags(void)
143{
144 uint32_t eflags;
145 eflags = helper_cc_compute_all(CC_OP);
146 eflags |= (DF & DF_MASK);
147 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
148 return eflags;
149}
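/* The emulated EFLAGS is kept in three places: the arithmetic flags are
   computed lazily from CC_OP/CC_SRC/CC_DST (folded back in here by
   helper_cc_compute_all), DF lives in its own variable, and env->eflags
   holds the remaining status/system bits. */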
150
151#ifdef VBOX
152
153void helper_write_eflags_vme(target_ulong t0)
154{
155 unsigned int new_eflags = t0;
156
157 assert(env->eflags & (1<<VM_SHIFT));
158
159 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
160 /* if TF will be set -> #GP */
161 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
162 || (new_eflags & TF_MASK)) {
163 raise_exception(EXCP0D_GPF);
164 } else {
165 load_eflags(new_eflags,
166 (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff);
167
168 if (new_eflags & IF_MASK) {
169 env->eflags |= VIF_MASK;
170 } else {
171 env->eflags &= ~VIF_MASK;
172 }
173 }
174}
175
176target_ulong helper_read_eflags_vme(void)
177{
178 uint32_t eflags;
179 eflags = helper_cc_compute_all(CC_OP);
180 eflags |= (DF & DF_MASK);
181 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
182 if (env->eflags & VIF_MASK)
183 eflags |= IF_MASK;
184 else
185 eflags &= ~IF_MASK;
186
187 /* According to AMD manual, should be read with IOPL == 3 */
188 eflags |= (3 << IOPL_SHIFT);
189
190 /* We only use helper_read_eflags_vme() in 16-bit mode */
191 return eflags & 0xffff;
192}
193
194void helper_dump_state(void)
195{
196 LogRel(("CS:EIP=%08x:%08x, FLAGS=%08x\n", env->segs[R_CS].base, env->eip, env->eflags));
197 LogRel(("EAX=%08x\tECX=%08x\tEDX=%08x\tEBX=%08x\n",
198 (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
199 (uint32_t)env->regs[R_EDX], (uint32_t)env->regs[R_EBX]));
200 LogRel(("ESP=%08x\tEBP=%08x\tESI=%08x\tEDI=%08x\n",
201 (uint32_t)env->regs[R_ESP], (uint32_t)env->regs[R_EBP],
202 (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI]));
203}
204
205/**
206 * Updates e2 with the DESC_A_MASK, writes it to the descriptor table, and
207 * returns the updated e2.
208 *
209 * @returns e2 with A set.
210 * @param e2 The 2nd selector DWORD.
211 */
212static uint32_t set_segment_accessed(int selector, uint32_t e2)
213{
214 SegmentCache *dt = selector & X86_SEL_LDT ? &env->ldt : &env->gdt;
215 target_ulong ptr = dt->base + (selector & X86_SEL_MASK);
216
217 e2 |= DESC_A_MASK;
218 stl_kernel(ptr + 4, e2);
219 return e2;
220}
221
222#endif /* VBOX */
223
224/* return non-zero if error */
225static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
226 int selector)
227{
228 SegmentCache *dt;
229 int index;
230 target_ulong ptr;
231
232#ifdef VBOX
233 /* Trying to load a selector with CPL=1? */
234 /** @todo this is a hack to correct the incorrect checking order for pending interrupts in the patm iret replacement code (corrected in the ring-1 version) */
235 /** @todo in theory the iret could fault and we'd still need this. */
236 if ((env->hflags & HF_CPL_MASK) == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0) && !EMIsRawRing1Enabled(env->pVM))
237 {
238 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
239 selector = selector & 0xfffc;
240 }
241#endif /* VBOX */
242
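    /* Selector bit 2 is the TI bit: set means the LDT, clear means the GDT;
       the low three bits (TI + RPL) are masked off below to form the byte
       offset of the 8-byte descriptor, whose two dwords are returned in
       e1/e2. */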
243 if (selector & 0x4)
244 dt = &env->ldt;
245 else
246 dt = &env->gdt;
247 index = selector & ~7;
248 if ((index + 7) > dt->limit)
249 return -1;
250 ptr = dt->base + index;
251 *e1_ptr = ldl_kernel(ptr);
252 *e2_ptr = ldl_kernel(ptr + 4);
253 return 0;
254}
255
256static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
257{
258 unsigned int limit;
259 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
260 if (e2 & DESC_G_MASK)
261 limit = (limit << 12) | 0xfff;
262 return limit;
263}
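/* The descriptor limit is a 20-bit field (e1[15:0] | e2[19:16]); when the
   granularity bit G is set it counts 4 KiB pages, hence the shift by 12 with
   the low 12 bits filled in. */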
264
265static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
266{
267 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
268}
269
270static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
271{
272 sc->base = get_seg_base(e1, e2);
273 sc->limit = get_seg_limit(e1, e2);
274 sc->flags = e2;
275#ifdef VBOX
276 sc->newselector = 0;
277 sc->fVBoxFlags = CPUMSELREG_FLAGS_VALID;
278#endif
279}
280
281/* init the segment cache in vm86 mode. */
282static inline void load_seg_vm(int seg, int selector)
283{
284 selector &= 0xffff;
285#ifdef VBOX
286 /* flags must be 0xf3; expand-up read/write accessed data segment with DPL=3. (VT-x) */
287 unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK;
288 flags |= (3 << DESC_DPL_SHIFT);
289
290 cpu_x86_load_seg_cache(env, seg, selector,
291 (selector << 4), 0xffff, flags);
292#else /* VBOX */
293 cpu_x86_load_seg_cache(env, seg, selector,
294 (selector << 4), 0xffff, 0);
295#endif /* VBOX */
296}
297
298static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
299 uint32_t *esp_ptr, int dpl)
300{
301#ifndef VBOX
302 int type, index, shift;
303#else
304 unsigned int type, index, shift;
305#endif
306
307#if 0
308 {
309 int i;
310 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
311 for(i=0;i<env->tr.limit;i++) {
312 printf("%02x ", env->tr.base[i]);
313 if ((i & 7) == 7) printf("\n");
314 }
315 printf("\n");
316 }
317#endif
318
319 if (!(env->tr.flags & DESC_P_MASK))
320 cpu_abort(env, "invalid tss");
321 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
322 if ((type & 7) != 1)
323 cpu_abort(env, "invalid tss type");
324 shift = type >> 3;
325 index = (dpl * 4 + 2) << shift;
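    /* shift is 1 for a 32-bit (386) TSS and 0 for a 16-bit (286) TSS, so this
       yields offset 4 + dpl * 8 (ESP0/SS0 at 4/8, ESP1/SS1 at 12/16, ...) for
       the former and 2 + dpl * 4 for the latter. */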
326 if (index + (4 << shift) - 1 > env->tr.limit)
327 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
328 if (shift == 0) {
329 *esp_ptr = lduw_kernel(env->tr.base + index);
330 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
331 } else {
332 *esp_ptr = ldl_kernel(env->tr.base + index);
333 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
334 }
335}
336
337/* XXX: merge with load_seg() */
338static void tss_load_seg(int seg_reg, int selector)
339{
340 uint32_t e1, e2;
341 int rpl, dpl, cpl;
342
343#ifdef VBOX
344 e1 = e2 = 0; /* gcc warning? */
345 cpl = env->hflags & HF_CPL_MASK;
346 /* Trying to load a selector with CPL=1? */
347 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
348 {
349 Log(("RPL 1 -> sel %04X -> %04X (tss_load_seg)\n", selector, selector & 0xfffc));
350 selector = selector & 0xfffc;
351 }
352#endif /* VBOX */
353
354 if ((selector & 0xfffc) != 0) {
355 if (load_segment(&e1, &e2, selector) != 0)
356 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
357 if (!(e2 & DESC_S_MASK))
358 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
359 rpl = selector & 3;
360 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
361 cpl = env->hflags & HF_CPL_MASK;
362 if (seg_reg == R_CS) {
363 if (!(e2 & DESC_CS_MASK))
364 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
365 /* XXX: is it correct ? */
366 if (dpl != rpl)
367 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
368 if ((e2 & DESC_C_MASK) && dpl > rpl)
369 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
370 } else if (seg_reg == R_SS) {
371 /* SS must be writable data */
372 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
373 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
374 if (dpl != cpl || dpl != rpl)
375 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
376 } else {
377 /* not readable code */
378 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
379 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
380 /* if data or non-conforming code, check the access rights */
381 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
382 if (dpl < cpl || dpl < rpl)
383 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
384 }
385 }
386 if (!(e2 & DESC_P_MASK))
387 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
388 cpu_x86_load_seg_cache(env, seg_reg, selector,
389 get_seg_base(e1, e2),
390 get_seg_limit(e1, e2),
391 e2);
392 } else {
393 if (seg_reg == R_SS || seg_reg == R_CS)
394 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
395#ifdef VBOX
396# if 0 /** @todo now we ignore loading 0 selectors, need to check what is correct once */
397 cpu_x86_load_seg_cache(env, seg_reg, selector,
398 0, 0, 0);
399# endif
400#endif /* VBOX */
401 }
402}
403
404#define SWITCH_TSS_JMP 0
405#define SWITCH_TSS_IRET 1
406#define SWITCH_TSS_CALL 2
407
408/* XXX: restore CPU state in registers (PowerPC case) */
409static void switch_tss(int tss_selector,
410 uint32_t e1, uint32_t e2, int source,
411 uint32_t next_eip)
412{
413 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
414 target_ulong tss_base;
415 uint32_t new_regs[8], new_segs[6];
416 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
417 uint32_t old_eflags, eflags_mask;
418 SegmentCache *dt;
419#ifndef VBOX
420 int index;
421#else
422 unsigned int index;
423#endif
424 target_ulong ptr;
425
426 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
427 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
428
429 /* if task gate, we read the TSS segment and we load it */
430 if (type == 5) {
431 if (!(e2 & DESC_P_MASK))
432 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
433 tss_selector = e1 >> 16;
434 if (tss_selector & 4)
435 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
436 if (load_segment(&e1, &e2, tss_selector) != 0)
437 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
438 if (e2 & DESC_S_MASK)
439 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
440 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
441 if ((type & 7) != 1)
442 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
443 }
444
445 if (!(e2 & DESC_P_MASK))
446 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
447
448 if (type & 8)
449 tss_limit_max = 103;
450 else
451 tss_limit_max = 43;
452 tss_limit = get_seg_limit(e1, e2);
453 tss_base = get_seg_base(e1, e2);
454 if ((tss_selector & 4) != 0 ||
455 tss_limit < tss_limit_max)
456 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
457 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
458 if (old_type & 8)
459 old_tss_limit_max = 103;
460 else
461 old_tss_limit_max = 43;
462
463 /* read all the registers from the new TSS */
464 if (type & 8) {
465 /* 32 bit */
466 new_cr3 = ldl_kernel(tss_base + 0x1c);
467 new_eip = ldl_kernel(tss_base + 0x20);
468 new_eflags = ldl_kernel(tss_base + 0x24);
469 for(i = 0; i < 8; i++)
470 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
471 for(i = 0; i < 6; i++)
472 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
473 new_ldt = lduw_kernel(tss_base + 0x60);
474 new_trap = ldl_kernel(tss_base + 0x64);
475 } else {
476 /* 16 bit */
477 new_cr3 = 0;
478 new_eip = lduw_kernel(tss_base + 0x0e);
479 new_eflags = lduw_kernel(tss_base + 0x10);
480 for(i = 0; i < 8; i++)
481 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
482 for(i = 0; i < 4; i++)
483 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
484 new_ldt = lduw_kernel(tss_base + 0x2a);
485 new_segs[R_FS] = 0;
486 new_segs[R_GS] = 0;
487 new_trap = 0;
488 }
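    /* The offsets above follow the architectural TSS layouts.  32-bit TSS:
       CR3 at 0x1c, EIP at 0x20, EFLAGS at 0x24, the eight GPRs at 0x28..0x44,
       the six segment selectors at 0x48..0x5c, the LDT selector at 0x60 and
       the T-bit / I/O-map-base word at 0x64/0x66.  16-bit (286) TSS: IP at
       0x0e, FLAGS at 0x10, the GPRs at 0x12..0x20 and the LDT selector at
       0x2a. */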
489
490 /* NOTE: we must avoid memory exceptions during the task switch,
491 so we make dummy accesses before */
492 /* XXX: it can still fail in some cases, so a bigger hack is
494 necessary to validate the TLB after the accesses have been done */
494
495 v1 = ldub_kernel(env->tr.base);
496 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
497 stb_kernel(env->tr.base, v1);
498 stb_kernel(env->tr.base + old_tss_limit_max, v2);
499
500 /* clear busy bit (it is restartable) */
501 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
502 target_ulong ptr;
503 uint32_t e2;
504 ptr = env->gdt.base + (env->tr.selector & ~7);
505 e2 = ldl_kernel(ptr + 4);
506 e2 &= ~DESC_TSS_BUSY_MASK;
507 stl_kernel(ptr + 4, e2);
508 }
509 old_eflags = compute_eflags();
510 if (source == SWITCH_TSS_IRET)
511 old_eflags &= ~NT_MASK;
512
513 /* save the current state in the old TSS */
514 if (type & 8) {
515 /* 32 bit */
516 stl_kernel(env->tr.base + 0x20, next_eip);
517 stl_kernel(env->tr.base + 0x24, old_eflags);
518 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
519 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
520 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
521 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
522 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
523 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
524 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
525 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
526 for(i = 0; i < 6; i++)
527 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
528#ifdef VBOX
529 /* Must store the ldt as it gets reloaded and might have been changed. */
530 stw_kernel(env->tr.base + 0x60, env->ldt.selector);
531#endif
532#if defined(VBOX) && defined(DEBUG)
533 printf("TSS 32 bits switch\n");
534 printf("Saving CS=%08X\n", env->segs[R_CS].selector);
535#endif
536 } else {
537 /* 16 bit */
538 stw_kernel(env->tr.base + 0x0e, next_eip);
539 stw_kernel(env->tr.base + 0x10, old_eflags);
540 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
541 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
542 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
543 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
544 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
545 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
546 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
547 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
548 for(i = 0; i < 4; i++)
549 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
550#ifdef VBOX
551 /* Must store the ldt as it gets reloaded and might have been changed. */
552 stw_kernel(env->tr.base + 0x2a, env->ldt.selector);
553#endif
554 }
555
556 /* now if an exception occurs, it will occur in the next task
557 context */
558
559 if (source == SWITCH_TSS_CALL) {
560 stw_kernel(tss_base, env->tr.selector);
561 new_eflags |= NT_MASK;
562 }
563
564 /* set busy bit */
565 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
566 target_ulong ptr;
567 uint32_t e2;
568 ptr = env->gdt.base + (tss_selector & ~7);
569 e2 = ldl_kernel(ptr + 4);
570 e2 |= DESC_TSS_BUSY_MASK;
571 stl_kernel(ptr + 4, e2);
572 }
573
574 /* set the new CPU state */
575 /* from this point, any exception which occurs can give problems */
576 env->cr[0] |= CR0_TS_MASK;
577 env->hflags |= HF_TS_MASK;
578 env->tr.selector = tss_selector;
579 env->tr.base = tss_base;
580 env->tr.limit = tss_limit;
581 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
582#ifdef VBOX
583 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
584 env->tr.newselector = 0;
585#endif
586
587 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
588 cpu_x86_update_cr3(env, new_cr3);
589 }
590
591 /* load the registers that cannot fault first, then reload those
592 that may raise exceptions */
593 env->eip = new_eip;
594 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
595 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
596 if (!(type & 8))
597 eflags_mask &= 0xffff;
598 load_eflags(new_eflags, eflags_mask);
599 /* XXX: what to do in 16 bit case ? */
600 EAX = new_regs[0];
601 ECX = new_regs[1];
602 EDX = new_regs[2];
603 EBX = new_regs[3];
604 ESP = new_regs[4];
605 EBP = new_regs[5];
606 ESI = new_regs[6];
607 EDI = new_regs[7];
608 if (new_eflags & VM_MASK) {
609 for(i = 0; i < 6; i++)
610 load_seg_vm(i, new_segs[i]);
611 /* in vm86, CPL is always 3 */
612 cpu_x86_set_cpl(env, 3);
613 } else {
614 /* CPL is set to the RPL of CS */
615 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
616 /* load just the selectors first; loading the full descriptors may trigger exceptions */
617 for(i = 0; i < 6; i++)
618 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
619 }
620
621 env->ldt.selector = new_ldt & ~4;
622 env->ldt.base = 0;
623 env->ldt.limit = 0;
624 env->ldt.flags = 0;
625#ifdef VBOX
626 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
627 env->ldt.newselector = 0;
628#endif
629
630 /* load the LDT */
631 if (new_ldt & 4)
632 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
633
634 if ((new_ldt & 0xfffc) != 0) {
635 dt = &env->gdt;
636 index = new_ldt & ~7;
637 if ((index + 7) > dt->limit)
638 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
639 ptr = dt->base + index;
640 e1 = ldl_kernel(ptr);
641 e2 = ldl_kernel(ptr + 4);
642 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
643 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
644 if (!(e2 & DESC_P_MASK))
645 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
646 load_seg_cache_raw_dt(&env->ldt, e1, e2);
647 }
648
649 /* load the segments */
650 if (!(new_eflags & VM_MASK)) {
651 tss_load_seg(R_CS, new_segs[R_CS]);
652 tss_load_seg(R_SS, new_segs[R_SS]);
653 tss_load_seg(R_ES, new_segs[R_ES]);
654 tss_load_seg(R_DS, new_segs[R_DS]);
655 tss_load_seg(R_FS, new_segs[R_FS]);
656 tss_load_seg(R_GS, new_segs[R_GS]);
657 }
658
659 /* check that EIP is in the CS segment limits */
660 if (new_eip > env->segs[R_CS].limit) {
661 /* XXX: different exception if CALL ? */
662 raise_exception_err(EXCP0D_GPF, 0);
663 }
664
665#ifndef CONFIG_USER_ONLY
666 /* reset local breakpoints */
667 if (env->dr[7] & 0x55) {
668 for (i = 0; i < 4; i++) {
669 if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
670 hw_breakpoint_remove(env, i);
671 }
672 env->dr[7] &= ~0x55;
673 }
674#endif
675}
676
677/* check if Port I/O is allowed in TSS */
678static inline void check_io(int addr, int size)
679{
680#ifndef VBOX
681 int io_offset, val, mask;
682#else
683 int val, mask;
684 unsigned int io_offset;
685#endif /* VBOX */
686
687 /* TSS must be a valid 32 bit one */
688 if (!(env->tr.flags & DESC_P_MASK) ||
689 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
690 env->tr.limit < 103)
691 goto fail;
692 io_offset = lduw_kernel(env->tr.base + 0x66);
693 io_offset += (addr >> 3);
694 /* Note: the check needs two bytes */
695 if ((io_offset + 1) > env->tr.limit)
696 goto fail;
697 val = lduw_kernel(env->tr.base + io_offset);
698 val >>= (addr & 7);
699 mask = (1 << size) - 1;
700 /* all bits must be zero to allow the I/O */
701 if ((val & mask) != 0) {
702 fail:
703 raise_exception_err(EXCP0D_GPF, 0);
704 }
705}
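/* Worked example: a 16-bit access (size == 2) to port 0x3f8 tests the word at
   TSS offset io_offset + (0x3f8 >> 3) = io_offset + 0x7f; after shifting right
   by 0x3f8 & 7 = 0, both low bits (covering ports 0x3f8 and 0x3f9) must be
   clear for the access to be allowed. */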
706
707#ifdef VBOX
708
709/* Keep in sync with gen_check_external_event() */
710void helper_check_external_event(void)
711{
712 if ( (env->interrupt_request & ( CPU_INTERRUPT_EXTERNAL_FLUSH_TLB
713 | CPU_INTERRUPT_EXTERNAL_EXIT
714 | CPU_INTERRUPT_EXTERNAL_TIMER
715 | CPU_INTERRUPT_EXTERNAL_DMA))
716 || ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
717 && (env->eflags & IF_MASK)
718 && !(env->hflags & HF_INHIBIT_IRQ_MASK) ) )
719 {
720 helper_external_event();
721 }
722
723}
724
725void helper_sync_seg(uint32_t reg)
726{
727 if (env->segs[reg].newselector)
728 sync_seg(env, reg, env->segs[reg].newselector);
729}
730
731#endif /* VBOX */
732
733void helper_check_iob(uint32_t t0)
734{
735 check_io(t0, 1);
736}
737
738void helper_check_iow(uint32_t t0)
739{
740 check_io(t0, 2);
741}
742
743void helper_check_iol(uint32_t t0)
744{
745 check_io(t0, 4);
746}
747
748void helper_outb(uint32_t port, uint32_t data)
749{
750#ifndef VBOX
751 cpu_outb(port, data & 0xff);
752#else
753 cpu_outb(env, port, data & 0xff);
754#endif
755}
756
757target_ulong helper_inb(uint32_t port)
758{
759#ifndef VBOX
760 return cpu_inb(port);
761#else
762 return cpu_inb(env, port);
763#endif
764}
765
766void helper_outw(uint32_t port, uint32_t data)
767{
768#ifndef VBOX
769 cpu_outw(port, data & 0xffff);
770#else
771 cpu_outw(env, port, data & 0xffff);
772#endif
773}
774
775target_ulong helper_inw(uint32_t port)
776{
777#ifndef VBOX
778 return cpu_inw(port);
779#else
780 return cpu_inw(env, port);
781#endif
782}
783
784void helper_outl(uint32_t port, uint32_t data)
785{
786#ifndef VBOX
787 cpu_outl(port, data);
788#else
789 cpu_outl(env, port, data);
790#endif
791}
792
793target_ulong helper_inl(uint32_t port)
794{
795#ifndef VBOX
796 return cpu_inl(port);
797#else
798 return cpu_inl(env, port);
799#endif
800}
801
802static inline unsigned int get_sp_mask(unsigned int e2)
803{
804 if (e2 & DESC_B_MASK)
805 return 0xffffffff;
806 else
807 return 0xffff;
808}
809
810static int exeption_has_error_code(int intno)
811{
812 switch(intno) {
813 case 8:
814 case 10:
815 case 11:
816 case 12:
817 case 13:
818 case 14:
819 case 17:
820 return 1;
821 }
822 return 0;
823}
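/* The vectors above are #DF(8), #TS(10), #NP(11), #SS(12), #GP(13), #PF(14)
   and #AC(17), the exceptions that push an error code on the stack. */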
824
825#ifdef TARGET_X86_64
826#define SET_ESP(val, sp_mask)\
827do {\
828 if ((sp_mask) == 0xffff)\
829 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
830 else if ((sp_mask) == 0xffffffffLL)\
831 ESP = (uint32_t)(val);\
832 else\
833 ESP = (val);\
834} while (0)
835#else
836#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
837#endif
838
839/* in 64-bit machines, this can overflow. So this segment addition macro
840 * can be used to trim the value to 32-bit whenever needed */
841#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
842
843/* XXX: add an is_user flag to have proper security support */
844#define PUSHW(ssp, sp, sp_mask, val)\
845{\
846 sp -= 2;\
847 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
848}
849
850#define PUSHL(ssp, sp, sp_mask, val)\
851{\
852 sp -= 4;\
853 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
854}
855
856#define POPW(ssp, sp, sp_mask, val)\
857{\
858 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
859 sp += 2;\
860}
861
862#define POPL(ssp, sp, sp_mask, val)\
863{\
864 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
865 sp += 4;\
866}
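/* In these macros ssp is the linear base of SS and sp the stack offset;
   sp_mask (0xffff or 0xffffffff depending on the B bit of SS) keeps the
   offset within the stack segment size, and SEG_ADDL truncates the sum to
   32 bits so 32-bit segmented wrap-around is preserved on 64-bit builds. */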
867
868/* protected mode interrupt */
869static void do_interrupt_protected(int intno, int is_int, int error_code,
870 unsigned int next_eip, int is_hw)
871{
872 SegmentCache *dt;
873 target_ulong ptr, ssp;
874 int type, dpl, selector, ss_dpl, cpl;
875 int has_error_code, new_stack, shift;
876 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
877 uint32_t old_eip, sp_mask;
878
879#ifdef VBOX
880 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
881 cpu_loop_exit();
882#endif
883
884 has_error_code = 0;
885 if (!is_int && !is_hw)
886 has_error_code = exeption_has_error_code(intno);
887 if (is_int)
888 old_eip = next_eip;
889 else
890 old_eip = env->eip;
891
892 dt = &env->idt;
893#ifndef VBOX
894 if (intno * 8 + 7 > dt->limit)
895#else
896 if ((unsigned)intno * 8 + 7 > dt->limit)
897#endif
898 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
899 ptr = dt->base + intno * 8;
900 e1 = ldl_kernel(ptr);
901 e2 = ldl_kernel(ptr + 4);
902 /* check gate type */
903 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
904 switch(type) {
905 case 5: /* task gate */
906#ifdef VBOX
907 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
908 cpl = env->hflags & HF_CPL_MASK;
909 /* check privilege if software int */
910 if (is_int && dpl < cpl)
911 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
912#endif
913 /* must do that check here to return the correct error code */
914 if (!(e2 & DESC_P_MASK))
915 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
916 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
917 if (has_error_code) {
918 int type;
919 uint32_t mask;
920 /* push the error code */
921 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
922 shift = type >> 3;
923 if (env->segs[R_SS].flags & DESC_B_MASK)
924 mask = 0xffffffff;
925 else
926 mask = 0xffff;
927 esp = (ESP - (2 << shift)) & mask;
928 ssp = env->segs[R_SS].base + esp;
929 if (shift)
930 stl_kernel(ssp, error_code);
931 else
932 stw_kernel(ssp, error_code);
933 SET_ESP(esp, mask);
934 }
935 return;
936 case 6: /* 286 interrupt gate */
937 case 7: /* 286 trap gate */
938 case 14: /* 386 interrupt gate */
939 case 15: /* 386 trap gate */
940 break;
941 default:
942 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
943 break;
944 }
945 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
946 cpl = env->hflags & HF_CPL_MASK;
947 /* check privilege if software int */
948 if (is_int && dpl < cpl)
949 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
950 /* check valid bit */
951 if (!(e2 & DESC_P_MASK))
952 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
953 selector = e1 >> 16;
954 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
955 if ((selector & 0xfffc) == 0)
956 raise_exception_err(EXCP0D_GPF, 0);
957
958 if (load_segment(&e1, &e2, selector) != 0)
959 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
960 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
961 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
962 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
963 if (dpl > cpl)
964 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
965 if (!(e2 & DESC_P_MASK))
966 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
967 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
968 /* to inner privilege */
969 get_ss_esp_from_tss(&ss, &esp, dpl);
970 if ((ss & 0xfffc) == 0)
971 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
972 if ((ss & 3) != dpl)
973 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
974 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
975 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
976 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
977 if (ss_dpl != dpl)
978 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
979 if (!(ss_e2 & DESC_S_MASK) ||
980 (ss_e2 & DESC_CS_MASK) ||
981 !(ss_e2 & DESC_W_MASK))
982 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
983 if (!(ss_e2 & DESC_P_MASK))
984#ifdef VBOX /* See page 3-477 of 253666.pdf */
985 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
986#else
987 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
988#endif
989 new_stack = 1;
990 sp_mask = get_sp_mask(ss_e2);
991 ssp = get_seg_base(ss_e1, ss_e2);
992#if defined(VBOX) && defined(DEBUG)
993 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
994#endif
995 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
996 /* to same privilege */
997 if (env->eflags & VM_MASK)
998 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
999 new_stack = 0;
1000 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1001 ssp = env->segs[R_SS].base;
1002 esp = ESP;
1003 dpl = cpl;
1004 } else {
1005 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1006 new_stack = 0; /* avoid warning */
1007 sp_mask = 0; /* avoid warning */
1008 ssp = 0; /* avoid warning */
1009 esp = 0; /* avoid warning */
1010 }
1011
1012 shift = type >> 3;
1013
1014#if 0
1015 /* XXX: check that enough room is available */
1016 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
1017 if (env->eflags & VM_MASK)
1018 push_size += 8;
1019 push_size <<= shift;
1020#endif
1021 if (shift == 1) {
1022 if (new_stack) {
1023 if (env->eflags & VM_MASK) {
1024 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
1025 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
1026 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
1027 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
1028 }
1029 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
1030 PUSHL(ssp, esp, sp_mask, ESP);
1031 }
1032 PUSHL(ssp, esp, sp_mask, compute_eflags());
1033 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
1034 PUSHL(ssp, esp, sp_mask, old_eip);
1035 if (has_error_code) {
1036 PUSHL(ssp, esp, sp_mask, error_code);
1037 }
1038 } else {
1039 if (new_stack) {
1040 if (env->eflags & VM_MASK) {
1041 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
1042 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
1043 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
1044 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
1045 }
1046 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
1047 PUSHW(ssp, esp, sp_mask, ESP);
1048 }
1049 PUSHW(ssp, esp, sp_mask, compute_eflags());
1050 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
1051 PUSHW(ssp, esp, sp_mask, old_eip);
1052 if (has_error_code) {
1053 PUSHW(ssp, esp, sp_mask, error_code);
1054 }
1055 }
1056
1057 if (new_stack) {
1058 if (env->eflags & VM_MASK) {
1059 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
1060 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
1061 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
1062 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
1063 }
1064 ss = (ss & ~3) | dpl;
1065 cpu_x86_load_seg_cache(env, R_SS, ss,
1066 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
1067 }
1068 SET_ESP(esp, sp_mask);
1069
1070 selector = (selector & ~3) | dpl;
1071 cpu_x86_load_seg_cache(env, R_CS, selector,
1072 get_seg_base(e1, e2),
1073 get_seg_limit(e1, e2),
1074 e2);
1075 cpu_x86_set_cpl(env, dpl);
1076 env->eip = offset;
1077
1078 /* interrupt gate clear IF mask */
1079 if ((type & 1) == 0) {
1080 env->eflags &= ~IF_MASK;
1081 }
1082#ifndef VBOX
1083 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1084#else
1085 /*
1086 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1087 * gets confused by seemingly changed EFLAGS. See #3491 and
1088 * public bug #2341.
1089 */
1090 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1091#endif
1092}
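/* Summary of the frame built above for a 32-bit gate: when switching to an
   inner stack it pushes (GS/FS/DS/ES first, but only when coming from VM86)
   the old SS and ESP, then EFLAGS, CS, EIP and, if applicable, the error
   code; 16-bit gates push the same sequence in 16-bit slots. */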
1093
1094#ifdef VBOX
1095
1096/* check if VME interrupt redirection is enabled in TSS */
1097DECLINLINE(bool) is_vme_irq_redirected(int intno)
1098{
1099 unsigned int io_offset, intredir_offset;
1100 unsigned char val, mask;
1101
1102 /* TSS must be a valid 32 bit one */
1103 if (!(env->tr.flags & DESC_P_MASK) ||
1104 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
1105 env->tr.limit < 103)
1106 goto fail;
1107 io_offset = lduw_kernel(env->tr.base + 0x66);
1108 /* Make sure the io bitmap offset is valid; anything less than sizeof(VBOXTSS) means there's none. */
1109 if (io_offset < 0x68 + 0x20)
1110 io_offset = 0x68 + 0x20;
1111 /* the virtual interrupt redirection bitmap is located below the io bitmap */
1112 intredir_offset = io_offset - 0x20;
1113
1114 intredir_offset += (intno >> 3);
1115 if ((intredir_offset) > env->tr.limit)
1116 goto fail;
1117
1118 val = ldub_kernel(env->tr.base + intredir_offset);
1119 mask = 1 << (unsigned char)(intno & 7);
1120
1121 /* bit set means no redirection. */
1122 if ((val & mask) != 0) {
1123 return false;
1124 }
1125 return true;
1126
1127fail:
1128 raise_exception_err(EXCP0D_GPF, 0);
1129 return true;
1130}
1131
1132/* V86 mode software interrupt with CR4.VME=1 */
1133static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
1134{
1135 target_ulong ptr, ssp;
1136 int selector;
1137 uint32_t offset, esp;
1138 uint32_t old_cs, old_eflags;
1139 uint32_t iopl;
1140
1141 iopl = ((env->eflags >> IOPL_SHIFT) & 3);
1142
1143 if (!is_vme_irq_redirected(intno))
1144 {
1145 if (iopl == 3)
1146 {
1147 do_interrupt_protected(intno, 1, error_code, next_eip, 0);
1148 return;
1149 }
1150 else
1151 raise_exception_err(EXCP0D_GPF, 0);
1152 }
1153
1154 /* virtual mode idt is at linear address 0 */
1155 ptr = 0 + intno * 4;
1156 offset = lduw_kernel(ptr);
1157 selector = lduw_kernel(ptr + 2);
1158 esp = ESP;
1159 ssp = env->segs[R_SS].base;
1160 old_cs = env->segs[R_CS].selector;
1161
1162 old_eflags = compute_eflags();
1163 if (iopl < 3)
1164 {
1165 /* copy VIF into IF and set IOPL to 3 */
1166 if (env->eflags & VIF_MASK)
1167 old_eflags |= IF_MASK;
1168 else
1169 old_eflags &= ~IF_MASK;
1170
1171 old_eflags |= (3 << IOPL_SHIFT);
1172 }
1173
1174 /* XXX: use SS segment size ? */
1175 PUSHW(ssp, esp, 0xffff, old_eflags);
1176 PUSHW(ssp, esp, 0xffff, old_cs);
1177 PUSHW(ssp, esp, 0xffff, next_eip);
1178
1179 /* update processor state */
1180 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1181 env->eip = offset;
1182 env->segs[R_CS].selector = selector;
1183 env->segs[R_CS].base = (selector << 4);
1184 env->eflags &= ~(TF_MASK | RF_MASK);
1185
1186 if (iopl < 3)
1187 env->eflags &= ~VIF_MASK;
1188 else
1189 env->eflags &= ~IF_MASK;
1190}
1191
1192#endif /* VBOX */
1193
1194#ifdef TARGET_X86_64
1195
1196#define PUSHQ(sp, val)\
1197{\
1198 sp -= 8;\
1199 stq_kernel(sp, (val));\
1200}
1201
1202#define POPQ(sp, val)\
1203{\
1204 val = ldq_kernel(sp);\
1205 sp += 8;\
1206}
1207
1208static inline target_ulong get_rsp_from_tss(int level)
1209{
1210 int index;
1211
1212#if 0
1213 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
1214 env->tr.base, env->tr.limit);
1215#endif
1216
1217 if (!(env->tr.flags & DESC_P_MASK))
1218 cpu_abort(env, "invalid tss");
1219 index = 8 * level + 4;
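    /* 64-bit TSS layout: RSP0/1/2 at offsets 4/12/20, eight reserved bytes,
       then IST1..IST7 at 36..84; 8 * level + 4 covers both cases because
       callers pass either a DPL (0..2) or ist + 3 (4..10). */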
1220 if ((index + 7) > env->tr.limit)
1221 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
1222 return ldq_kernel(env->tr.base + index);
1223}
1224
1225/* 64 bit interrupt */
1226static void do_interrupt64(int intno, int is_int, int error_code,
1227 target_ulong next_eip, int is_hw)
1228{
1229 SegmentCache *dt;
1230 target_ulong ptr;
1231 int type, dpl, selector, cpl, ist;
1232 int has_error_code, new_stack;
1233 uint32_t e1, e2, e3, ss;
1234 target_ulong old_eip, esp, offset;
1235
1236#ifdef VBOX
1237 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
1238 cpu_loop_exit();
1239#endif
1240
1241 has_error_code = 0;
1242 if (!is_int && !is_hw)
1243 has_error_code = exeption_has_error_code(intno);
1244 if (is_int)
1245 old_eip = next_eip;
1246 else
1247 old_eip = env->eip;
1248
1249 dt = &env->idt;
1250 if (intno * 16 + 15 > dt->limit)
1251 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1252 ptr = dt->base + intno * 16;
1253 e1 = ldl_kernel(ptr);
1254 e2 = ldl_kernel(ptr + 4);
1255 e3 = ldl_kernel(ptr + 8);
1256 /* check gate type */
1257 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1258 switch(type) {
1259 case 14: /* 386 interrupt gate */
1260 case 15: /* 386 trap gate */
1261 break;
1262 default:
1263 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1264 break;
1265 }
1266 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1267 cpl = env->hflags & HF_CPL_MASK;
1268 /* check privilege if software int */
1269 if (is_int && dpl < cpl)
1270 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1271 /* check valid bit */
1272 if (!(e2 & DESC_P_MASK))
1273 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
1274 selector = e1 >> 16;
1275 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1276 ist = e2 & 7;
1277 if ((selector & 0xfffc) == 0)
1278 raise_exception_err(EXCP0D_GPF, 0);
1279
1280 if (load_segment(&e1, &e2, selector) != 0)
1281 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1282 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1283 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1284 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1285 if (dpl > cpl)
1286 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1287 if (!(e2 & DESC_P_MASK))
1288 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1289 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1290 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1291 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1292 /* to inner privilege */
1293 if (ist != 0)
1294 esp = get_rsp_from_tss(ist + 3);
1295 else
1296 esp = get_rsp_from_tss(dpl);
1297 esp &= ~0xfLL; /* align stack */
1298 ss = 0;
1299 new_stack = 1;
1300 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1301 /* to same privilege */
1302 if (env->eflags & VM_MASK)
1303 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1304 new_stack = 0;
1305 if (ist != 0)
1306 esp = get_rsp_from_tss(ist + 3);
1307 else
1308 esp = ESP;
1309 esp &= ~0xfLL; /* align stack */
1310 dpl = cpl;
1311 } else {
1312 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1313 new_stack = 0; /* avoid warning */
1314 esp = 0; /* avoid warning */
1315 }
1316
1317 PUSHQ(esp, env->segs[R_SS].selector);
1318 PUSHQ(esp, ESP);
1319 PUSHQ(esp, compute_eflags());
1320 PUSHQ(esp, env->segs[R_CS].selector);
1321 PUSHQ(esp, old_eip);
1322 if (has_error_code) {
1323 PUSHQ(esp, error_code);
1324 }
1325
1326 if (new_stack) {
1327 ss = 0 | dpl;
1328 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1329 }
1330 ESP = esp;
1331
1332 selector = (selector & ~3) | dpl;
1333 cpu_x86_load_seg_cache(env, R_CS, selector,
1334 get_seg_base(e1, e2),
1335 get_seg_limit(e1, e2),
1336 e2);
1337 cpu_x86_set_cpl(env, dpl);
1338 env->eip = offset;
1339
1340 /* interrupt gate clear IF mask */
1341 if ((type & 1) == 0) {
1342 env->eflags &= ~IF_MASK;
1343 }
1344#ifndef VBOX
1345 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1346#else /* VBOX */
1347 /*
1348 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1349 * gets confused by seemingly changed EFLAGS. See #3491 and
1350 * public bug #2341.
1351 */
1352 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1353#endif /* VBOX */
1354}
1355#endif
1356
1357#ifdef TARGET_X86_64
1358#if defined(CONFIG_USER_ONLY)
1359void helper_syscall(int next_eip_addend)
1360{
1361 env->exception_index = EXCP_SYSCALL;
1362 env->exception_next_eip = env->eip + next_eip_addend;
1363 cpu_loop_exit();
1364}
1365#else
1366void helper_syscall(int next_eip_addend)
1367{
1368 int selector;
1369
1370 if (!(env->efer & MSR_EFER_SCE)) {
1371 raise_exception_err(EXCP06_ILLOP, 0);
1372 }
1373 selector = (env->star >> 32) & 0xffff;
1374 if (env->hflags & HF_LMA_MASK) {
1375 int code64;
1376
1377 ECX = env->eip + next_eip_addend;
1378 env->regs[11] = compute_eflags();
1379
1380 code64 = env->hflags & HF_CS64_MASK;
1381
1382 cpu_x86_set_cpl(env, 0);
1383 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1384 0, 0xffffffff,
1385 DESC_G_MASK | DESC_P_MASK |
1386 DESC_S_MASK |
1387 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1388 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1389 0, 0xffffffff,
1390 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1391 DESC_S_MASK |
1392 DESC_W_MASK | DESC_A_MASK);
1393 env->eflags &= ~env->fmask;
1394 load_eflags(env->eflags, 0);
1395 if (code64)
1396 env->eip = env->lstar;
1397 else
1398 env->eip = env->cstar;
1399 } else {
1400 ECX = (uint32_t)(env->eip + next_eip_addend);
1401
1402 cpu_x86_set_cpl(env, 0);
1403 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1404 0, 0xffffffff,
1405 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1406 DESC_S_MASK |
1407 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1408 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1409 0, 0xffffffff,
1410 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1411 DESC_S_MASK |
1412 DESC_W_MASK | DESC_A_MASK);
1413 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1414 env->eip = (uint32_t)env->star;
1415 }
1416}
1417#endif
1418#endif
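/* SYSCALL summary for the code above: in long mode RCX receives the return
   RIP and R11 the current RFLAGS, RFLAGS is ANDed with ~env->fmask (the
   FMASK MSR), CS is loaded from STAR[47:32] with SS = CS + 8, and RIP comes
   from LSTAR (64-bit caller) or CSTAR (compatibility mode); in legacy mode
   ECX gets the return EIP, IF/VM/RF are cleared and EIP is taken from
   STAR[31:0]. */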
1419
1420#ifdef TARGET_X86_64
1421void helper_sysret(int dflag)
1422{
1423 int cpl, selector;
1424
1425 if (!(env->efer & MSR_EFER_SCE)) {
1426 raise_exception_err(EXCP06_ILLOP, 0);
1427 }
1428 cpl = env->hflags & HF_CPL_MASK;
1429 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1430 raise_exception_err(EXCP0D_GPF, 0);
1431 }
1432 selector = (env->star >> 48) & 0xffff;
1433 if (env->hflags & HF_LMA_MASK) {
1434 if (dflag == 2) {
1435 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1436 0, 0xffffffff,
1437 DESC_G_MASK | DESC_P_MASK |
1438 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1439 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1440 DESC_L_MASK);
1441 env->eip = ECX;
1442 } else {
1443 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1444 0, 0xffffffff,
1445 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1446 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1447 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1448 env->eip = (uint32_t)ECX;
1449 }
1450 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1451 0, 0xffffffff,
1452 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1453 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1454 DESC_W_MASK | DESC_A_MASK);
1455 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1456 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1457 cpu_x86_set_cpl(env, 3);
1458 } else {
1459 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1460 0, 0xffffffff,
1461 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1462 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1463 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1464 env->eip = (uint32_t)ECX;
1465 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1466 0, 0xffffffff,
1467 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1468 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1469 DESC_W_MASK | DESC_A_MASK);
1470 env->eflags |= IF_MASK;
1471 cpu_x86_set_cpl(env, 3);
1472 }
1473}
1474#endif
1475
1476#ifdef VBOX
1477
1478/**
1479 * Checks and processes external VMM events.
1480 * Called by op_check_external_event() when any of the flags is set and can be serviced.
1481 */
1482void helper_external_event(void)
1483{
1484# if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
1485 uintptr_t uSP;
1486# ifdef RT_ARCH_AMD64
1487 __asm__ __volatile__("movq %%rsp, %0" : "=r" (uSP));
1488# else
1489 __asm__ __volatile__("movl %%esp, %0" : "=r" (uSP));
1490# endif
1491 AssertMsg(!(uSP & 15), ("xSP=%#p\n", uSP));
1492# endif
1493 /* Keep in sync with flags checked by gen_check_external_event() */
1494 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
1495 {
1496 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1497 ~CPU_INTERRUPT_EXTERNAL_HARD);
1498 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1499 }
1500 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
1501 {
1502 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1503 ~CPU_INTERRUPT_EXTERNAL_EXIT);
1504 cpu_exit(env);
1505 }
1506 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
1507 {
1508 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1509 ~CPU_INTERRUPT_EXTERNAL_DMA);
1510 remR3DmaRun(env);
1511 }
1512 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
1513 {
1514 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1515 ~CPU_INTERRUPT_EXTERNAL_TIMER);
1516 remR3TimersRun(env);
1517 }
1518 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_FLUSH_TLB)
1519 {
1520 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1521 ~CPU_INTERRUPT_EXTERNAL_HARD);
1522 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1523 }
1524}
1525
1526/* helper for recording call instruction addresses for later scanning */
1527void helper_record_call(void)
1528{
1529 if ( !(env->state & CPU_RAW_RING0)
1530 && (env->cr[0] & CR0_PG_MASK)
1531 && !(env->eflags & X86_EFL_IF))
1532 remR3RecordCall(env);
1533}
1534
1535#endif /* VBOX */
1536
1537/* real mode interrupt */
1538static void do_interrupt_real(int intno, int is_int, int error_code,
1539 unsigned int next_eip)
1540{
1541 SegmentCache *dt;
1542 target_ulong ptr, ssp;
1543 int selector;
1544 uint32_t offset, esp;
1545 uint32_t old_cs, old_eip;
1546
1547 /* real mode (simpler !) */
1548 dt = &env->idt;
1549#ifndef VBOX
1550 if (intno * 4 + 3 > dt->limit)
1551#else
1552 if ((unsigned)intno * 4 + 3 > dt->limit)
1553#endif
1554 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1555 ptr = dt->base + intno * 4;
1556 offset = lduw_kernel(ptr);
1557 selector = lduw_kernel(ptr + 2);
1558 esp = ESP;
1559 ssp = env->segs[R_SS].base;
1560 if (is_int)
1561 old_eip = next_eip;
1562 else
1563 old_eip = env->eip;
1564 old_cs = env->segs[R_CS].selector;
1565 /* XXX: use SS segment size ? */
1566 PUSHW(ssp, esp, 0xffff, compute_eflags());
1567 PUSHW(ssp, esp, 0xffff, old_cs);
1568 PUSHW(ssp, esp, 0xffff, old_eip);
1569
1570 /* update processor state */
1571 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1572 env->eip = offset;
1573 env->segs[R_CS].selector = selector;
1574 env->segs[R_CS].base = (selector << 4);
1575 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1576}
1577
1578/* fake user mode interrupt */
1579void do_interrupt_user(int intno, int is_int, int error_code,
1580 target_ulong next_eip)
1581{
1582 SegmentCache *dt;
1583 target_ulong ptr;
1584 int dpl, cpl, shift;
1585 uint32_t e2;
1586
1587 dt = &env->idt;
1588 if (env->hflags & HF_LMA_MASK) {
1589 shift = 4;
1590 } else {
1591 shift = 3;
1592 }
1593 ptr = dt->base + (intno << shift);
1594 e2 = ldl_kernel(ptr + 4);
1595
1596 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1597 cpl = env->hflags & HF_CPL_MASK;
1598 /* check privilege if software int */
1599 if (is_int && dpl < cpl)
1600 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1601
1602 /* Since we emulate only user space, we cannot do more than
1603 exit the emulation with the appropriate exception and error
1604 code */
1605 if (is_int)
1606 EIP = next_eip;
1607}
1608
1609#if !defined(CONFIG_USER_ONLY)
1610static void handle_even_inj(int intno, int is_int, int error_code,
1611 int is_hw, int rm)
1612{
1613 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1614 if (!(event_inj & SVM_EVTINJ_VALID)) {
1615 int type;
1616 if (is_int)
1617 type = SVM_EVTINJ_TYPE_SOFT;
1618 else
1619 type = SVM_EVTINJ_TYPE_EXEPT;
1620 event_inj = intno | type | SVM_EVTINJ_VALID;
1621 if (!rm && exeption_has_error_code(intno)) {
1622 event_inj |= SVM_EVTINJ_VALID_ERR;
1623 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
1624 }
1625 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
1626 }
1627}
1628#endif
1629
1630/*
1631 * Begin execution of an interrupt. is_int is TRUE if coming from
1632 * the int instruction. next_eip is the EIP value AFTER the interrupt
1633 * instruction. It is only relevant if is_int is TRUE.
1634 */
1635void do_interrupt(int intno, int is_int, int error_code,
1636 target_ulong next_eip, int is_hw)
1637{
1638 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1639 if ((env->cr[0] & CR0_PE_MASK)) {
1640 static int count;
1641 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1642 count, intno, error_code, is_int,
1643 env->hflags & HF_CPL_MASK,
1644 env->segs[R_CS].selector, EIP,
1645 (int)env->segs[R_CS].base + EIP,
1646 env->segs[R_SS].selector, ESP);
1647 if (intno == 0x0e) {
1648 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1649 } else {
1650 qemu_log(" EAX=" TARGET_FMT_lx, EAX);
1651 }
1652 qemu_log("\n");
1653 log_cpu_state(env, X86_DUMP_CCOP);
1654#if 0
1655 {
1656 int i;
1657 uint8_t *ptr;
1658 qemu_log(" code=");
1659 ptr = env->segs[R_CS].base + env->eip;
1660 for(i = 0; i < 16; i++) {
1661 qemu_log(" %02x", ldub(ptr + i));
1662 }
1663 qemu_log("\n");
1664 }
1665#endif
1666 count++;
1667 }
1668 }
1669#ifdef VBOX
1670 if (RT_UNLIKELY(env->state & CPU_EMULATE_SINGLE_STEP)) {
1671 if (is_int) {
1672 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv%s\n",
1673 intno, error_code, (RTGCPTR)env->eip, is_hw ? " hw" : "");
1674 } else {
1675 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv next=%#RGv%s\n",
1676 intno, error_code, (RTGCPTR)env->eip, (RTGCPTR)next_eip, is_hw ? " hw" : "");
1677 }
1678 }
1679#endif
1680 if (env->cr[0] & CR0_PE_MASK) {
1681#if !defined(CONFIG_USER_ONLY)
1682 if (env->hflags & HF_SVMI_MASK)
1683 handle_even_inj(intno, is_int, error_code, is_hw, 0);
1684#endif
1685#ifdef TARGET_X86_64
1686 if (env->hflags & HF_LMA_MASK) {
1687 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1688 } else
1689#endif
1690 {
1691#ifdef VBOX
1692 /* int xx *, v86 code and VME enabled? */
1693 if ( (env->eflags & VM_MASK)
1694 && (env->cr[4] & CR4_VME_MASK)
1695 && is_int
1696 && !is_hw
1697 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
1698 )
1699 do_soft_interrupt_vme(intno, error_code, next_eip);
1700 else
1701#endif /* VBOX */
1702 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1703 }
1704 } else {
1705#if !defined(CONFIG_USER_ONLY)
1706 if (env->hflags & HF_SVMI_MASK)
1707 handle_even_inj(intno, is_int, error_code, is_hw, 1);
1708#endif
1709 do_interrupt_real(intno, is_int, error_code, next_eip);
1710 }
1711
1712#if !defined(CONFIG_USER_ONLY)
1713 if (env->hflags & HF_SVMI_MASK) {
1714 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1715 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
1716 }
1717#endif
1718}
1719
1720/* This should come from sysemu.h - if we could include it here... */
1721void qemu_system_reset_request(void);
1722
1723/*
1724 * Check nested exceptions and change to double or triple fault if
1725 * needed. It should only be called if this is not an interrupt.
1726 * Returns the new exception number.
1727 */
1728static int check_exception(int intno, int *error_code)
1729{
1730 int first_contributory = env->old_exception == 0 ||
1731 (env->old_exception >= 10 &&
1732 env->old_exception <= 13);
1733 int second_contributory = intno == 0 ||
1734 (intno >= 10 && intno <= 13);
1735
1736 qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
1737 env->old_exception, intno);
1738
1739#if !defined(CONFIG_USER_ONLY)
1740 if (env->old_exception == EXCP08_DBLE) {
1741 if (env->hflags & HF_SVMI_MASK)
1742 helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */
1743
1744 qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
1745
1746# ifndef VBOX
1747 qemu_system_reset_request();
1748# else
1749 remR3RaiseRC(env->pVM, VINF_EM_RESET); /** @todo test + improve triple fault handling. */
1750# endif
1751 return EXCP_HLT;
1752 }
1753#endif
1754
1755 if ((first_contributory && second_contributory)
1756 || (env->old_exception == EXCP0E_PAGE &&
1757 (second_contributory || (intno == EXCP0E_PAGE)))) {
1758 intno = EXCP08_DBLE;
1759 *error_code = 0;
1760 }
1761
1762 if (second_contributory || (intno == EXCP0E_PAGE) ||
1763 (intno == EXCP08_DBLE))
1764 env->old_exception = intno;
1765
1766 return intno;
1767}
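/* Vectors 0 (#DE) and 10..13 (#TS, #NP, #SS, #GP) form the "contributory"
   class: a contributory exception raised while handling another contributory
   one, or a contributory exception or #PF raised while handling a #PF, is
   promoted to #DF(8); a further fault while handling #DF is a triple fault,
   treated above as a shutdown/reset. */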
1768
1769/*
1770 * Signal an interruption. It is executed in the main CPU loop.
1771 * is_int is TRUE if coming from the int instruction. next_eip is the
1772 * EIP value AFTER the interrupt instruction. It is only relevant if
1773 * is_int is TRUE.
1774 */
1775static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
1776 int next_eip_addend)
1777{
1778#if defined(VBOX) && defined(DEBUG)
1779 Log2(("raise_interrupt: %x %x %x %RGv\n", intno, is_int, error_code, (RTGCPTR)env->eip + next_eip_addend));
1780#endif
1781 if (!is_int) {
1782 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1783 intno = check_exception(intno, &error_code);
1784 } else {
1785 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1786 }
1787
1788 env->exception_index = intno;
1789 env->error_code = error_code;
1790 env->exception_is_int = is_int;
1791 env->exception_next_eip = env->eip + next_eip_addend;
1792 cpu_loop_exit();
1793}
1794
1795/* shortcuts to generate exceptions */
1796
1797void raise_exception_err(int exception_index, int error_code)
1798{
1799 raise_interrupt(exception_index, 0, error_code, 0);
1800}
1801
1802void raise_exception(int exception_index)
1803{
1804 raise_interrupt(exception_index, 0, 0, 0);
1805}
1806
1807void raise_exception_env(int exception_index, CPUState *nenv)
1808{
1809 env = nenv;
1810 raise_exception(exception_index);
1811}
1812/* SMM support */
1813
1814#if defined(CONFIG_USER_ONLY)
1815
1816void do_smm_enter(void)
1817{
1818}
1819
1820void helper_rsm(void)
1821{
1822}
1823
1824#else
1825
1826#ifdef TARGET_X86_64
1827#define SMM_REVISION_ID 0x00020064
1828#else
1829#define SMM_REVISION_ID 0x00020000
1830#endif
1831
1832void do_smm_enter(void)
1833{
1834 target_ulong sm_state;
1835 SegmentCache *dt;
1836 int i, offset;
1837
1838 qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
1839 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1840
1841 env->hflags |= HF_SMM_MASK;
1842 cpu_smm_update(env);
1843
1844 sm_state = env->smbase + 0x8000;
1845
1846#ifdef TARGET_X86_64
1847 for(i = 0; i < 6; i++) {
1848 dt = &env->segs[i];
1849 offset = 0x7e00 + i * 16;
1850 stw_phys(sm_state + offset, dt->selector);
1851 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1852 stl_phys(sm_state + offset + 4, dt->limit);
1853 stq_phys(sm_state + offset + 8, dt->base);
1854 }
1855
1856 stq_phys(sm_state + 0x7e68, env->gdt.base);
1857 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1858
1859 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1860 stq_phys(sm_state + 0x7e78, env->ldt.base);
1861 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1862 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1863
1864 stq_phys(sm_state + 0x7e88, env->idt.base);
1865 stl_phys(sm_state + 0x7e84, env->idt.limit);
1866
1867 stw_phys(sm_state + 0x7e90, env->tr.selector);
1868 stq_phys(sm_state + 0x7e98, env->tr.base);
1869 stl_phys(sm_state + 0x7e94, env->tr.limit);
1870 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1871
1872 stq_phys(sm_state + 0x7ed0, env->efer);
1873
1874 stq_phys(sm_state + 0x7ff8, EAX);
1875 stq_phys(sm_state + 0x7ff0, ECX);
1876 stq_phys(sm_state + 0x7fe8, EDX);
1877 stq_phys(sm_state + 0x7fe0, EBX);
1878 stq_phys(sm_state + 0x7fd8, ESP);
1879 stq_phys(sm_state + 0x7fd0, EBP);
1880 stq_phys(sm_state + 0x7fc8, ESI);
1881 stq_phys(sm_state + 0x7fc0, EDI);
1882 for(i = 8; i < 16; i++)
1883 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1884 stq_phys(sm_state + 0x7f78, env->eip);
1885 stl_phys(sm_state + 0x7f70, compute_eflags());
1886 stl_phys(sm_state + 0x7f68, env->dr[6]);
1887 stl_phys(sm_state + 0x7f60, env->dr[7]);
1888
1889 stl_phys(sm_state + 0x7f48, env->cr[4]);
1890 stl_phys(sm_state + 0x7f50, env->cr[3]);
1891 stl_phys(sm_state + 0x7f58, env->cr[0]);
1892
1893 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1894 stl_phys(sm_state + 0x7f00, env->smbase);
1895#else
1896 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1897 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1898 stl_phys(sm_state + 0x7ff4, compute_eflags());
1899 stl_phys(sm_state + 0x7ff0, env->eip);
1900 stl_phys(sm_state + 0x7fec, EDI);
1901 stl_phys(sm_state + 0x7fe8, ESI);
1902 stl_phys(sm_state + 0x7fe4, EBP);
1903 stl_phys(sm_state + 0x7fe0, ESP);
1904 stl_phys(sm_state + 0x7fdc, EBX);
1905 stl_phys(sm_state + 0x7fd8, EDX);
1906 stl_phys(sm_state + 0x7fd4, ECX);
1907 stl_phys(sm_state + 0x7fd0, EAX);
1908 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1909 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1910
1911 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1912 stl_phys(sm_state + 0x7f64, env->tr.base);
1913 stl_phys(sm_state + 0x7f60, env->tr.limit);
1914 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1915
1916 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1917 stl_phys(sm_state + 0x7f80, env->ldt.base);
1918 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1919 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1920
1921 stl_phys(sm_state + 0x7f74, env->gdt.base);
1922 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1923
1924 stl_phys(sm_state + 0x7f58, env->idt.base);
1925 stl_phys(sm_state + 0x7f54, env->idt.limit);
1926
1927 for(i = 0; i < 6; i++) {
1928 dt = &env->segs[i];
1929 if (i < 3)
1930 offset = 0x7f84 + i * 12;
1931 else
1932 offset = 0x7f2c + (i - 3) * 12;
1933 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1934 stl_phys(sm_state + offset + 8, dt->base);
1935 stl_phys(sm_state + offset + 4, dt->limit);
1936 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1937 }
1938 stl_phys(sm_state + 0x7f14, env->cr[4]);
1939
1940 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1941 stl_phys(sm_state + 0x7ef8, env->smbase);
1942#endif
1943 /* init SMM cpu state */
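    /* Enter the SMM execution environment: protection and paging are switched
       off, all segments become flat 4GB selectors (CS is based at SMBASE),
       and execution resumes at offset 0x8000, i.e. the SMI handler entry
       point at SMBASE + 0x8000. */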
1944
1945#ifdef TARGET_X86_64
1946 cpu_load_efer(env, 0);
1947#endif
1948 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1949 env->eip = 0x00008000;
1950 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1951 0xffffffff, 0);
1952 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1953 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1954 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1955 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1956 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1957
1958 cpu_x86_update_cr0(env,
1959 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1960 cpu_x86_update_cr4(env, 0);
1961 env->dr[7] = 0x00000400;
1962 CC_OP = CC_OP_EFLAGS;
1963}
1964
1965void helper_rsm(void)
1966{
1967#ifdef VBOX
1968 cpu_abort(env, "helper_rsm");
1969#else /* !VBOX */
1970 target_ulong sm_state;
1971 int i, offset;
1972 uint32_t val;
1973
1974 sm_state = env->smbase + 0x8000;
1975#ifdef TARGET_X86_64
1976 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1977
1978 for(i = 0; i < 6; i++) {
1979 offset = 0x7e00 + i * 16;
1980 cpu_x86_load_seg_cache(env, i,
1981 lduw_phys(sm_state + offset),
1982 ldq_phys(sm_state + offset + 8),
1983 ldl_phys(sm_state + offset + 4),
1984 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1985 }
1986
1987 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1988 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1989
1990 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1991 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1992 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1993 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1994#ifdef VBOX
1995 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
1996 env->ldt.newselector = 0;
1997#endif
1998
1999 env->idt.base = ldq_phys(sm_state + 0x7e88);
2000 env->idt.limit = ldl_phys(sm_state + 0x7e84);
2001
2002 env->tr.selector = lduw_phys(sm_state + 0x7e90);
2003 env->tr.base = ldq_phys(sm_state + 0x7e98);
2004 env->tr.limit = ldl_phys(sm_state + 0x7e94);
2005 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
2006#ifdef VBOX
2007 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2008 env->tr.newselector = 0;
2009#endif
2010
2011 EAX = ldq_phys(sm_state + 0x7ff8);
2012 ECX = ldq_phys(sm_state + 0x7ff0);
2013 EDX = ldq_phys(sm_state + 0x7fe8);
2014 EBX = ldq_phys(sm_state + 0x7fe0);
2015 ESP = ldq_phys(sm_state + 0x7fd8);
2016 EBP = ldq_phys(sm_state + 0x7fd0);
2017 ESI = ldq_phys(sm_state + 0x7fc8);
2018 EDI = ldq_phys(sm_state + 0x7fc0);
2019 for(i = 8; i < 16; i++)
2020 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
2021 env->eip = ldq_phys(sm_state + 0x7f78);
2022 load_eflags(ldl_phys(sm_state + 0x7f70),
2023 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
2024 env->dr[6] = ldl_phys(sm_state + 0x7f68);
2025 env->dr[7] = ldl_phys(sm_state + 0x7f60);
2026
2027 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
2028 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
2029 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
2030
2031 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
2032 if (val & 0x20000) {
2033 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
2034 }
2035#else
2036 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
2037 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
2038 load_eflags(ldl_phys(sm_state + 0x7ff4),
2039 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
2040 env->eip = ldl_phys(sm_state + 0x7ff0);
2041 EDI = ldl_phys(sm_state + 0x7fec);
2042 ESI = ldl_phys(sm_state + 0x7fe8);
2043 EBP = ldl_phys(sm_state + 0x7fe4);
2044 ESP = ldl_phys(sm_state + 0x7fe0);
2045 EBX = ldl_phys(sm_state + 0x7fdc);
2046 EDX = ldl_phys(sm_state + 0x7fd8);
2047 ECX = ldl_phys(sm_state + 0x7fd4);
2048 EAX = ldl_phys(sm_state + 0x7fd0);
2049 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
2050 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
2051
2052 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
2053 env->tr.base = ldl_phys(sm_state + 0x7f64);
2054 env->tr.limit = ldl_phys(sm_state + 0x7f60);
2055 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
2056#ifdef VBOX
2057 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2058 env->tr.newselector = 0;
2059#endif
2060
2061 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
2062 env->ldt.base = ldl_phys(sm_state + 0x7f80);
2063 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
2064 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
2065#ifdef VBOX
2066 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2067 env->ldt.newselector = 0;
2068#endif
2069
2070 env->gdt.base = ldl_phys(sm_state + 0x7f74);
2071 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
2072
2073 env->idt.base = ldl_phys(sm_state + 0x7f58);
2074 env->idt.limit = ldl_phys(sm_state + 0x7f54);
2075
2076 for(i = 0; i < 6; i++) {
2077 if (i < 3)
2078 offset = 0x7f84 + i * 12;
2079 else
2080 offset = 0x7f2c + (i - 3) * 12;
2081 cpu_x86_load_seg_cache(env, i,
2082 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
2083 ldl_phys(sm_state + offset + 8),
2084 ldl_phys(sm_state + offset + 4),
2085 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
2086 }
2087 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
2088
2089 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
2090 if (val & 0x20000) {
2091 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
2092 }
2093#endif
2094 CC_OP = CC_OP_EFLAGS;
2095 env->hflags &= ~HF_SMM_MASK;
2096 cpu_smm_update(env);
2097
2098 qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
2099 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
2100#endif /* !VBOX */
2101}
2102
2103#endif /* !CONFIG_USER_ONLY */
2104
2105
2106/* division, flags are undefined */
2107
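/* Worked example for the unsigned byte divide below: DIV with AX = 0x0305
   (773) and an 8-bit divisor of 0x10 (16) yields AL = 0x30 (quotient 48) and
   AH = 0x05 (remainder 5).  A zero divisor, or a quotient that does not fit
   in the destination register, raises #DE (EXCP00_DIVZ). */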
2108void helper_divb_AL(target_ulong t0)
2109{
2110 unsigned int num, den, q, r;
2111
2112 num = (EAX & 0xffff);
2113 den = (t0 & 0xff);
2114 if (den == 0) {
2115 raise_exception(EXCP00_DIVZ);
2116 }
2117 q = (num / den);
2118 if (q > 0xff)
2119 raise_exception(EXCP00_DIVZ);
2120 q &= 0xff;
2121 r = (num % den) & 0xff;
2122 EAX = (EAX & ~0xffff) | (r << 8) | q;
2123}
2124
2125void helper_idivb_AL(target_ulong t0)
2126{
2127 int num, den, q, r;
2128
2129 num = (int16_t)EAX;
2130 den = (int8_t)t0;
2131 if (den == 0) {
2132 raise_exception(EXCP00_DIVZ);
2133 }
2134 q = (num / den);
2135 if (q != (int8_t)q)
2136 raise_exception(EXCP00_DIVZ);
2137 q &= 0xff;
2138 r = (num % den) & 0xff;
2139 EAX = (EAX & ~0xffff) | (r << 8) | q;
2140}
2141
2142void helper_divw_AX(target_ulong t0)
2143{
2144 unsigned int num, den, q, r;
2145
2146 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2147 den = (t0 & 0xffff);
2148 if (den == 0) {
2149 raise_exception(EXCP00_DIVZ);
2150 }
2151 q = (num / den);
2152 if (q > 0xffff)
2153 raise_exception(EXCP00_DIVZ);
2154 q &= 0xffff;
2155 r = (num % den) & 0xffff;
2156 EAX = (EAX & ~0xffff) | q;
2157 EDX = (EDX & ~0xffff) | r;
2158}
2159
2160void helper_idivw_AX(target_ulong t0)
2161{
2162 int num, den, q, r;
2163
2164 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2165 den = (int16_t)t0;
2166 if (den == 0) {
2167 raise_exception(EXCP00_DIVZ);
2168 }
2169 q = (num / den);
2170 if (q != (int16_t)q)
2171 raise_exception(EXCP00_DIVZ);
2172 q &= 0xffff;
2173 r = (num % den) & 0xffff;
2174 EAX = (EAX & ~0xffff) | q;
2175 EDX = (EDX & ~0xffff) | r;
2176}
2177
2178void helper_divl_EAX(target_ulong t0)
2179{
2180 unsigned int den, r;
2181 uint64_t num, q;
2182
2183 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2184 den = t0;
2185 if (den == 0) {
2186 raise_exception(EXCP00_DIVZ);
2187 }
2188 q = (num / den);
2189 r = (num % den);
2190 if (q > 0xffffffff)
2191 raise_exception(EXCP00_DIVZ);
2192 EAX = (uint32_t)q;
2193 EDX = (uint32_t)r;
2194}
2195
2196void helper_idivl_EAX(target_ulong t0)
2197{
2198 int den, r;
2199 int64_t num, q;
2200
2201 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2202 den = t0;
2203 if (den == 0) {
2204 raise_exception(EXCP00_DIVZ);
2205 }
2206 q = (num / den);
2207 r = (num % den);
2208 if (q != (int32_t)q)
2209 raise_exception(EXCP00_DIVZ);
2210 EAX = (uint32_t)q;
2211 EDX = (uint32_t)r;
2212}
2213
2214/* bcd */
2215
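/* Worked example for the ASCII-adjust helpers below: AAM with the default
   base of 10 and AL = 0x2B (43) leaves AH = 0x04 and AL = 0x03, i.e. the two
   unpacked BCD digits of 43. */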
2216/* AAM raises #DE when its immediate divisor is zero. */
2217void helper_aam(int base)
2218{
2219 int al, ah;
 if ((base & 0xff) == 0)
     raise_exception(EXCP00_DIVZ);
2220 al = EAX & 0xff;
2221 ah = al / base;
2222 al = al % base;
2223 EAX = (EAX & ~0xffff) | al | (ah << 8);
2224 CC_DST = al;
2225}
2226
2227void helper_aad(int base)
2228{
2229 int al, ah;
2230 al = EAX & 0xff;
2231 ah = (EAX >> 8) & 0xff;
2232 al = ((ah * base) + al) & 0xff;
2233 EAX = (EAX & ~0xffff) | al;
2234 CC_DST = al;
2235}
2236
2237void helper_aaa(void)
2238{
2239 int icarry;
2240 int al, ah, af;
2241 int eflags;
2242
2243 eflags = helper_cc_compute_all(CC_OP);
2244 af = eflags & CC_A;
2245 al = EAX & 0xff;
2246 ah = (EAX >> 8) & 0xff;
2247
2248 icarry = (al > 0xf9);
2249 if (((al & 0x0f) > 9 ) || af) {
2250 al = (al + 6) & 0x0f;
2251 ah = (ah + 1 + icarry) & 0xff;
2252 eflags |= CC_C | CC_A;
2253 } else {
2254 eflags &= ~(CC_C | CC_A);
2255 al &= 0x0f;
2256 }
2257 EAX = (EAX & ~0xffff) | al | (ah << 8);
2258 CC_SRC = eflags;
2259}
2260
2261void helper_aas(void)
2262{
2263 int icarry;
2264 int al, ah, af;
2265 int eflags;
2266
2267 eflags = helper_cc_compute_all(CC_OP);
2268 af = eflags & CC_A;
2269 al = EAX & 0xff;
2270 ah = (EAX >> 8) & 0xff;
2271
2272 icarry = (al < 6);
2273 if (((al & 0x0f) > 9 ) || af) {
2274 al = (al - 6) & 0x0f;
2275 ah = (ah - 1 - icarry) & 0xff;
2276 eflags |= CC_C | CC_A;
2277 } else {
2278 eflags &= ~(CC_C | CC_A);
2279 al &= 0x0f;
2280 }
2281 EAX = (EAX & ~0xffff) | al | (ah << 8);
2282 CC_SRC = eflags;
2283}
2284
2285void helper_daa(void)
2286{
2287 int al, af, cf;
2288 int eflags;
2289
2290 eflags = helper_cc_compute_all(CC_OP);
2291 cf = eflags & CC_C;
2292 af = eflags & CC_A;
2293 al = EAX & 0xff;
2294
2295 eflags = 0;
2296 if (((al & 0x0f) > 9 ) || af) {
2297 al = (al + 6) & 0xff;
2298 eflags |= CC_A;
2299 }
2300 if ((al > 0x9f) || cf) {
2301 al = (al + 0x60) & 0xff;
2302 eflags |= CC_C;
2303 }
2304 EAX = (EAX & ~0xff) | al;
2305 /* well, speed is not an issue here, so we compute the flags by hand */
2306 eflags |= (al == 0) << 6; /* zf */
2307 eflags |= parity_table[al]; /* pf */
2308 eflags |= (al & 0x80); /* sf */
2309 CC_SRC = eflags;
2310}
2311
2312void helper_das(void)
2313{
2314 int al, al1, af, cf;
2315 int eflags;
2316
2317 eflags = helper_cc_compute_all(CC_OP);
2318 cf = eflags & CC_C;
2319 af = eflags & CC_A;
2320 al = EAX & 0xff;
2321
2322 eflags = 0;
2323 al1 = al;
2324 if (((al & 0x0f) > 9 ) || af) {
2325 eflags |= CC_A;
2326 if (al < 6 || cf)
2327 eflags |= CC_C;
2328 al = (al - 6) & 0xff;
2329 }
2330 if ((al1 > 0x99) || cf) {
2331 al = (al - 0x60) & 0xff;
2332 eflags |= CC_C;
2333 }
2334 EAX = (EAX & ~0xff) | al;
2335 /* well, speed is not an issue here, so we compute the flags by hand */
2336 eflags |= (al == 0) << 6; /* zf */
2337 eflags |= parity_table[al]; /* pf */
2338 eflags |= (al & 0x80); /* sf */
2339 CC_SRC = eflags;
2340}
2341
2342void helper_into(int next_eip_addend)
2343{
2344 int eflags;
2345 eflags = helper_cc_compute_all(CC_OP);
2346 if (eflags & CC_O) {
2347 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
2348 }
2349}
2350
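/* CMPXCHG8B: compare EDX:EAX with the 64-bit memory operand; on a match
   store ECX:EBX there and set ZF, otherwise load the operand into EDX:EAX
   and clear ZF.  The operand is written back in both cases, matching the
   unconditional write cycle of the real instruction. */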
2351void helper_cmpxchg8b(target_ulong a0)
2352{
2353 uint64_t d;
2354 int eflags;
2355
2356 eflags = helper_cc_compute_all(CC_OP);
2357 d = ldq(a0);
2358 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
2359 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
2360 eflags |= CC_Z;
2361 } else {
2362 /* always do the store */
2363 stq(a0, d);
2364 EDX = (uint32_t)(d >> 32);
2365 EAX = (uint32_t)d;
2366 eflags &= ~CC_Z;
2367 }
2368 CC_SRC = eflags;
2369}
2370
2371#ifdef TARGET_X86_64
2372void helper_cmpxchg16b(target_ulong a0)
2373{
2374 uint64_t d0, d1;
2375 int eflags;
2376
2377 if ((a0 & 0xf) != 0)
2378 raise_exception(EXCP0D_GPF);
2379 eflags = helper_cc_compute_all(CC_OP);
2380 d0 = ldq(a0);
2381 d1 = ldq(a0 + 8);
2382 if (d0 == EAX && d1 == EDX) {
2383 stq(a0, EBX);
2384 stq(a0 + 8, ECX);
2385 eflags |= CC_Z;
2386 } else {
2387 /* always do the store */
2388 stq(a0, d0);
2389 stq(a0 + 8, d1);
2390 EDX = d1;
2391 EAX = d0;
2392 eflags &= ~CC_Z;
2393 }
2394 CC_SRC = eflags;
2395}
2396#endif
2397
2398void helper_single_step(void)
2399{
2400#ifndef CONFIG_USER_ONLY
2401 check_hw_breakpoints(env, 1);
2402 env->dr[6] |= DR6_BS;
2403#endif
2404 raise_exception(EXCP01_DB);
2405}
2406
2407void helper_cpuid(void)
2408{
2409 uint32_t eax, ebx, ecx, edx;
2410
2411 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
2412
2413 cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
2414 EAX = eax;
2415 EBX = ebx;
2416 ECX = ecx;
2417 EDX = edx;
2418}
2419
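/* Implements the frame-pointer copy part of ENTER for nesting levels > 0:
   level-1 outer frame pointers are copied from the old frame onto the new
   stack, and the new frame-pointer value passed in as t1 is pushed last. */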
2420void helper_enter_level(int level, int data32, target_ulong t1)
2421{
2422 target_ulong ssp;
2423 uint32_t esp_mask, esp, ebp;
2424
2425 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2426 ssp = env->segs[R_SS].base;
2427 ebp = EBP;
2428 esp = ESP;
2429 if (data32) {
2430 /* 32 bit */
2431 esp -= 4;
2432 while (--level) {
2433 esp -= 4;
2434 ebp -= 4;
2435 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2436 }
2437 esp -= 4;
2438 stl(ssp + (esp & esp_mask), t1);
2439 } else {
2440 /* 16 bit */
2441 esp -= 2;
2442 while (--level) {
2443 esp -= 2;
2444 ebp -= 2;
2445 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2446 }
2447 esp -= 2;
2448 stw(ssp + (esp & esp_mask), t1);
2449 }
2450}
2451
2452#ifdef TARGET_X86_64
2453void helper_enter64_level(int level, int data64, target_ulong t1)
2454{
2455 target_ulong esp, ebp;
2456 ebp = EBP;
2457 esp = ESP;
2458
2459 if (data64) {
2460 /* 64 bit */
2461 esp -= 8;
2462 while (--level) {
2463 esp -= 8;
2464 ebp -= 8;
2465 stq(esp, ldq(ebp));
2466 }
2467 esp -= 8;
2468 stq(esp, t1);
2469 } else {
2470 /* 16 bit */
2471 esp -= 2;
2472 while (--level) {
2473 esp -= 2;
2474 ebp -= 2;
2475 stw(esp, lduw(ebp));
2476 }
2477 esp -= 2;
2478 stw(esp, t1);
2479 }
2480}
2481#endif
2482
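/* LLDT: the selector must point into the GDT (TI bit clear) at an LDT system
   descriptor (type 2).  In long mode the descriptor is 16 bytes and its third
   dword supplies bits 63:32 of the base address. */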
2483void helper_lldt(int selector)
2484{
2485 SegmentCache *dt;
2486 uint32_t e1, e2;
2487#ifndef VBOX
2488 int index, entry_limit;
2489#else
2490 unsigned int index, entry_limit;
2491#endif
2492 target_ulong ptr;
2493
2494#ifdef VBOX
2495 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%RGv, .limit=%RGv} new=%RTsel\n",
2496 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(selector & 0xffff)));
2497#endif
2498
2499 selector &= 0xffff;
2500 if ((selector & 0xfffc) == 0) {
2501 /* XXX: NULL selector case: invalid LDT */
2502 env->ldt.base = 0;
2503 env->ldt.limit = 0;
2504#ifdef VBOX
2505 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2506 env->ldt.newselector = 0;
2507#endif
2508 } else {
2509 if (selector & 0x4)
2510 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2511 dt = &env->gdt;
2512 index = selector & ~7;
2513#ifdef TARGET_X86_64
2514 if (env->hflags & HF_LMA_MASK)
2515 entry_limit = 15;
2516 else
2517#endif
2518 entry_limit = 7;
2519 if ((index + entry_limit) > dt->limit)
2520 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2521 ptr = dt->base + index;
2522 e1 = ldl_kernel(ptr);
2523 e2 = ldl_kernel(ptr + 4);
2524 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2525 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2526 if (!(e2 & DESC_P_MASK))
2527 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2528#ifdef TARGET_X86_64
2529 if (env->hflags & HF_LMA_MASK) {
2530 uint32_t e3;
2531 e3 = ldl_kernel(ptr + 8);
2532 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2533 env->ldt.base |= (target_ulong)e3 << 32;
2534 } else
2535#endif
2536 {
2537 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2538 }
2539 }
2540 env->ldt.selector = selector;
2541#ifdef VBOX
2542 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%RGv, .limit=%RGv}\n",
2543 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2544#endif
2545}
2546
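/* LTR: the selector must reference an available TSS descriptor in the GDT
   (type 1 for a 16-bit TSS, type 9 for a 32/64-bit TSS); the descriptor is
   then marked busy by writing the type field back. */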
2547void helper_ltr(int selector)
2548{
2549 SegmentCache *dt;
2550 uint32_t e1, e2;
2551#ifndef VBOX
2552 int index, type, entry_limit;
2553#else
2554 unsigned int index;
2555 int type, entry_limit;
2556#endif
2557 target_ulong ptr;
2558
2559#ifdef VBOX
2560 Log(("helper_ltr: pc=%RGv old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2561 (RTGCPTR)env->eip, (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2562 env->tr.flags, (RTSEL)(selector & 0xffff)));
2563#endif
2564 selector &= 0xffff;
2565 if ((selector & 0xfffc) == 0) {
2566 /* NULL selector case: invalid TR */
2567 env->tr.base = 0;
2568 env->tr.limit = 0;
2569 env->tr.flags = 0;
2570#ifdef VBOX
2571 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2572 env->tr.newselector = 0;
2573#endif
2574 } else {
2575 if (selector & 0x4)
2576 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2577 dt = &env->gdt;
2578 index = selector & ~7;
2579#ifdef TARGET_X86_64
2580 if (env->hflags & HF_LMA_MASK)
2581 entry_limit = 15;
2582 else
2583#endif
2584 entry_limit = 7;
2585 if ((index + entry_limit) > dt->limit)
2586 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2587 ptr = dt->base + index;
2588 e1 = ldl_kernel(ptr);
2589 e2 = ldl_kernel(ptr + 4);
2590 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2591 if ((e2 & DESC_S_MASK) ||
2592 (type != 1 && type != 9))
2593 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2594 if (!(e2 & DESC_P_MASK))
2595 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2596#ifdef TARGET_X86_64
2597 if (env->hflags & HF_LMA_MASK) {
2598 uint32_t e3, e4;
2599 e3 = ldl_kernel(ptr + 8);
2600 e4 = ldl_kernel(ptr + 12);
2601 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2602 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2603 load_seg_cache_raw_dt(&env->tr, e1, e2);
2604 env->tr.base |= (target_ulong)e3 << 32;
2605 } else
2606#endif
2607 {
2608 load_seg_cache_raw_dt(&env->tr, e1, e2);
2609 }
2610 e2 |= DESC_TSS_BUSY_MASK;
2611 stl_kernel(ptr + 4, e2);
2612 }
2613 env->tr.selector = selector;
2614#ifdef VBOX
2615 Log(("helper_ltr: new tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2616 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2617 env->tr.flags, (RTSEL)(selector & 0xffff)));
2618#endif
2619}
2620
2621/* only works if protected mode and not VM86. seg_reg must be != R_CS */
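/* SS additionally requires a writable data segment with RPL == DPL == CPL;
   the other registers accept any readable segment and, for data or
   non-conforming code segments, DPL must be >= both CPL and RPL.  A
   not-present segment raises #SS for SS and #NP otherwise. */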
2622void helper_load_seg(int seg_reg, int selector)
2623{
2624 uint32_t e1, e2;
2625 int cpl, dpl, rpl;
2626 SegmentCache *dt;
2627#ifndef VBOX
2628 int index;
2629#else
2630 unsigned int index;
2631#endif
2632 target_ulong ptr;
2633
2634 selector &= 0xffff;
2635 cpl = env->hflags & HF_CPL_MASK;
2636#ifdef VBOX
2637
2638 /* Trying to load a selector with RPL=1 while running raw ring-0 code? */
2639 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2640 {
2641 Log(("RPL 1 -> sel %04X -> %04X (helper_load_seg)\n", selector, selector & 0xfffc));
2642 selector = selector & 0xfffc;
2643 }
2644#endif /* VBOX */
2645 if ((selector & 0xfffc) == 0) {
2646 /* null selector case */
2647 if (seg_reg == R_SS
2648#ifdef TARGET_X86_64
2649 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2650#endif
2651 )
2652 raise_exception_err(EXCP0D_GPF, 0);
2653 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2654 } else {
2655
2656 if (selector & 0x4)
2657 dt = &env->ldt;
2658 else
2659 dt = &env->gdt;
2660 index = selector & ~7;
2661 if ((index + 7) > dt->limit)
2662 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2663 ptr = dt->base + index;
2664 e1 = ldl_kernel(ptr);
2665 e2 = ldl_kernel(ptr + 4);
2666
2667 if (!(e2 & DESC_S_MASK))
2668 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2669 rpl = selector & 3;
2670 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2671 if (seg_reg == R_SS) {
2672 /* must be writable segment */
2673 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2674 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2675 if (rpl != cpl || dpl != cpl)
2676 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2677 } else {
2678 /* must be readable segment */
2679 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2680 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2681
2682 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2683 /* if not conforming code, test rights */
2684 if (dpl < cpl || dpl < rpl)
2685 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2686 }
2687 }
2688
2689 if (!(e2 & DESC_P_MASK)) {
2690 if (seg_reg == R_SS)
2691 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2692 else
2693 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2694 }
2695
2696 /* set the access bit if not already set */
2697 if (!(e2 & DESC_A_MASK)) {
2698 e2 |= DESC_A_MASK;
2699 stl_kernel(ptr + 4, e2);
2700 }
2701
2702 cpu_x86_load_seg_cache(env, seg_reg, selector,
2703 get_seg_base(e1, e2),
2704 get_seg_limit(e1, e2),
2705 e2);
2706#if 0
2707 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2708 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2709#endif
2710 }
2711}
2712
2713/* protected mode jump */
2714void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2715 int next_eip_addend)
2716{
2717 int gate_cs, type;
2718 uint32_t e1, e2, cpl, dpl, rpl, limit;
2719 target_ulong next_eip;
2720
2721#ifdef VBOX /** @todo Why do we do this? */
2722 e1 = e2 = 0;
2723#endif
2724 if ((new_cs & 0xfffc) == 0)
2725 raise_exception_err(EXCP0D_GPF, 0);
2726 if (load_segment(&e1, &e2, new_cs) != 0)
2727 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2728 cpl = env->hflags & HF_CPL_MASK;
2729 if (e2 & DESC_S_MASK) {
2730 if (!(e2 & DESC_CS_MASK))
2731 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2732 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2733 if (e2 & DESC_C_MASK) {
2734 /* conforming code segment */
2735 if (dpl > cpl)
2736 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2737 } else {
2738 /* non conforming code segment */
2739 rpl = new_cs & 3;
2740 if (rpl > cpl)
2741 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2742 if (dpl != cpl)
2743 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2744 }
2745 if (!(e2 & DESC_P_MASK))
2746 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2747 limit = get_seg_limit(e1, e2);
2748 if (new_eip > limit &&
2749 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2750 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2751 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2752 get_seg_base(e1, e2), limit, e2);
2753 EIP = new_eip;
2754 } else {
2755 /* jump to call or task gate */
2756 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2757 rpl = new_cs & 3;
2758 cpl = env->hflags & HF_CPL_MASK;
2759 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2760 switch(type) {
2761 case 1: /* 286 TSS */
2762 case 9: /* 386 TSS */
2763 case 5: /* task gate */
2764 if (dpl < cpl || dpl < rpl)
2765 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2766 next_eip = env->eip + next_eip_addend;
2767 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2768 CC_OP = CC_OP_EFLAGS;
2769 break;
2770 case 4: /* 286 call gate */
2771 case 12: /* 386 call gate */
2772 if ((dpl < cpl) || (dpl < rpl))
2773 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2774 if (!(e2 & DESC_P_MASK))
2775 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2776 gate_cs = e1 >> 16;
2777 new_eip = (e1 & 0xffff);
2778 if (type == 12)
2779 new_eip |= (e2 & 0xffff0000);
2780 if (load_segment(&e1, &e2, gate_cs) != 0)
2781 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2782 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2783 /* must be code segment */
2784 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2785 (DESC_S_MASK | DESC_CS_MASK)))
2786 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2787 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2788 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2789 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2790 if (!(e2 & DESC_P_MASK))
2791#ifdef VBOX /* See page 3-514 of 253666.pdf */
2792 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2793#else
2794 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2795#endif
2796 limit = get_seg_limit(e1, e2);
2797 if (new_eip > limit)
2798 raise_exception_err(EXCP0D_GPF, 0);
2799 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2800 get_seg_base(e1, e2), limit, e2);
2801 EIP = new_eip;
2802 break;
2803 default:
2804 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2805 break;
2806 }
2807 }
2808}
2809
2810/* real mode call */
2811void helper_lcall_real(int new_cs, target_ulong new_eip1,
2812 int shift, int next_eip)
2813{
2814 int new_eip;
2815 uint32_t esp, esp_mask;
2816 target_ulong ssp;
2817
2818 new_eip = new_eip1;
2819 esp = ESP;
2820 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2821 ssp = env->segs[R_SS].base;
2822 if (shift) {
2823 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2824 PUSHL(ssp, esp, esp_mask, next_eip);
2825 } else {
2826 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2827 PUSHW(ssp, esp, esp_mask, next_eip);
2828 }
2829
2830 SET_ESP(esp, esp_mask);
2831 env->eip = new_eip;
2832 env->segs[R_CS].selector = new_cs;
2833 env->segs[R_CS].base = (new_cs << 4);
2834}
2835
2836/* protected mode call */
2837void helper_lcall_protected(int new_cs, target_ulong new_eip,
2838 int shift, int next_eip_addend)
2839{
2840 int new_stack, i;
2841 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2842 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
2843 uint32_t val, limit, old_sp_mask;
2844 target_ulong ssp, old_ssp, next_eip;
2845
2846#ifdef VBOX /** @todo Why do we do this? */
2847 e1 = e2 = 0;
2848#endif
2849 next_eip = env->eip + next_eip_addend;
2850 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2851 LOG_PCALL_STATE(env);
2852 if ((new_cs & 0xfffc) == 0)
2853 raise_exception_err(EXCP0D_GPF, 0);
2854 if (load_segment(&e1, &e2, new_cs) != 0)
2855 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2856 cpl = env->hflags & HF_CPL_MASK;
2857 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
2858 if (e2 & DESC_S_MASK) {
2859 if (!(e2 & DESC_CS_MASK))
2860 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2861 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2862 if (e2 & DESC_C_MASK) {
2863 /* conforming code segment */
2864 if (dpl > cpl)
2865 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2866 } else {
2867 /* non conforming code segment */
2868 rpl = new_cs & 3;
2869 if (rpl > cpl)
2870 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2871 if (dpl != cpl)
2872 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2873 }
2874 if (!(e2 & DESC_P_MASK))
2875 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2876
2877#ifdef TARGET_X86_64
2878 /* XXX: check 16/32 bit cases in long mode */
2879 if (shift == 2) {
2880 target_ulong rsp;
2881 /* 64 bit case */
2882 rsp = ESP;
2883 PUSHQ(rsp, env->segs[R_CS].selector);
2884 PUSHQ(rsp, next_eip);
2885 /* from this point, not restartable */
2886 ESP = rsp;
2887 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2888 get_seg_base(e1, e2),
2889 get_seg_limit(e1, e2), e2);
2890 EIP = new_eip;
2891 } else
2892#endif
2893 {
2894 sp = ESP;
2895 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2896 ssp = env->segs[R_SS].base;
2897 if (shift) {
2898 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2899 PUSHL(ssp, sp, sp_mask, next_eip);
2900 } else {
2901 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2902 PUSHW(ssp, sp, sp_mask, next_eip);
2903 }
2904
2905 limit = get_seg_limit(e1, e2);
2906 if (new_eip > limit)
2907 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2908 /* from this point, not restartable */
2909 SET_ESP(sp, sp_mask);
2910 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2911 get_seg_base(e1, e2), limit, e2);
2912 EIP = new_eip;
2913 }
2914 } else {
2915 /* check gate type */
2916 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2917 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2918 rpl = new_cs & 3;
2919 switch(type) {
2920 case 1: /* available 286 TSS */
2921 case 9: /* available 386 TSS */
2922 case 5: /* task gate */
2923 if (dpl < cpl || dpl < rpl)
2924 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2925 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2926 CC_OP = CC_OP_EFLAGS;
2927 return;
2928 case 4: /* 286 call gate */
2929 case 12: /* 386 call gate */
2930 break;
2931 default:
2932 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2933 break;
2934 }
2935 shift = type >> 3;
2936
2937 if (dpl < cpl || dpl < rpl)
2938 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2939 /* check valid bit */
2940 if (!(e2 & DESC_P_MASK))
2941 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2942 selector = e1 >> 16;
2943 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2944 param_count = e2 & 0x1f;
2945 if ((selector & 0xfffc) == 0)
2946 raise_exception_err(EXCP0D_GPF, 0);
2947
2948 if (load_segment(&e1, &e2, selector) != 0)
2949 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2950 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2951 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2952 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2953 if (dpl > cpl)
2954 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2955 if (!(e2 & DESC_P_MASK))
2956 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2957
2958 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2959 /* to inner privilege */
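            /* A call gate to a more privileged code segment switches stacks:
               the new SS:ESP is fetched from the TSS for the target DPL, the
               caller's SS:ESP is pushed on the new stack, param_count
               parameters are copied over from the old stack, and finally the
               return CS:EIP is pushed. */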
2960 get_ss_esp_from_tss(&ss, &sp, dpl);
2961 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2962 ss, sp, param_count, ESP);
2963 if ((ss & 0xfffc) == 0)
2964 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2965 if ((ss & 3) != dpl)
2966 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2967 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2968 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2969 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2970 if (ss_dpl != dpl)
2971 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2972 if (!(ss_e2 & DESC_S_MASK) ||
2973 (ss_e2 & DESC_CS_MASK) ||
2974 !(ss_e2 & DESC_W_MASK))
2975 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2976 if (!(ss_e2 & DESC_P_MASK))
2977#ifdef VBOX /* See page 3-99 of 253666.pdf */
2978 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
2979#else
2980 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2981#endif
2982
2983 // push_size = ((param_count * 2) + 8) << shift;
2984
2985 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2986 old_ssp = env->segs[R_SS].base;
2987
2988 sp_mask = get_sp_mask(ss_e2);
2989 ssp = get_seg_base(ss_e1, ss_e2);
2990 if (shift) {
2991 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2992 PUSHL(ssp, sp, sp_mask, ESP);
2993 for(i = param_count - 1; i >= 0; i--) {
2994 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2995 PUSHL(ssp, sp, sp_mask, val);
2996 }
2997 } else {
2998 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2999 PUSHW(ssp, sp, sp_mask, ESP);
3000 for(i = param_count - 1; i >= 0; i--) {
3001 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
3002 PUSHW(ssp, sp, sp_mask, val);
3003 }
3004 }
3005 new_stack = 1;
3006 } else {
3007 /* to same privilege */
3008 sp = ESP;
3009 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3010 ssp = env->segs[R_SS].base;
3011 // push_size = (4 << shift);
3012 new_stack = 0;
3013 }
3014
3015 if (shift) {
3016 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
3017 PUSHL(ssp, sp, sp_mask, next_eip);
3018 } else {
3019 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
3020 PUSHW(ssp, sp, sp_mask, next_eip);
3021 }
3022
3023 /* from this point, not restartable */
3024
3025 if (new_stack) {
3026 ss = (ss & ~3) | dpl;
3027 cpu_x86_load_seg_cache(env, R_SS, ss,
3028 ssp,
3029 get_seg_limit(ss_e1, ss_e2),
3030 ss_e2);
3031 }
3032
3033 selector = (selector & ~3) | dpl;
3034 cpu_x86_load_seg_cache(env, R_CS, selector,
3035 get_seg_base(e1, e2),
3036 get_seg_limit(e1, e2),
3037 e2);
3038 cpu_x86_set_cpl(env, dpl);
3039 SET_ESP(sp, sp_mask);
3040 EIP = offset;
3041 }
3042}
3043
3044/* real and vm86 mode iret */
3045void helper_iret_real(int shift)
3046{
3047 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
3048 target_ulong ssp;
3049 int eflags_mask;
3050#ifdef VBOX
3051 bool fVME = false;
3052
3053 remR3TrapClear(env->pVM);
3054#endif /* VBOX */
3055
3056 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
3057 sp = ESP;
3058 ssp = env->segs[R_SS].base;
3059 if (shift == 1) {
3060 /* 32 bits */
3061 POPL(ssp, sp, sp_mask, new_eip);
3062 POPL(ssp, sp, sp_mask, new_cs);
3063 new_cs &= 0xffff;
3064 POPL(ssp, sp, sp_mask, new_eflags);
3065 } else {
3066 /* 16 bits */
3067 POPW(ssp, sp, sp_mask, new_eip);
3068 POPW(ssp, sp, sp_mask, new_cs);
3069 POPW(ssp, sp, sp_mask, new_eflags);
3070 }
3071#ifdef VBOX
3072 if ( (env->eflags & VM_MASK)
3073 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
3074 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
3075 {
3076 fVME = true;
3077 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
3078 /* if TF will be set -> #GP */
3079 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
3080 || (new_eflags & TF_MASK))
3081 raise_exception(EXCP0D_GPF);
3082 }
3083#endif /* VBOX */
3084 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
3085 env->segs[R_CS].selector = new_cs;
3086 env->segs[R_CS].base = (new_cs << 4);
3087 env->eip = new_eip;
3088#ifdef VBOX
3089 if (fVME)
3090 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3091 else
3092#endif
3093 if (env->eflags & VM_MASK)
3094 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
3095 else
3096 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
3097 if (shift == 0)
3098 eflags_mask &= 0xffff;
3099 load_eflags(new_eflags, eflags_mask);
3100 env->hflags2 &= ~HF2_NMI_MASK;
3101#ifdef VBOX
3102 if (fVME)
3103 {
3104 if (new_eflags & IF_MASK)
3105 env->eflags |= VIF_MASK;
3106 else
3107 env->eflags &= ~VIF_MASK;
3108 }
3109#endif /* VBOX */
3110}
3111
3112static inline void validate_seg(int seg_reg, int cpl)
3113{
3114 int dpl;
3115 uint32_t e2;
3116
3117 /* XXX: on x86_64, we do not want to nullify FS and GS because
3118 they may still contain a valid base. I would be interested to
3119 know how a real x86_64 CPU behaves */
3120 if ((seg_reg == R_FS || seg_reg == R_GS) &&
3121 (env->segs[seg_reg].selector & 0xfffc) == 0)
3122 return;
3123
3124 e2 = env->segs[seg_reg].flags;
3125 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3126 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
3127 /* data or non conforming code segment */
3128 if (dpl < cpl) {
3129 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
3130 }
3131 }
3132}
3133
3134/* protected mode iret */
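/* Common body of LRET and IRET: 'shift' selects the operand size (0 = 16,
   1 = 32, 2 = 64 bit), 'is_iret' additionally pops EFLAGS, and 'addend'
   releases extra stack bytes (e.g. for 'lret imm16'). */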
3135static inline void helper_ret_protected(int shift, int is_iret, int addend)
3136{
3137 uint32_t new_cs, new_eflags, new_ss;
3138 uint32_t new_es, new_ds, new_fs, new_gs;
3139 uint32_t e1, e2, ss_e1, ss_e2;
3140 int cpl, dpl, rpl, eflags_mask, iopl;
3141 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
3142
3143#ifdef VBOX /** @todo Why do we do this? */
3144 ss_e1 = ss_e2 = e1 = e2 = 0;
3145#endif
3146
3147#ifdef TARGET_X86_64
3148 if (shift == 2)
3149 sp_mask = -1;
3150 else
3151#endif
3152 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3153 sp = ESP;
3154 ssp = env->segs[R_SS].base;
3155 new_eflags = 0; /* avoid warning */
3156#ifdef TARGET_X86_64
3157 if (shift == 2) {
3158 POPQ(sp, new_eip);
3159 POPQ(sp, new_cs);
3160 new_cs &= 0xffff;
3161 if (is_iret) {
3162 POPQ(sp, new_eflags);
3163 }
3164 } else
3165#endif
3166 if (shift == 1) {
3167 /* 32 bits */
3168 POPL(ssp, sp, sp_mask, new_eip);
3169 POPL(ssp, sp, sp_mask, new_cs);
3170 new_cs &= 0xffff;
3171 if (is_iret) {
3172 POPL(ssp, sp, sp_mask, new_eflags);
3173#define LOG_GROUP LOG_GROUP_REM
3174#if defined(VBOX) && defined(DEBUG)
3175 Log(("iret: new CS %04X (old=%x)\n", new_cs, env->segs[R_CS].selector));
3176 Log(("iret: new EIP %08X\n", (uint32_t)new_eip));
3177 Log(("iret: new EFLAGS %08X\n", new_eflags));
3178 Log(("iret: EAX=%08x\n", (uint32_t)EAX));
3179#endif
3180 if (new_eflags & VM_MASK)
3181 goto return_to_vm86;
3182 }
3183#ifdef VBOX
3184 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
3185 {
3186 if ( !EMIsRawRing1Enabled(env->pVM)
3187 || env->segs[R_CS].selector == (new_cs & 0xfffc))
3188 {
3189 Log(("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc));
3190 new_cs = new_cs & 0xfffc;
3191 }
3192 else
3193 {
3194 /* Ugly assumption: assume a genuine switch to ring-1. */
3195 Log(("Genuine switch to ring-1 (iret)\n"));
3196 }
3197 }
3198 else if ((new_cs & 0x3) == 2 && (env->state & CPU_RAW_RING0) && EMIsRawRing1Enabled(env->pVM))
3199 {
3200 Log(("RPL 2 -> new_cs %04X -> %04X\n", new_cs, (new_cs & 0xfffc) | 1));
3201 new_cs = (new_cs & 0xfffc) | 1;
3202 }
3203#endif
3204 } else {
3205 /* 16 bits */
3206 POPW(ssp, sp, sp_mask, new_eip);
3207 POPW(ssp, sp, sp_mask, new_cs);
3208 if (is_iret)
3209 POPW(ssp, sp, sp_mask, new_eflags);
3210 }
3211 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
3212 new_cs, new_eip, shift, addend);
3213 LOG_PCALL_STATE(env);
3214 if ((new_cs & 0xfffc) == 0)
3215 {
3216#if defined(VBOX) && defined(DEBUG)
3217 Log(("(new_cs & 0xfffc) == 0\n"));
3218#endif
3219 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3220 }
3221 if (load_segment(&e1, &e2, new_cs) != 0)
3222 {
3223#if defined(VBOX) && defined(DEBUG)
3224 Log(("load_segment failed\n"));
3225#endif
3226 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3227 }
3228 if (!(e2 & DESC_S_MASK) ||
3229 !(e2 & DESC_CS_MASK))
3230 {
3231#if defined(VBOX) && defined(DEBUG)
3232 Log(("e2 mask %08x\n", e2));
3233#endif
3234 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3235 }
3236 cpl = env->hflags & HF_CPL_MASK;
3237 rpl = new_cs & 3;
3238 if (rpl < cpl)
3239 {
3240#if defined(VBOX) && defined(DEBUG)
3241 Log(("rpl < cpl (%d vs %d)\n", rpl, cpl));
3242#endif
3243 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3244 }
3245 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3246
3247 if (e2 & DESC_C_MASK) {
3248 if (dpl > rpl)
3249 {
3250#if defined(VBOX) && defined(DEBUG)
3251 Log(("dpl > rpl (%d vs %d)\n", dpl, rpl));
3252#endif
3253 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3254 }
3255 } else {
3256 if (dpl != rpl)
3257 {
3258#if defined(VBOX) && defined(DEBUG)
3259 Log(("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2));
3260#endif
3261 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3262 }
3263 }
3264 if (!(e2 & DESC_P_MASK))
3265 {
3266#if defined(VBOX) && defined(DEBUG)
3267 Log(("DESC_P_MASK e2=%08x\n", e2));
3268#endif
3269 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3270 }
3271
3272 sp += addend;
3273 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
3274 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
3275 /* return to same privilege level */
3276#ifdef VBOX
3277 if (!(e2 & DESC_A_MASK))
3278 e2 = set_segment_accessed(new_cs, e2);
3279#endif
3280 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3281 get_seg_base(e1, e2),
3282 get_seg_limit(e1, e2),
3283 e2);
3284 } else {
3285 /* return to different privilege level */
3286#ifdef TARGET_X86_64
3287 if (shift == 2) {
3288 POPQ(sp, new_esp);
3289 POPQ(sp, new_ss);
3290 new_ss &= 0xffff;
3291 } else
3292#endif
3293 if (shift == 1) {
3294 /* 32 bits */
3295 POPL(ssp, sp, sp_mask, new_esp);
3296 POPL(ssp, sp, sp_mask, new_ss);
3297 new_ss &= 0xffff;
3298 } else {
3299 /* 16 bits */
3300 POPW(ssp, sp, sp_mask, new_esp);
3301 POPW(ssp, sp, sp_mask, new_ss);
3302 }
3303 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
3304 new_ss, new_esp);
3305 if ((new_ss & 0xfffc) == 0) {
3306#ifdef TARGET_X86_64
3307 /* NULL ss is allowed in long mode if the new CPL (== rpl here) != 3 */
3308 /* XXX: test CS64 ? */
3309 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
3310# ifdef VBOX
3311 if (!(e2 & DESC_A_MASK))
3312 e2 = set_segment_accessed(new_cs, e2);
3313# endif
3314 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3315 0, 0xffffffff,
3316 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3317 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
3318 DESC_W_MASK | DESC_A_MASK);
3319 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
3320 } else
3321#endif
3322 {
3323 raise_exception_err(EXCP0D_GPF, 0);
3324 }
3325 } else {
3326 if ((new_ss & 3) != rpl)
3327 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3328 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
3329 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3330 if (!(ss_e2 & DESC_S_MASK) ||
3331 (ss_e2 & DESC_CS_MASK) ||
3332 !(ss_e2 & DESC_W_MASK))
3333 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3334 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3335 if (dpl != rpl)
3336 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3337 if (!(ss_e2 & DESC_P_MASK))
3338 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
3339#ifdef VBOX
3340 if (!(e2 & DESC_A_MASK))
3341 e2 = set_segment_accessed(new_cs, e2);
3342 if (!(ss_e2 & DESC_A_MASK))
3343 ss_e2 = set_segment_accessed(new_ss, ss_e2);
3344#endif
3345 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3346 get_seg_base(ss_e1, ss_e2),
3347 get_seg_limit(ss_e1, ss_e2),
3348 ss_e2);
3349 }
3350
3351 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3352 get_seg_base(e1, e2),
3353 get_seg_limit(e1, e2),
3354 e2);
3355 cpu_x86_set_cpl(env, rpl);
3356 sp = new_esp;
3357#ifdef TARGET_X86_64
3358 if (env->hflags & HF_CS64_MASK)
3359 sp_mask = -1;
3360 else
3361#endif
3362 sp_mask = get_sp_mask(ss_e2);
3363
3364 /* validate data segments */
3365 validate_seg(R_ES, rpl);
3366 validate_seg(R_DS, rpl);
3367 validate_seg(R_FS, rpl);
3368 validate_seg(R_GS, rpl);
3369
3370 sp += addend;
3371 }
3372 SET_ESP(sp, sp_mask);
3373 env->eip = new_eip;
3374 if (is_iret) {
3375 /* NOTE: 'cpl' is the _old_ CPL */
3376 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3377 if (cpl == 0)
3378#ifdef VBOX
3379 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
3380#else
3381 eflags_mask |= IOPL_MASK;
3382#endif
3383 iopl = (env->eflags >> IOPL_SHIFT) & 3;
3384 if (cpl <= iopl)
3385 eflags_mask |= IF_MASK;
3386 if (shift == 0)
3387 eflags_mask &= 0xffff;
3388 load_eflags(new_eflags, eflags_mask);
3389 }
3390 return;
3391
3392 return_to_vm86:
3393 POPL(ssp, sp, sp_mask, new_esp);
3394 POPL(ssp, sp, sp_mask, new_ss);
3395 POPL(ssp, sp, sp_mask, new_es);
3396 POPL(ssp, sp, sp_mask, new_ds);
3397 POPL(ssp, sp, sp_mask, new_fs);
3398 POPL(ssp, sp, sp_mask, new_gs);
3399
3400 /* modify processor state */
3401 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
3402 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
3403 load_seg_vm(R_CS, new_cs & 0xffff);
3404 cpu_x86_set_cpl(env, 3);
3405 load_seg_vm(R_SS, new_ss & 0xffff);
3406 load_seg_vm(R_ES, new_es & 0xffff);
3407 load_seg_vm(R_DS, new_ds & 0xffff);
3408 load_seg_vm(R_FS, new_fs & 0xffff);
3409 load_seg_vm(R_GS, new_gs & 0xffff);
3410
3411 env->eip = new_eip & 0xffff;
3412 ESP = new_esp;
3413}
3414
3415void helper_iret_protected(int shift, int next_eip)
3416{
3417 int tss_selector, type;
3418 uint32_t e1, e2;
3419
3420#ifdef VBOX
3421 e1 = e2 = 0; /** @todo Why do we do this? */
3422 remR3TrapClear(env->pVM);
3423#endif
3424
3425 /* specific case for TSS */
3426 if (env->eflags & NT_MASK) {
3427#ifdef TARGET_X86_64
3428 if (env->hflags & HF_LMA_MASK)
3429 raise_exception_err(EXCP0D_GPF, 0);
3430#endif
3431 tss_selector = lduw_kernel(env->tr.base + 0);
3432 if (tss_selector & 4)
3433 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3434 if (load_segment(&e1, &e2, tss_selector) != 0)
3435 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3436 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
3437 /* NOTE: we check both segment and busy TSS */
3438 if (type != 3)
3439 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3440 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
3441 } else {
3442 helper_ret_protected(shift, 1, 0);
3443 }
3444 env->hflags2 &= ~HF2_NMI_MASK;
3445}
3446
3447void helper_lret_protected(int shift, int addend)
3448{
3449 helper_ret_protected(shift, 0, addend);
3450}
3451
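/* SYSENTER loads a flat CPL0 code/stack segment pair derived from
   IA32_SYSENTER_CS (CS = sel, SS = sel + 8) and continues at
   IA32_SYSENTER_EIP with ESP = IA32_SYSENTER_ESP; helper_sysexit below does
   the inverse transition back to CPL 3, taking the new ESP/EIP from ECX/EDX. */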
3452void helper_sysenter(void)
3453{
3454 if (env->sysenter_cs == 0) {
3455 raise_exception_err(EXCP0D_GPF, 0);
3456 }
3457 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
3458 cpu_x86_set_cpl(env, 0);
3459
3460#ifdef TARGET_X86_64
3461 if (env->hflags & HF_LMA_MASK) {
3462 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3463 0, 0xffffffff,
3464 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3465 DESC_S_MASK |
3466 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3467 } else
3468#endif
3469 {
3470 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3471 0, 0xffffffff,
3472 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3473 DESC_S_MASK |
3474 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3475 }
3476 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
3477 0, 0xffffffff,
3478 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3479 DESC_S_MASK |
3480 DESC_W_MASK | DESC_A_MASK);
3481 ESP = env->sysenter_esp;
3482 EIP = env->sysenter_eip;
3483}
3484
3485void helper_sysexit(int dflag)
3486{
3487 int cpl;
3488
3489 cpl = env->hflags & HF_CPL_MASK;
3490 if (env->sysenter_cs == 0 || cpl != 0) {
3491 raise_exception_err(EXCP0D_GPF, 0);
3492 }
3493 cpu_x86_set_cpl(env, 3);
3494#ifdef TARGET_X86_64
3495 if (dflag == 2) {
3496 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
3497 0, 0xffffffff,
3498 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3499 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3500 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3501 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
3502 0, 0xffffffff,
3503 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3504 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3505 DESC_W_MASK | DESC_A_MASK);
3506 } else
3507#endif
3508 {
3509 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
3510 0, 0xffffffff,
3511 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3512 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3513 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3514 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3515 0, 0xffffffff,
3516 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3517 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3518 DESC_W_MASK | DESC_A_MASK);
3519 }
3520 ESP = ECX;
3521 EIP = EDX;
3522}
3523
3524#if defined(CONFIG_USER_ONLY)
3525target_ulong helper_read_crN(int reg)
3526{
3527 return 0;
3528}
3529
3530void helper_write_crN(int reg, target_ulong t0)
3531{
3532}
3533
3534void helper_movl_drN_T0(int reg, target_ulong t0)
3535{
3536}
3537#else
3538target_ulong helper_read_crN(int reg)
3539{
3540 target_ulong val;
3541
3542 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3543 switch(reg) {
3544 default:
3545 val = env->cr[reg];
3546 break;
3547 case 8:
3548 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3549#ifndef VBOX
3550 val = cpu_get_apic_tpr(env->apic_state);
3551#else /* VBOX */
3552 val = cpu_get_apic_tpr(env);
3553#endif /* VBOX */
3554 } else {
3555 val = env->v_tpr;
3556 }
3557 break;
3558 }
3559 return val;
3560}
3561
3562void helper_write_crN(int reg, target_ulong t0)
3563{
3564 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3565 switch(reg) {
3566 case 0:
3567 cpu_x86_update_cr0(env, t0);
3568 break;
3569 case 3:
3570 cpu_x86_update_cr3(env, t0);
3571 break;
3572 case 4:
3573 cpu_x86_update_cr4(env, t0);
3574 break;
3575 case 8:
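        /* CR8 is an alias of the local APIC task-priority register; while a
           virtual-interrupt intercept is active (HF2_VINTR) only the shadow
           v_tpr is updated. */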
3576 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3577#ifndef VBOX
3578 cpu_set_apic_tpr(env->apic_state, t0);
3579#else /* VBOX */
3580 cpu_set_apic_tpr(env, t0);
3581#endif /* VBOX */
3582 }
3583 env->v_tpr = t0 & 0x0f;
3584 break;
3585 default:
3586 env->cr[reg] = t0;
3587 break;
3588 }
3589}
3590
3591void helper_movl_drN_T0(int reg, target_ulong t0)
3592{
3593 int i;
3594
3595 if (reg < 4) {
3596 hw_breakpoint_remove(env, reg);
3597 env->dr[reg] = t0;
3598 hw_breakpoint_insert(env, reg);
3599 } else if (reg == 7) {
3600 for (i = 0; i < 4; i++)
3601 hw_breakpoint_remove(env, i);
3602 env->dr[7] = t0;
3603 for (i = 0; i < 4; i++)
3604 hw_breakpoint_insert(env, i);
3605 } else
3606 env->dr[reg] = t0;
3607}
3608#endif
3609
3610void helper_lmsw(target_ulong t0)
3611{
3612 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
3613 if already set to one. */
3614 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3615 helper_write_crN(0, t0);
3616}
3617
3618void helper_clts(void)
3619{
3620 env->cr[0] &= ~CR0_TS_MASK;
3621 env->hflags &= ~HF_TS_MASK;
3622}
3623
3624void helper_invlpg(target_ulong addr)
3625{
3626 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3627 tlb_flush_page(env, addr);
3628}
3629
3630void helper_rdtsc(void)
3631{
3632 uint64_t val;
3633
3634 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3635 raise_exception(EXCP0D_GPF);
3636 }
3637 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3638
3639 val = cpu_get_tsc(env) + env->tsc_offset;
3640 EAX = (uint32_t)(val);
3641 EDX = (uint32_t)(val >> 32);
3642}
3643
3644void helper_rdtscp(void)
3645{
3646 helper_rdtsc();
3647#ifndef VBOX
3648 ECX = (uint32_t)(env->tsc_aux);
3649#else /* VBOX */
3650 uint64_t val;
3651 if (cpu_rdmsr(env, MSR_K8_TSC_AUX, &val) == 0)
3652 ECX = (uint32_t)(val);
3653 else
3654 ECX = 0;
3655#endif /* VBOX */
3656}
3657
3658void helper_rdpmc(void)
3659{
3660#ifdef VBOX
3661 /* If X86_CR4_PCE is *not* set, then CPL must be zero. */
3662 if (!(env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3663 raise_exception(EXCP0D_GPF);
3664 }
3665 /* Just return zero here; rather tricky to properly emulate this, especially as the specs are a mess. */
3666 EAX = 0;
3667 EDX = 0;
3668#else /* !VBOX */
3669 if (!(env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3670 raise_exception(EXCP0D_GPF);
3671 }
3672 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3673
3674 /* currently unimplemented */
3675 raise_exception_err(EXCP06_ILLOP, 0);
3676#endif /* !VBOX */
3677}
3678
3679#if defined(CONFIG_USER_ONLY)
3680void helper_wrmsr(void)
3681{
3682}
3683
3684void helper_rdmsr(void)
3685{
3686}
3687#else
3688void helper_wrmsr(void)
3689{
3690 uint64_t val;
3691
3692 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3693
3694 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3695
3696 switch((uint32_t)ECX) {
3697 case MSR_IA32_SYSENTER_CS:
3698 env->sysenter_cs = val & 0xffff;
3699 break;
3700 case MSR_IA32_SYSENTER_ESP:
3701 env->sysenter_esp = val;
3702 break;
3703 case MSR_IA32_SYSENTER_EIP:
3704 env->sysenter_eip = val;
3705 break;
3706 case MSR_IA32_APICBASE:
3707# ifndef VBOX /* The CPUMSetGuestMsr call below does this now. */
3708 cpu_set_apic_base(env->apic_state, val);
3709# endif
3710 break;
3711 case MSR_EFER:
3712 {
3713 uint64_t update_mask;
3714 update_mask = 0;
3715 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3716 update_mask |= MSR_EFER_SCE;
3717 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3718 update_mask |= MSR_EFER_LME;
3719 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3720 update_mask |= MSR_EFER_FFXSR;
3721 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3722 update_mask |= MSR_EFER_NXE;
3723 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3724 update_mask |= MSR_EFER_SVME;
3727 cpu_load_efer(env, (env->efer & ~update_mask) |
3728 (val & update_mask));
3729 }
3730 break;
3731 case MSR_STAR:
3732 env->star = val;
3733 break;
3734 case MSR_PAT:
3735 env->pat = val;
3736 break;
3737 case MSR_VM_HSAVE_PA:
3738 env->vm_hsave = val;
3739 break;
3740#ifdef TARGET_X86_64
3741 case MSR_LSTAR:
3742 env->lstar = val;
3743 break;
3744 case MSR_CSTAR:
3745 env->cstar = val;
3746 break;
3747 case MSR_FMASK:
3748 env->fmask = val;
3749 break;
3750 case MSR_FSBASE:
3751 env->segs[R_FS].base = val;
3752 break;
3753 case MSR_GSBASE:
3754 env->segs[R_GS].base = val;
3755 break;
3756 case MSR_KERNELGSBASE:
3757 env->kernelgsbase = val;
3758 break;
3759#endif
3760# ifndef VBOX
3761 case MSR_MTRRphysBase(0):
3762 case MSR_MTRRphysBase(1):
3763 case MSR_MTRRphysBase(2):
3764 case MSR_MTRRphysBase(3):
3765 case MSR_MTRRphysBase(4):
3766 case MSR_MTRRphysBase(5):
3767 case MSR_MTRRphysBase(6):
3768 case MSR_MTRRphysBase(7):
3769 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3770 break;
3771 case MSR_MTRRphysMask(0):
3772 case MSR_MTRRphysMask(1):
3773 case MSR_MTRRphysMask(2):
3774 case MSR_MTRRphysMask(3):
3775 case MSR_MTRRphysMask(4):
3776 case MSR_MTRRphysMask(5):
3777 case MSR_MTRRphysMask(6):
3778 case MSR_MTRRphysMask(7):
3779 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3780 break;
3781 case MSR_MTRRfix64K_00000:
3782 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3783 break;
3784 case MSR_MTRRfix16K_80000:
3785 case MSR_MTRRfix16K_A0000:
3786 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3787 break;
3788 case MSR_MTRRfix4K_C0000:
3789 case MSR_MTRRfix4K_C8000:
3790 case MSR_MTRRfix4K_D0000:
3791 case MSR_MTRRfix4K_D8000:
3792 case MSR_MTRRfix4K_E0000:
3793 case MSR_MTRRfix4K_E8000:
3794 case MSR_MTRRfix4K_F0000:
3795 case MSR_MTRRfix4K_F8000:
3796 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3797 break;
3798 case MSR_MTRRdefType:
3799 env->mtrr_deftype = val;
3800 break;
3801 case MSR_MCG_STATUS:
3802 env->mcg_status = val;
3803 break;
3804 case MSR_MCG_CTL:
3805 if ((env->mcg_cap & MCG_CTL_P)
3806 && (val == 0 || val == ~(uint64_t)0))
3807 env->mcg_ctl = val;
3808 break;
3809 case MSR_TSC_AUX:
3810 env->tsc_aux = val;
3811 break;
3812# endif /* !VBOX */
3813 default:
3814# ifndef VBOX
3815 if ((uint32_t)ECX >= MSR_MC0_CTL
3816 && (uint32_t)ECX < MSR_MC0_CTL + (4 * (env->mcg_cap & 0xff))) {
3817 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3818 if ((offset & 0x3) != 0
3819 || (val == 0 || val == ~(uint64_t)0))
3820 env->mce_banks[offset] = val;
3821 break;
3822 }
3823 /* XXX: exception ? */
3824# endif
3825 break;
3826 }
3827
3828# ifdef VBOX
3829 /* call CPUM. */
3830 if (cpu_wrmsr(env, (uint32_t)ECX, val) != 0)
3831 {
3832 /** @todo be a brave man and raise a \#GP(0) here as we should... */
3833 }
3834# endif
3835}
3836
3837void helper_rdmsr(void)
3838{
3839 uint64_t val;
3840
3841 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3842
3843 switch((uint32_t)ECX) {
3844 case MSR_IA32_SYSENTER_CS:
3845 val = env->sysenter_cs;
3846 break;
3847 case MSR_IA32_SYSENTER_ESP:
3848 val = env->sysenter_esp;
3849 break;
3850 case MSR_IA32_SYSENTER_EIP:
3851 val = env->sysenter_eip;
3852 break;
3853 case MSR_IA32_APICBASE:
3854#ifndef VBOX
3855 val = cpu_get_apic_base(env->apic_state);
3856#else /* VBOX */
3857 val = cpu_get_apic_base(env);
3858#endif /* VBOX */
3859 break;
3860 case MSR_EFER:
3861 val = env->efer;
3862 break;
3863 case MSR_STAR:
3864 val = env->star;
3865 break;
3866 case MSR_PAT:
3867 val = env->pat;
3868 break;
3869 case MSR_VM_HSAVE_PA:
3870 val = env->vm_hsave;
3871 break;
3872# ifndef VBOX /* forward to CPUMQueryGuestMsr. */
3873 case MSR_IA32_PERF_STATUS:
3874 /* tsc_increment_by_tick */
3875 val = 1000ULL;
3876 /* CPU multiplier */
3877 val |= (((uint64_t)4ULL) << 40);
3878 break;
3879# endif /* !VBOX */
3880#ifdef TARGET_X86_64
3881 case MSR_LSTAR:
3882 val = env->lstar;
3883 break;
3884 case MSR_CSTAR:
3885 val = env->cstar;
3886 break;
3887 case MSR_FMASK:
3888 val = env->fmask;
3889 break;
3890 case MSR_FSBASE:
3891 val = env->segs[R_FS].base;
3892 break;
3893 case MSR_GSBASE:
3894 val = env->segs[R_GS].base;
3895 break;
3896 case MSR_KERNELGSBASE:
3897 val = env->kernelgsbase;
3898 break;
3899# ifndef VBOX
3900 case MSR_TSC_AUX:
3901 val = env->tsc_aux;
3902 break;
3903# endif /*!VBOX*/
3904#endif
3905# ifndef VBOX
3906 case MSR_MTRRphysBase(0):
3907 case MSR_MTRRphysBase(1):
3908 case MSR_MTRRphysBase(2):
3909 case MSR_MTRRphysBase(3):
3910 case MSR_MTRRphysBase(4):
3911 case MSR_MTRRphysBase(5):
3912 case MSR_MTRRphysBase(6):
3913 case MSR_MTRRphysBase(7):
3914 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
3915 break;
3916 case MSR_MTRRphysMask(0):
3917 case MSR_MTRRphysMask(1):
3918 case MSR_MTRRphysMask(2):
3919 case MSR_MTRRphysMask(3):
3920 case MSR_MTRRphysMask(4):
3921 case MSR_MTRRphysMask(5):
3922 case MSR_MTRRphysMask(6):
3923 case MSR_MTRRphysMask(7):
3924 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
3925 break;
3926 case MSR_MTRRfix64K_00000:
3927 val = env->mtrr_fixed[0];
3928 break;
3929 case MSR_MTRRfix16K_80000:
3930 case MSR_MTRRfix16K_A0000:
3931 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
3932 break;
3933 case MSR_MTRRfix4K_C0000:
3934 case MSR_MTRRfix4K_C8000:
3935 case MSR_MTRRfix4K_D0000:
3936 case MSR_MTRRfix4K_D8000:
3937 case MSR_MTRRfix4K_E0000:
3938 case MSR_MTRRfix4K_E8000:
3939 case MSR_MTRRfix4K_F0000:
3940 case MSR_MTRRfix4K_F8000:
3941 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
3942 break;
3943 case MSR_MTRRdefType:
3944 val = env->mtrr_deftype;
3945 break;
3946 case MSR_MTRRcap:
3947 if (env->cpuid_features & CPUID_MTRR)
3948 val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
3949 else
3950 /* XXX: exception ? */
3951 val = 0;
3952 break;
3953 case MSR_MCG_CAP:
3954 val = env->mcg_cap;
3955 break;
3956 case MSR_MCG_CTL:
3957 if (env->mcg_cap & MCG_CTL_P)
3958 val = env->mcg_ctl;
3959 else
3960 val = 0;
3961 break;
3962 case MSR_MCG_STATUS:
3963 val = env->mcg_status;
3964 break;
3965# endif /* !VBOX */
3966 default:
3967# ifndef VBOX
3968 if ((uint32_t)ECX >= MSR_MC0_CTL
3969 && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
3970 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3971 val = env->mce_banks[offset];
3972 break;
3973 }
3974 /* XXX: exception ? */
3975 val = 0;
3976# else /* VBOX */
3977 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
3978 {
3979 /** @todo be a brave man and raise a \#GP(0) here as we should... */
3980 val = 0;
3981 }
3982# endif /* VBOX */
3983 break;
3984 }
3985 EAX = (uint32_t)(val);
3986 EDX = (uint32_t)(val >> 32);
3987
3988# ifdef VBOX_STRICT
3989 if ((uint32_t)ECX != MSR_IA32_TSC) {
3990 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
3991 val = 0;
3992 AssertMsg(val == RT_MAKE_U64(EAX, EDX), ("idMsr=%#x val=%#llx eax:edx=%#llx\n", (uint32_t)ECX, val, RT_MAKE_U64(EAX, EDX)));
3993 }
3994# endif
3995}
3996#endif
3997
3998target_ulong helper_lsl(target_ulong selector1)
3999{
4000 unsigned int limit;
4001 uint32_t e1, e2, eflags, selector;
4002 int rpl, dpl, cpl, type;
4003
4004 selector = selector1 & 0xffff;
4005 eflags = helper_cc_compute_all(CC_OP);
4006 if ((selector & 0xfffc) == 0)
4007 goto fail;
4008 if (load_segment(&e1, &e2, selector) != 0)
4009 goto fail;
4010 rpl = selector & 3;
4011 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4012 cpl = env->hflags & HF_CPL_MASK;
4013 if (e2 & DESC_S_MASK) {
4014 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
4015 /* conforming */
4016 } else {
4017 if (dpl < cpl || dpl < rpl)
4018 goto fail;
4019 }
4020 } else {
4021 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
4022 switch(type) {
4023 case 1:
4024 case 2:
4025 case 3:
4026 case 9:
4027 case 11:
4028 break;
4029 default:
4030 goto fail;
4031 }
4032 if (dpl < cpl || dpl < rpl) {
4033 fail:
4034 CC_SRC = eflags & ~CC_Z;
4035 return 0;
4036 }
4037 }
4038 limit = get_seg_limit(e1, e2);
4039 CC_SRC = eflags | CC_Z;
4040 return limit;
4041}
4042
4043target_ulong helper_lar(target_ulong selector1)
4044{
4045 uint32_t e1, e2, eflags, selector;
4046 int rpl, dpl, cpl, type;
4047
4048 selector = selector1 & 0xffff;
4049 eflags = helper_cc_compute_all(CC_OP);
4050 if ((selector & 0xfffc) == 0)
4051 goto fail;
4052 if (load_segment(&e1, &e2, selector) != 0)
4053 goto fail;
4054 rpl = selector & 3;
4055 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4056 cpl = env->hflags & HF_CPL_MASK;
4057 if (e2 & DESC_S_MASK) {
4058 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
4059 /* conforming */
4060 } else {
4061 if (dpl < cpl || dpl < rpl)
4062 goto fail;
4063 }
4064 } else {
4065 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
4066 switch(type) {
4067 case 1:
4068 case 2:
4069 case 3:
4070 case 4:
4071 case 5:
4072 case 9:
4073 case 11:
4074 case 12:
4075 break;
4076 default:
4077 goto fail;
4078 }
4079 if (dpl < cpl || dpl < rpl) {
4080 fail:
4081 CC_SRC = eflags & ~CC_Z;
4082 return 0;
4083 }
4084 }
4085 CC_SRC = eflags | CC_Z;
4086 return e2 & 0x00f0ff00;
4087}
4088
4089void helper_verr(target_ulong selector1)
4090{
4091 uint32_t e1, e2, eflags, selector;
4092 int rpl, dpl, cpl;
4093
4094 selector = selector1 & 0xffff;
4095 eflags = helper_cc_compute_all(CC_OP);
4096 if ((selector & 0xfffc) == 0)
4097 goto fail;
4098 if (load_segment(&e1, &e2, selector) != 0)
4099 goto fail;
4100 if (!(e2 & DESC_S_MASK))
4101 goto fail;
4102 rpl = selector & 3;
4103 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4104 cpl = env->hflags & HF_CPL_MASK;
4105 if (e2 & DESC_CS_MASK) {
4106 if (!(e2 & DESC_R_MASK))
4107 goto fail;
4108 if (!(e2 & DESC_C_MASK)) {
4109 if (dpl < cpl || dpl < rpl)
4110 goto fail;
4111 }
4112 } else {
4113 if (dpl < cpl || dpl < rpl) {
4114 fail:
4115 CC_SRC = eflags & ~CC_Z;
4116 return;
4117 }
4118 }
4119 CC_SRC = eflags | CC_Z;
4120}
4121
4122void helper_verw(target_ulong selector1)
4123{
4124 uint32_t e1, e2, eflags, selector;
4125 int rpl, dpl, cpl;
4126
4127 selector = selector1 & 0xffff;
4128 eflags = helper_cc_compute_all(CC_OP);
4129 if ((selector & 0xfffc) == 0)
4130 goto fail;
4131 if (load_segment(&e1, &e2, selector) != 0)
4132 goto fail;
4133 if (!(e2 & DESC_S_MASK))
4134 goto fail;
4135 rpl = selector & 3;
4136 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4137 cpl = env->hflags & HF_CPL_MASK;
4138 if (e2 & DESC_CS_MASK) {
4139 goto fail;
4140 } else {
4141 if (dpl < cpl || dpl < rpl)
4142 goto fail;
4143 if (!(e2 & DESC_W_MASK)) {
4144 fail:
4145 CC_SRC = eflags & ~CC_Z;
4146 return;
4147 }
4148 }
4149 CC_SRC = eflags | CC_Z;
4150}
4151
4152/* x87 FPU helpers */
4153
4154static void fpu_set_exception(int mask)
4155{
4156 env->fpus |= mask;
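 /* If any pending exception is unmasked in the control word, set the error-summary and busy bits as well. */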
4157 if (env->fpus & (~env->fpuc & FPUC_EM))
4158 env->fpus |= FPUS_SE | FPUS_B;
4159}
4160
4161static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4162{
4163 if (b == 0.0)
4164 fpu_set_exception(FPUS_ZE);
4165 return a / b;
4166}
4167
4168static void fpu_raise_exception(void)
4169{
4170 if (env->cr[0] & CR0_NE_MASK) {
4171 raise_exception(EXCP10_COPR);
4172 }
4173#if !defined(CONFIG_USER_ONLY)
4174 else {
4175 cpu_set_ferr(env);
4176 }
4177#endif
4178}
4179
4180void helper_flds_FT0(uint32_t val)
4181{
4182 union {
4183 float32 f;
4184 uint32_t i;
4185 } u;
4186 u.i = val;
4187 FT0 = float32_to_floatx(u.f, &env->fp_status);
4188}
4189
4190void helper_fldl_FT0(uint64_t val)
4191{
4192 union {
4193 float64 f;
4194 uint64_t i;
4195 } u;
4196 u.i = val;
4197 FT0 = float64_to_floatx(u.f, &env->fp_status);
4198}
4199
4200void helper_fildl_FT0(int32_t val)
4201{
4202 FT0 = int32_to_floatx(val, &env->fp_status);
4203}
4204
4205void helper_flds_ST0(uint32_t val)
4206{
4207 int new_fpstt;
4208 union {
4209 float32 f;
4210 uint32_t i;
4211 } u;
4212 new_fpstt = (env->fpstt - 1) & 7;
4213 u.i = val;
4214 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
4215 env->fpstt = new_fpstt;
4216 env->fptags[new_fpstt] = 0; /* validate stack entry */
4217}
4218
4219void helper_fldl_ST0(uint64_t val)
4220{
4221 int new_fpstt;
4222 union {
4223 float64 f;
4224 uint64_t i;
4225 } u;
4226 new_fpstt = (env->fpstt - 1) & 7;
4227 u.i = val;
4228 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
4229 env->fpstt = new_fpstt;
4230 env->fptags[new_fpstt] = 0; /* validate stack entry */
4231}
4232
4233void helper_fildl_ST0(int32_t val)
4234{
4235 int new_fpstt;
4236 new_fpstt = (env->fpstt - 1) & 7;
4237 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
4238 env->fpstt = new_fpstt;
4239 env->fptags[new_fpstt] = 0; /* validate stack entry */
4240}
4241
4242void helper_fildll_ST0(int64_t val)
4243{
4244 int new_fpstt;
4245 new_fpstt = (env->fpstt - 1) & 7;
4246 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
4247 env->fpstt = new_fpstt;
4248 env->fptags[new_fpstt] = 0; /* validate stack entry */
4249}
4250
4251#ifndef VBOX
4252uint32_t helper_fsts_ST0(void)
4253#else
4254RTCCUINTREG helper_fsts_ST0(void)
4255#endif
4256{
4257 union {
4258 float32 f;
4259 uint32_t i;
4260 } u;
4261 u.f = floatx_to_float32(ST0, &env->fp_status);
4262 return u.i;
4263}
4264
4265uint64_t helper_fstl_ST0(void)
4266{
4267 union {
4268 float64 f;
4269 uint64_t i;
4270 } u;
4271 u.f = floatx_to_float64(ST0, &env->fp_status);
4272 return u.i;
4273}
4274
4275#ifndef VBOX
4276int32_t helper_fist_ST0(void)
4277#else
4278RTCCINTREG helper_fist_ST0(void)
4279#endif
4280{
4281 int32_t val;
4282 val = floatx_to_int32(ST0, &env->fp_status);
4283 if (val != (int16_t)val)
4284 val = -32768;
4285 return val;
4286}
4287
4288#ifndef VBOX
4289int32_t helper_fistl_ST0(void)
4290#else
4291RTCCINTREG helper_fistl_ST0(void)
4292#endif
4293{
4294 int32_t val;
4295 val = floatx_to_int32(ST0, &env->fp_status);
4296 return val;
4297}
4298
4299int64_t helper_fistll_ST0(void)
4300{
4301 int64_t val;
4302 val = floatx_to_int64(ST0, &env->fp_status);
4303 return val;
4304}
4305
4306#ifndef VBOX
4307int32_t helper_fistt_ST0(void)
4308#else
4309RTCCINTREG helper_fistt_ST0(void)
4310#endif
4311{
4312 int32_t val;
4313 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4314 if (val != (int16_t)val)
4315 val = -32768;
4316 return val;
4317}
4318
4319#ifndef VBOX
4320int32_t helper_fisttl_ST0(void)
4321#else
4322RTCCINTREG helper_fisttl_ST0(void)
4323#endif
4324{
4325 int32_t val;
4326 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4327 return val;
4328}
4329
4330int64_t helper_fisttll_ST0(void)
4331{
4332 int64_t val;
4333 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
4334 return val;
4335}
4336
4337void helper_fldt_ST0(target_ulong ptr)
4338{
4339 int new_fpstt;
4340 new_fpstt = (env->fpstt - 1) & 7;
4341 env->fpregs[new_fpstt].d = helper_fldt(ptr);
4342 env->fpstt = new_fpstt;
4343 env->fptags[new_fpstt] = 0; /* validate stack entry */
4344}
4345
4346void helper_fstt_ST0(target_ulong ptr)
4347{
4348 helper_fstt(ST0, ptr);
4349}
4350
4351void helper_fpush(void)
4352{
4353 fpush();
4354}
4355
4356void helper_fpop(void)
4357{
4358 fpop();
4359}
4360
4361void helper_fdecstp(void)
4362{
4363 env->fpstt = (env->fpstt - 1) & 7;
4364 env->fpus &= (~0x4700);
4365}
4366
4367void helper_fincstp(void)
4368{
4369 env->fpstt = (env->fpstt + 1) & 7;
4370 env->fpus &= (~0x4700);
4371}
4372
4373/* FPU move */
4374
4375void helper_ffree_STN(int st_index)
4376{
4377 env->fptags[(env->fpstt + st_index) & 7] = 1;
4378}
4379
4380void helper_fmov_ST0_FT0(void)
4381{
4382 ST0 = FT0;
4383}
4384
4385void helper_fmov_FT0_STN(int st_index)
4386{
4387 FT0 = ST(st_index);
4388}
4389
4390void helper_fmov_ST0_STN(int st_index)
4391{
4392 ST0 = ST(st_index);
4393}
4394
4395void helper_fmov_STN_ST0(int st_index)
4396{
4397 ST(st_index) = ST0;
4398}
4399
4400void helper_fxchg_ST0_STN(int st_index)
4401{
4402 CPU86_LDouble tmp;
4403 tmp = ST(st_index);
4404 ST(st_index) = ST0;
4405 ST0 = tmp;
4406}
4407
4408/* FPU operations */
4409
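/* Map the floatx_compare result (-1 less, 0 equal, 1 greater, 2 unordered), offset by one, to the x87 C3/C2/C0 condition-code bits. */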
4410static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
4411
4412void helper_fcom_ST0_FT0(void)
4413{
4414 int ret;
4415
4416 ret = floatx_compare(ST0, FT0, &env->fp_status);
4417 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4418}
4419
4420void helper_fucom_ST0_FT0(void)
4421{
4422 int ret;
4423
4424 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4425 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4426}
4427
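/* Same mapping as above, but to the EFLAGS ZF/PF/CF bits set by FCOMI/FUCOMI. */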
4428static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
4429
4430void helper_fcomi_ST0_FT0(void)
4431{
4432 int eflags;
4433 int ret;
4434
4435 ret = floatx_compare(ST0, FT0, &env->fp_status);
4436 eflags = helper_cc_compute_all(CC_OP);
4437 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4438 CC_SRC = eflags;
4439}
4440
4441void helper_fucomi_ST0_FT0(void)
4442{
4443 int eflags;
4444 int ret;
4445
4446 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4447 eflags = helper_cc_compute_all(CC_OP);
4448 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4449 CC_SRC = eflags;
4450}
4451
4452void helper_fadd_ST0_FT0(void)
4453{
4454 ST0 += FT0;
4455}
4456
4457void helper_fmul_ST0_FT0(void)
4458{
4459 ST0 *= FT0;
4460}
4461
4462void helper_fsub_ST0_FT0(void)
4463{
4464 ST0 -= FT0;
4465}
4466
4467void helper_fsubr_ST0_FT0(void)
4468{
4469 ST0 = FT0 - ST0;
4470}
4471
4472void helper_fdiv_ST0_FT0(void)
4473{
4474 ST0 = helper_fdiv(ST0, FT0);
4475}
4476
4477void helper_fdivr_ST0_FT0(void)
4478{
4479 ST0 = helper_fdiv(FT0, ST0);
4480}
4481
4482/* fp operations between STN and ST0 */
4483
4484void helper_fadd_STN_ST0(int st_index)
4485{
4486 ST(st_index) += ST0;
4487}
4488
4489void helper_fmul_STN_ST0(int st_index)
4490{
4491 ST(st_index) *= ST0;
4492}
4493
4494void helper_fsub_STN_ST0(int st_index)
4495{
4496 ST(st_index) -= ST0;
4497}
4498
4499void helper_fsubr_STN_ST0(int st_index)
4500{
4501 CPU86_LDouble *p;
4502 p = &ST(st_index);
4503 *p = ST0 - *p;
4504}
4505
4506void helper_fdiv_STN_ST0(int st_index)
4507{
4508 CPU86_LDouble *p;
4509 p = &ST(st_index);
4510 *p = helper_fdiv(*p, ST0);
4511}
4512
4513void helper_fdivr_STN_ST0(int st_index)
4514{
4515 CPU86_LDouble *p;
4516 p = &ST(st_index);
4517 *p = helper_fdiv(ST0, *p);
4518}
4519
4520/* misc FPU operations */
4521void helper_fchs_ST0(void)
4522{
4523 ST0 = floatx_chs(ST0);
4524}
4525
4526void helper_fabs_ST0(void)
4527{
4528 ST0 = floatx_abs(ST0);
4529}
4530
4531void helper_fld1_ST0(void)
4532{
4533 ST0 = f15rk[1];
4534}
4535
4536void helper_fldl2t_ST0(void)
4537{
4538 ST0 = f15rk[6];
4539}
4540
4541void helper_fldl2e_ST0(void)
4542{
4543 ST0 = f15rk[5];
4544}
4545
4546void helper_fldpi_ST0(void)
4547{
4548 ST0 = f15rk[2];
4549}
4550
4551void helper_fldlg2_ST0(void)
4552{
4553 ST0 = f15rk[3];
4554}
4555
4556void helper_fldln2_ST0(void)
4557{
4558 ST0 = f15rk[4];
4559}
4560
4561void helper_fldz_ST0(void)
4562{
4563 ST0 = f15rk[0];
4564}
4565
4566void helper_fldz_FT0(void)
4567{
4568 FT0 = f15rk[0];
4569}
4570
4571#ifndef VBOX
4572uint32_t helper_fnstsw(void)
4573#else
4574RTCCUINTREG helper_fnstsw(void)
4575#endif
4576{
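 /* Merge the current top-of-stack pointer into bits 11-13 of the status word. */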
4577 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4578}
4579
4580#ifndef VBOX
4581uint32_t helper_fnstcw(void)
4582#else
4583RTCCUINTREG helper_fnstcw(void)
4584#endif
4585{
4586 return env->fpuc;
4587}
4588
4589static void update_fp_status(void)
4590{
4591 int rnd_type;
4592
4593 /* set rounding mode */
4594 switch(env->fpuc & RC_MASK) {
4595 default:
4596 case RC_NEAR:
4597 rnd_type = float_round_nearest_even;
4598 break;
4599 case RC_DOWN:
4600 rnd_type = float_round_down;
4601 break;
4602 case RC_UP:
4603 rnd_type = float_round_up;
4604 break;
4605 case RC_CHOP:
4606 rnd_type = float_round_to_zero;
4607 break;
4608 }
4609 set_float_rounding_mode(rnd_type, &env->fp_status);
4610#ifdef FLOATX80
4611 switch((env->fpuc >> 8) & 3) {
4612 case 0:
4613 rnd_type = 32;
4614 break;
4615 case 2:
4616 rnd_type = 64;
4617 break;
4618 case 3:
4619 default:
4620 rnd_type = 80;
4621 break;
4622 }
4623 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4624#endif
4625}
4626
4627void helper_fldcw(uint32_t val)
4628{
4629 env->fpuc = val;
4630 update_fp_status();
4631}
4632
4633void helper_fclex(void)
4634{
4635 env->fpus &= 0x7f00;
4636}
4637
4638void helper_fwait(void)
4639{
4640 if (env->fpus & FPUS_SE)
4641 fpu_raise_exception();
4642}
4643
4644void helper_fninit(void)
4645{
4646 env->fpus = 0;
4647 env->fpstt = 0;
4648 env->fpuc = 0x37f;
4649 env->fptags[0] = 1;
4650 env->fptags[1] = 1;
4651 env->fptags[2] = 1;
4652 env->fptags[3] = 1;
4653 env->fptags[4] = 1;
4654 env->fptags[5] = 1;
4655 env->fptags[6] = 1;
4656 env->fptags[7] = 1;
4657}
4658
4659/* BCD ops */
4660
4661void helper_fbld_ST0(target_ulong ptr)
4662{
4663 CPU86_LDouble tmp;
4664 uint64_t val;
4665 unsigned int v;
4666 int i;
4667
4668 val = 0;
4669 for(i = 8; i >= 0; i--) {
4670 v = ldub(ptr + i);
4671 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
4672 }
4673 tmp = val;
4674 if (ldub(ptr + 9) & 0x80)
4675 tmp = -tmp;
4676 fpush();
4677 ST0 = tmp;
4678}
4679
4680void helper_fbst_ST0(target_ulong ptr)
4681{
4682 int v;
4683 target_ulong mem_ref, mem_end;
4684 int64_t val;
4685
4686 val = floatx_to_int64(ST0, &env->fp_status);
4687 mem_ref = ptr;
4688 mem_end = mem_ref + 9;
4689 if (val < 0) {
4690 stb(mem_end, 0x80);
4691 val = -val;
4692 } else {
4693 stb(mem_end, 0x00);
4694 }
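 /* Store up to 18 significant digits as packed BCD, two digits per byte, least significant byte first. */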
4695 while (mem_ref < mem_end) {
4696 if (val == 0)
4697 break;
4698 v = val % 100;
4699 val = val / 100;
4700 v = ((v / 10) << 4) | (v % 10);
4701 stb(mem_ref++, v);
4702 }
4703 while (mem_ref < mem_end) {
4704 stb(mem_ref++, 0);
4705 }
4706}
4707
4708void helper_f2xm1(void)
4709{
4710 ST0 = pow(2.0,ST0) - 1.0;
4711}
4712
4713void helper_fyl2x(void)
4714{
4715 CPU86_LDouble fptemp;
4716
4717 fptemp = ST0;
4718 if (fptemp>0.0){
4719 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
4720 ST1 *= fptemp;
4721 fpop();
4722 } else {
4723 env->fpus &= (~0x4700);
4724 env->fpus |= 0x400;
4725 }
4726}
4727
4728void helper_fptan(void)
4729{
4730 CPU86_LDouble fptemp;
4731
4732 fptemp = ST0;
4733 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4734 env->fpus |= 0x400;
4735 } else {
4736 ST0 = tan(fptemp);
4737 fpush();
4738 ST0 = 1.0;
4739 env->fpus &= (~0x400); /* C2 <-- 0 */
4740 /* the above code is for |arg| < 2**52 only */
4741 }
4742}
4743
4744void helper_fpatan(void)
4745{
4746 CPU86_LDouble fptemp, fpsrcop;
4747
4748 fpsrcop = ST1;
4749 fptemp = ST0;
4750 ST1 = atan2(fpsrcop,fptemp);
4751 fpop();
4752}
4753
4754void helper_fxtract(void)
4755{
4756 CPU86_LDoubleU temp;
4757 unsigned int expdif;
4758
4759 temp.d = ST0;
4760 expdif = EXPD(temp) - EXPBIAS;
4761 /*DP exponent bias*/
4762 ST0 = expdif;
4763 fpush();
4764 BIASEXPONENT(temp);
4765 ST0 = temp.d;
4766}
4767
4768void helper_fprem1(void)
4769{
4770 CPU86_LDouble dblq, fpsrcop, fptemp;
4771 CPU86_LDoubleU fpsrcop1, fptemp1;
4772 int expdif;
4773 signed long long int q;
4774
4775#ifndef VBOX /* Unfortunately, we cannot handle isinf/isnan easily in wrapper */
4776 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4777#else
4778 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4779#endif
4780 ST0 = 0.0 / 0.0; /* NaN */
4781 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4782 return;
4783 }
4784
4785 fpsrcop = ST0;
4786 fptemp = ST1;
4787 fpsrcop1.d = fpsrcop;
4788 fptemp1.d = fptemp;
4789 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4790
4791 if (expdif < 0) {
4792 /* optimisation? taken from the AMD docs */
4793 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4794 /* ST0 is unchanged */
4795 return;
4796 }
4797
4798 if (expdif < 53) {
4799 dblq = fpsrcop / fptemp;
4800 /* round dblq towards nearest integer */
4801 dblq = rint(dblq);
4802 ST0 = fpsrcop - fptemp * dblq;
4803
4804 /* convert dblq to q by truncating towards zero */
4805 if (dblq < 0.0)
4806 q = (signed long long int)(-dblq);
4807 else
4808 q = (signed long long int)dblq;
4809
4810 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4811 /* (C0,C3,C1) <-- (q2,q1,q0) */
4812 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4813 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4814 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4815 } else {
4816 env->fpus |= 0x400; /* C2 <-- 1 */
4817 fptemp = pow(2.0, expdif - 50);
4818 fpsrcop = (ST0 / ST1) / fptemp;
4819 /* fpsrcop = integer obtained by chopping */
4820 fpsrcop = (fpsrcop < 0.0) ?
4821 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4822 ST0 -= (ST1 * fpsrcop * fptemp);
4823 }
4824}
4825
4826void helper_fprem(void)
4827{
4828 CPU86_LDouble dblq, fpsrcop, fptemp;
4829 CPU86_LDoubleU fpsrcop1, fptemp1;
4830 int expdif;
4831 signed long long int q;
4832
4833#ifndef VBOX /* Unfortunately, we cannot easily handle isinf/isnan in wrapper */
4834 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4835#else
4836 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4837#endif
4838 ST0 = 0.0 / 0.0; /* NaN */
4839 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4840 return;
4841 }
4842
4843 fpsrcop = (CPU86_LDouble)ST0;
4844 fptemp = (CPU86_LDouble)ST1;
4845 fpsrcop1.d = fpsrcop;
4846 fptemp1.d = fptemp;
4847 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4848
4849 if (expdif < 0) {
4850 /* optimisation? taken from the AMD docs */
4851 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4852 /* ST0 is unchanged */
4853 return;
4854 }
4855
4856 if ( expdif < 53 ) {
4857 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4858 /* round dblq towards zero */
4859 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4860 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4861
4862 /* convert dblq to q by truncating towards zero */
4863 if (dblq < 0.0)
4864 q = (signed long long int)(-dblq);
4865 else
4866 q = (signed long long int)dblq;
4867
4868 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4869 /* (C0,C3,C1) <-- (q2,q1,q0) */
4870 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4871 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4872 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4873 } else {
4874 int N = 32 + (expdif % 32); /* as per AMD docs */
4875 env->fpus |= 0x400; /* C2 <-- 1 */
4876 fptemp = pow(2.0, (double)(expdif - N));
4877 fpsrcop = (ST0 / ST1) / fptemp;
4878 /* fpsrcop = integer obtained by chopping */
4879 fpsrcop = (fpsrcop < 0.0) ?
4880 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4881 ST0 -= (ST1 * fpsrcop * fptemp);
4882 }
4883}
4884
4885void helper_fyl2xp1(void)
4886{
4887 CPU86_LDouble fptemp;
4888
4889 fptemp = ST0;
4890 if ((fptemp+1.0)>0.0) {
4891 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4892 ST1 *= fptemp;
4893 fpop();
4894 } else {
4895 env->fpus &= (~0x4700);
4896 env->fpus |= 0x400;
4897 }
4898}
4899
4900void helper_fsqrt(void)
4901{
4902 CPU86_LDouble fptemp;
4903
4904 fptemp = ST0;
4905 if (fptemp<0.0) {
4906 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4907 env->fpus |= 0x400;
4908 }
4909 ST0 = sqrt(fptemp);
4910}
4911
4912void helper_fsincos(void)
4913{
4914 CPU86_LDouble fptemp;
4915
4916 fptemp = ST0;
4917 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4918 env->fpus |= 0x400;
4919 } else {
4920 ST0 = sin(fptemp);
4921 fpush();
4922 ST0 = cos(fptemp);
4923 env->fpus &= (~0x400); /* C2 <-- 0 */
4924 /* the above code is for |arg| < 2**63 only */
4925 }
4926}
4927
4928void helper_frndint(void)
4929{
4930 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4931}
4932
4933void helper_fscale(void)
4934{
4935 ST0 = ldexp (ST0, (int)(ST1));
4936}
4937
4938void helper_fsin(void)
4939{
4940 CPU86_LDouble fptemp;
4941
4942 fptemp = ST0;
4943 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4944 env->fpus |= 0x400;
4945 } else {
4946 ST0 = sin(fptemp);
4947 env->fpus &= (~0x400); /* C2 <-- 0 */
4948 /* the above code is for |arg| < 2**53 only */
4949 }
4950}
4951
4952void helper_fcos(void)
4953{
4954 CPU86_LDouble fptemp;
4955
4956 fptemp = ST0;
4957 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4958 env->fpus |= 0x400;
4959 } else {
4960 ST0 = cos(fptemp);
4961 env->fpus &= (~0x400); /* C2 <-- 0 */
4962 /* the above code is for |arg| < 2**63 only */
4963 }
4964}
4965
4966void helper_fxam_ST0(void)
4967{
4968 CPU86_LDoubleU temp;
4969 int expdif;
4970
4971 temp.d = ST0;
4972
4973 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4974 if (SIGND(temp))
4975 env->fpus |= 0x200; /* C1 <-- 1 */
4976
4977 /* XXX: test fptags too */
4978 expdif = EXPD(temp);
4979 if (expdif == MAXEXPD) {
4980#ifdef USE_X86LDOUBLE
4981 if (MANTD(temp) == 0x8000000000000000ULL)
4982#else
4983 if (MANTD(temp) == 0)
4984#endif
4985 env->fpus |= 0x500 /*Infinity*/;
4986 else
4987 env->fpus |= 0x100 /*NaN*/;
4988 } else if (expdif == 0) {
4989 if (MANTD(temp) == 0)
4990 env->fpus |= 0x4000 /*Zero*/;
4991 else
4992 env->fpus |= 0x4400 /*Denormal*/;
4993 } else {
4994 env->fpus |= 0x400;
4995 }
4996}
4997
4998void helper_fstenv(target_ulong ptr, int data32)
4999{
5000 int fpus, fptag, exp, i;
5001 uint64_t mant;
5002 CPU86_LDoubleU tmp;
5003
5004 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5005 fptag = 0;
5006 for (i=7; i>=0; i--) {
5007 fptag <<= 2;
5008 if (env->fptags[i]) {
5009 fptag |= 3;
5010 } else {
5011 tmp.d = env->fpregs[i].d;
5012 exp = EXPD(tmp);
5013 mant = MANTD(tmp);
5014 if (exp == 0 && mant == 0) {
5015 /* zero */
5016 fptag |= 1;
5017 } else if (exp == 0 || exp == MAXEXPD
5018#ifdef USE_X86LDOUBLE
5019 || (mant & (1LL << 63)) == 0
5020#endif
5021 ) {
5022 /* NaNs, infinity, denormal */
5023 fptag |= 2;
5024 }
5025 }
5026 }
5027 if (data32) {
5028 /* 32 bit */
5029 stl(ptr, env->fpuc);
5030 stl(ptr + 4, fpus);
5031 stl(ptr + 8, fptag);
5032 stl(ptr + 12, 0); /* fpip */
5033 stl(ptr + 16, 0); /* fpcs */
5034 stl(ptr + 20, 0); /* fpoo */
5035 stl(ptr + 24, 0); /* fpos */
5036 } else {
5037 /* 16 bit */
5038 stw(ptr, env->fpuc);
5039 stw(ptr + 2, fpus);
5040 stw(ptr + 4, fptag);
5041 stw(ptr + 6, 0);
5042 stw(ptr + 8, 0);
5043 stw(ptr + 10, 0);
5044 stw(ptr + 12, 0);
5045 }
5046}
5047
5048void helper_fldenv(target_ulong ptr, int data32)
5049{
5050 int i, fpus, fptag;
5051
5052 if (data32) {
5053 env->fpuc = lduw(ptr);
5054 fpus = lduw(ptr + 4);
5055 fptag = lduw(ptr + 8);
5056 }
5057 else {
5058 env->fpuc = lduw(ptr);
5059 fpus = lduw(ptr + 2);
5060 fptag = lduw(ptr + 4);
5061 }
5062 env->fpstt = (fpus >> 11) & 7;
5063 env->fpus = fpus & ~0x3800;
5064 for(i = 0;i < 8; i++) {
5065 env->fptags[i] = ((fptag & 3) == 3);
5066 fptag >>= 2;
5067 }
5068}
5069
5070void helper_fsave(target_ulong ptr, int data32)
5071{
5072 CPU86_LDouble tmp;
5073 int i;
5074
5075 helper_fstenv(ptr, data32);
5076
5077 ptr += (14 << data32);
5078 for(i = 0;i < 8; i++) {
5079 tmp = ST(i);
5080 helper_fstt(tmp, ptr);
5081 ptr += 10;
5082 }
5083
5084 /* fninit */
5085 env->fpus = 0;
5086 env->fpstt = 0;
5087 env->fpuc = 0x37f;
5088 env->fptags[0] = 1;
5089 env->fptags[1] = 1;
5090 env->fptags[2] = 1;
5091 env->fptags[3] = 1;
5092 env->fptags[4] = 1;
5093 env->fptags[5] = 1;
5094 env->fptags[6] = 1;
5095 env->fptags[7] = 1;
5096}
5097
5098void helper_frstor(target_ulong ptr, int data32)
5099{
5100 CPU86_LDouble tmp;
5101 int i;
5102
5103 helper_fldenv(ptr, data32);
5104 ptr += (14 << data32);
5105
5106 for(i = 0;i < 8; i++) {
5107 tmp = helper_fldt(ptr);
5108 ST(i) = tmp;
5109 ptr += 10;
5110 }
5111}
5112
5113void helper_fxsave(target_ulong ptr, int data64)
5114{
5115 int fpus, fptag, i, nb_xmm_regs;
5116 CPU86_LDouble tmp;
5117 target_ulong addr;
5118
5119 /* The operand must be 16 byte aligned */
5120 if (ptr & 0xf) {
5121 raise_exception(EXCP0D_GPF);
5122 }
5123
5124 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5125 fptag = 0;
5126 for(i = 0; i < 8; i++) {
5127 fptag |= (env->fptags[i] << i);
5128 }
5129 stw(ptr, env->fpuc);
5130 stw(ptr + 2, fpus);
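 /* FXSAVE uses an abridged tag word: one bit per register, 1 = valid; env->fptags uses 1 = empty, hence the XOR. */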
5131 stw(ptr + 4, fptag ^ 0xff);
5132#ifdef TARGET_X86_64
5133 if (data64) {
5134 stq(ptr + 0x08, 0); /* rip */
5135 stq(ptr + 0x10, 0); /* rdp */
5136 } else
5137#endif
5138 {
5139 stl(ptr + 0x08, 0); /* eip */
5140 stl(ptr + 0x0c, 0); /* sel */
5141 stl(ptr + 0x10, 0); /* dp */
5142 stl(ptr + 0x14, 0); /* sel */
5143 }
5144
5145 addr = ptr + 0x20;
5146 for(i = 0;i < 8; i++) {
5147 tmp = ST(i);
5148 helper_fstt(tmp, addr);
5149 addr += 16;
5150 }
5151
5152 if (env->cr[4] & CR4_OSFXSR_MASK) {
5153 /* XXX: finish it */
5154 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5155 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5156 if (env->hflags & HF_CS64_MASK)
5157 nb_xmm_regs = 16;
5158 else
5159 nb_xmm_regs = 8;
5160 addr = ptr + 0xa0;
5161 /* Fast FXSAVE leaves out the XMM registers */
5162 if (!(env->efer & MSR_EFER_FFXSR)
5163 || (env->hflags & HF_CPL_MASK)
5164 || !(env->hflags & HF_LMA_MASK)) {
5165 for(i = 0; i < nb_xmm_regs; i++) {
5166 stq(addr, env->xmm_regs[i].XMM_Q(0));
5167 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5168 addr += 16;
5169 }
5170 }
5171 }
5172}
5173
5174void helper_fxrstor(target_ulong ptr, int data64)
5175{
5176 int i, fpus, fptag, nb_xmm_regs;
5177 CPU86_LDouble tmp;
5178 target_ulong addr;
5179
5180 /* The operand must be 16 byte aligned */
5181 if (ptr & 0xf) {
5182 raise_exception(EXCP0D_GPF);
5183 }
5184
5185 env->fpuc = lduw(ptr);
5186 fpus = lduw(ptr + 2);
5187 fptag = lduw(ptr + 4);
5188 env->fpstt = (fpus >> 11) & 7;
5189 env->fpus = fpus & ~0x3800;
5190 fptag ^= 0xff;
5191 for(i = 0;i < 8; i++) {
5192 env->fptags[i] = ((fptag >> i) & 1);
5193 }
5194
5195 addr = ptr + 0x20;
5196 for(i = 0;i < 8; i++) {
5197 tmp = helper_fldt(addr);
5198 ST(i) = tmp;
5199 addr += 16;
5200 }
5201
5202 if (env->cr[4] & CR4_OSFXSR_MASK) {
5203 /* XXX: finish it */
5204 env->mxcsr = ldl(ptr + 0x18);
5205 //ldl(ptr + 0x1c);
5206 if (env->hflags & HF_CS64_MASK)
5207 nb_xmm_regs = 16;
5208 else
5209 nb_xmm_regs = 8;
5210 addr = ptr + 0xa0;
5211 /* Fast FXRESTORE leaves out the XMM registers */
5212 if (!(env->efer & MSR_EFER_FFXSR)
5213 || (env->hflags & HF_CPL_MASK)
5214 || !(env->hflags & HF_LMA_MASK)) {
5215 for(i = 0; i < nb_xmm_regs; i++) {
5216#if !defined(VBOX) || __GNUC__ < 4
5217 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
5218 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
5219#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5220# if 1
5221 env->xmm_regs[i].XMM_L(0) = ldl(addr);
5222 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
5223 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
5224 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
5225# else
5226 /* this works fine on Mac OS X, gcc 4.0.1 */
5227 uint64_t u64 = ldq(addr);
5228 env->xmm_regs[i].XMM_Q(0) = u64;
5229 u64 = ldq(addr + 8);
5230 env->xmm_regs[i].XMM_Q(1) = u64;
5231# endif
5232#endif
5233 addr += 16;
5234 }
5235 }
5236 }
5237}
5238
5239#ifndef USE_X86LDOUBLE
5240
5241void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5242{
5243 CPU86_LDoubleU temp;
5244 int e;
5245
5246 temp.d = f;
5247 /* mantissa */
5248 *pmant = (MANTD(temp) << 11) | (1LL << 63);
5249 /* exponent + sign */
5250 e = EXPD(temp) - EXPBIAS + 16383;
5251 e |= SIGND(temp) >> 16;
5252 *pexp = e;
5253}
5254
5255CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5256{
5257 CPU86_LDoubleU temp;
5258 int e;
5259 uint64_t ll;
5260
5261 /* XXX: handle overflow ? */
5262 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
5263 e |= (upper >> 4) & 0x800; /* sign */
5264 ll = (mant >> 11) & ((1LL << 52) - 1);
5265#ifdef __arm__
5266 temp.l.upper = (e << 20) | (ll >> 32);
5267 temp.l.lower = ll;
5268#else
5269 temp.ll = ll | ((uint64_t)e << 52);
5270#endif
5271 return temp.d;
5272}
5273
5274#else
5275
5276void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5277{
5278 CPU86_LDoubleU temp;
5279
5280 temp.d = f;
5281 *pmant = temp.l.lower;
5282 *pexp = temp.l.upper;
5283}
5284
5285CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5286{
5287 CPU86_LDoubleU temp;
5288
5289 temp.l.upper = upper;
5290 temp.l.lower = mant;
5291 return temp.d;
5292}
5293#endif
5294
5295#ifdef TARGET_X86_64
5296
5297//#define DEBUG_MULDIV
5298
5299static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
5300{
5301 *plow += a;
5302 /* carry test */
5303 if (*plow < a)
5304 (*phigh)++;
5305 *phigh += b;
5306}
5307
5308static void neg128(uint64_t *plow, uint64_t *phigh)
5309{
5310 *plow = ~ *plow;
5311 *phigh = ~ *phigh;
5312 add128(plow, phigh, 1, 0);
5313}
5314
5315/* return TRUE if overflow */
5316static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
5317{
5318 uint64_t q, r, a1, a0;
5319 int i, qb, ab;
5320
5321 a0 = *plow;
5322 a1 = *phigh;
5323 if (a1 == 0) {
5324 q = a0 / b;
5325 r = a0 % b;
5326 *plow = q;
5327 *phigh = r;
5328 } else {
5329 if (a1 >= b)
5330 return 1;
5331 /* XXX: use a better algorithm */
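 /* Simple restoring (shift-and-subtract) division: after 64 iterations a0 holds the quotient and a1 the remainder. */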
5332 for(i = 0; i < 64; i++) {
5333 ab = a1 >> 63;
5334 a1 = (a1 << 1) | (a0 >> 63);
5335 if (ab || a1 >= b) {
5336 a1 -= b;
5337 qb = 1;
5338 } else {
5339 qb = 0;
5340 }
5341 a0 = (a0 << 1) | qb;
5342 }
5343#if defined(DEBUG_MULDIV)
5344 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
5345 *phigh, *plow, b, a0, a1);
5346#endif
5347 *plow = a0;
5348 *phigh = a1;
5349 }
5350 return 0;
5351}
5352
5353/* return TRUE if overflow */
5354static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
5355{
5356 int sa, sb;
5357 sa = ((int64_t)*phigh < 0);
5358 if (sa)
5359 neg128(plow, phigh);
5360 sb = (b < 0);
5361 if (sb)
5362 b = -b;
5363 if (div64(plow, phigh, b) != 0)
5364 return 1;
5365 if (sa ^ sb) {
5366 if (*plow > (1ULL << 63))
5367 return 1;
5368 *plow = - *plow;
5369 } else {
5370 if (*plow >= (1ULL << 63))
5371 return 1;
5372 }
5373 if (sa)
5374 *phigh = - *phigh;
5375 return 0;
5376}
5377
5378void helper_mulq_EAX_T0(target_ulong t0)
5379{
5380 uint64_t r0, r1;
5381
5382 mulu64(&r0, &r1, EAX, t0);
5383 EAX = r0;
5384 EDX = r1;
5385 CC_DST = r0;
5386 CC_SRC = r1;
5387}
5388
5389void helper_imulq_EAX_T0(target_ulong t0)
5390{
5391 uint64_t r0, r1;
5392
5393 muls64(&r0, &r1, EAX, t0);
5394 EAX = r0;
5395 EDX = r1;
5396 CC_DST = r0;
5397 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5398}
5399
5400target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
5401{
5402 uint64_t r0, r1;
5403
5404 muls64(&r0, &r1, t0, t1);
5405 CC_DST = r0;
5406 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5407 return r0;
5408}
5409
5410void helper_divq_EAX(target_ulong t0)
5411{
5412 uint64_t r0, r1;
5413 if (t0 == 0) {
5414 raise_exception(EXCP00_DIVZ);
5415 }
5416 r0 = EAX;
5417 r1 = EDX;
5418 if (div64(&r0, &r1, t0))
5419 raise_exception(EXCP00_DIVZ);
5420 EAX = r0;
5421 EDX = r1;
5422}
5423
5424void helper_idivq_EAX(target_ulong t0)
5425{
5426 uint64_t r0, r1;
5427 if (t0 == 0) {
5428 raise_exception(EXCP00_DIVZ);
5429 }
5430 r0 = EAX;
5431 r1 = EDX;
5432 if (idiv64(&r0, &r1, t0))
5433 raise_exception(EXCP00_DIVZ);
5434 EAX = r0;
5435 EDX = r1;
5436}
5437#endif
5438
5439static void do_hlt(void)
5440{
5441 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
5442 env->halted = 1;
5443 env->exception_index = EXCP_HLT;
5444 cpu_loop_exit();
5445}
5446
5447void helper_hlt(int next_eip_addend)
5448{
5449 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
5450 EIP += next_eip_addend;
5451
5452 do_hlt();
5453}
5454
5455void helper_monitor(target_ulong ptr)
5456{
5457#ifdef VBOX
5458 if ((uint32_t)ECX > 1)
5459 raise_exception(EXCP0D_GPF);
5460#else /* !VBOX */
5461 if ((uint32_t)ECX != 0)
5462 raise_exception(EXCP0D_GPF);
5463#endif /* !VBOX */
5464 /* XXX: store address ? */
5465 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
5466}
5467
5468void helper_mwait(int next_eip_addend)
5469{
5470 if ((uint32_t)ECX != 0)
5471 raise_exception(EXCP0D_GPF);
5472#ifdef VBOX
5473 helper_hlt(next_eip_addend);
5474#else /* !VBOX */
5475 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
5476 EIP += next_eip_addend;
5477
5478 /* XXX: not complete but not completely erroneous */
5479 if (env->cpu_index != 0 || env->next_cpu != NULL) {
5480 /* more than one CPU: do not sleep because another CPU may
5481 wake this one */
5482 } else {
5483 do_hlt();
5484 }
5485#endif /* !VBOX */
5486}
5487
5488void helper_debug(void)
5489{
5490 env->exception_index = EXCP_DEBUG;
5491 cpu_loop_exit();
5492}
5493
5494void helper_reset_rf(void)
5495{
5496 env->eflags &= ~RF_MASK;
5497}
5498
5499void helper_raise_interrupt(int intno, int next_eip_addend)
5500{
5501 raise_interrupt(intno, 1, 0, next_eip_addend);
5502}
5503
5504void helper_raise_exception(int exception_index)
5505{
5506 raise_exception(exception_index);
5507}
5508
5509void helper_cli(void)
5510{
5511 env->eflags &= ~IF_MASK;
5512}
5513
5514void helper_sti(void)
5515{
5516 env->eflags |= IF_MASK;
5517}
5518
5519#ifdef VBOX
5520void helper_cli_vme(void)
5521{
5522 env->eflags &= ~VIF_MASK;
5523}
5524
5525void helper_sti_vme(void)
5526{
5527 /* First check, then change eflags according to the AMD manual */
5528 if (env->eflags & VIP_MASK) {
5529 raise_exception(EXCP0D_GPF);
5530 }
5531 env->eflags |= VIF_MASK;
5532}
5533#endif /* VBOX */
5534
5535#if 0
5536/* vm86plus instructions */
5537void helper_cli_vm(void)
5538{
5539 env->eflags &= ~VIF_MASK;
5540}
5541
5542void helper_sti_vm(void)
5543{
5544 env->eflags |= VIF_MASK;
5545 if (env->eflags & VIP_MASK) {
5546 raise_exception(EXCP0D_GPF);
5547 }
5548}
5549#endif
5550
5551void helper_set_inhibit_irq(void)
5552{
5553 env->hflags |= HF_INHIBIT_IRQ_MASK;
5554}
5555
5556void helper_reset_inhibit_irq(void)
5557{
5558 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5559}
5560
5561void helper_boundw(target_ulong a0, int v)
5562{
5563 int low, high;
5564 low = ldsw(a0);
5565 high = ldsw(a0 + 2);
5566 v = (int16_t)v;
5567 if (v < low || v > high) {
5568 raise_exception(EXCP05_BOUND);
5569 }
5570}
5571
5572void helper_boundl(target_ulong a0, int v)
5573{
5574 int low, high;
5575 low = ldl(a0);
5576 high = ldl(a0 + 4);
5577 if (v < low || v > high) {
5578 raise_exception(EXCP05_BOUND);
5579 }
5580}
5581
5582static float approx_rsqrt(float a)
5583{
5584 return 1.0 / sqrt(a);
5585}
5586
5587static float approx_rcp(float a)
5588{
5589 return 1.0 / a;
5590}
5591
5592#if !defined(CONFIG_USER_ONLY)
5593
5594#define MMUSUFFIX _mmu
5595
5596#define SHIFT 0
5597#include "softmmu_template.h"
5598
5599#define SHIFT 1
5600#include "softmmu_template.h"
5601
5602#define SHIFT 2
5603#include "softmmu_template.h"
5604
5605#define SHIFT 3
5606#include "softmmu_template.h"
5607
5608#endif
5609
5610#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
5611/* This code assumes real physical addresses always fit into a host CPU register,
5612 which is wrong in general, but true for our current use cases. */
5613RTCCUINTREG REGPARM __ldb_vbox_phys(RTCCUINTREG addr)
5614{
5615 return remR3PhysReadS8(addr);
5616}
5617RTCCUINTREG REGPARM __ldub_vbox_phys(RTCCUINTREG addr)
5618{
5619 return remR3PhysReadU8(addr);
5620}
5621void REGPARM __stb_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5622{
5623 remR3PhysWriteU8(addr, val);
5624}
5625RTCCUINTREG REGPARM __ldw_vbox_phys(RTCCUINTREG addr)
5626{
5627 return remR3PhysReadS16(addr);
5628}
5629RTCCUINTREG REGPARM __lduw_vbox_phys(RTCCUINTREG addr)
5630{
5631 return remR3PhysReadU16(addr);
5632}
5633void REGPARM __stw_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5634{
5635 remR3PhysWriteU16(addr, val);
5636}
5637RTCCUINTREG REGPARM __ldl_vbox_phys(RTCCUINTREG addr)
5638{
5639 return remR3PhysReadS32(addr);
5640}
5641RTCCUINTREG REGPARM __ldul_vbox_phys(RTCCUINTREG addr)
5642{
5643 return remR3PhysReadU32(addr);
5644}
5645void REGPARM __stl_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5646{
5647 remR3PhysWriteU32(addr, val);
5648}
5649uint64_t REGPARM __ldq_vbox_phys(RTCCUINTREG addr)
5650{
5651 return remR3PhysReadU64(addr);
5652}
5653void REGPARM __stq_vbox_phys(RTCCUINTREG addr, uint64_t val)
5654{
5655 remR3PhysWriteU64(addr, val);
5656}
5657#endif /* VBOX */
5658
5659#if !defined(CONFIG_USER_ONLY)
5660/* try to fill the TLB and return an exception if error. If retaddr is
5661 NULL, it means that the function was called in C code (i.e. not
5662 from generated code or from helper.c) */
5663/* XXX: fix it to restore all registers */
5664void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
5665{
5666 TranslationBlock *tb;
5667 int ret;
5668 uintptr_t pc;
5669 CPUX86State *saved_env;
5670
5671 /* XXX: hack to restore env in all cases, even if not called from
5672 generated code */
5673 saved_env = env;
5674 env = cpu_single_env;
5675
5676 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
5677 if (ret) {
5678 if (retaddr) {
5679 /* now we have a real cpu fault */
5680 pc = (uintptr_t)retaddr;
5681 tb = tb_find_pc(pc);
5682 if (tb) {
5683 /* the PC is inside the translated code. It means that we have
5684 a virtual CPU fault */
5685 cpu_restore_state(tb, env, pc, NULL);
5686 }
5687 }
5688 raise_exception_err(env->exception_index, env->error_code);
5689 }
5690 env = saved_env;
5691}
5692#endif
5693
5694#ifdef VBOX
5695
5696/**
5697 * Correctly computes the eflags.
5698 * @returns eflags.
5699 * @param env1 CPU environment.
5700 */
5701uint32_t raw_compute_eflags(CPUX86State *env1)
5702{
5703 CPUX86State *savedenv = env;
5704 uint32_t efl;
5705 env = env1;
5706 efl = compute_eflags();
5707 env = savedenv;
5708 return efl;
5709}
5710
5711/**
5712 * Reads byte from virtual address in guest memory area.
5713 * XXX: is it working for any addresses? swapped out pages?
5714 * @returns read data byte.
5715 * @param env1 CPU environment.
5716 * @param pvAddr GC Virtual address.
5717 */
5718uint8_t read_byte(CPUX86State *env1, target_ulong addr)
5719{
5720 CPUX86State *savedenv = env;
5721 uint8_t u8;
5722 env = env1;
5723 u8 = ldub_kernel(addr);
5724 env = savedenv;
5725 return u8;
5726}
5727
5728/**
5729 * Reads word (16 bits) from virtual address in guest memory area.
5730 * XXX: is it working for any addresses? swapped out pages?
5731 * @returns read data word.
5732 * @param env1 CPU environment.
5733 * @param pvAddr GC Virtual address.
5734 */
5735uint16_t read_word(CPUX86State *env1, target_ulong addr)
5736{
5737 CPUX86State *savedenv = env;
5738 uint16_t u16;
5739 env = env1;
5740 u16 = lduw_kernel(addr);
5741 env = savedenv;
5742 return u16;
5743}
5744
5745/**
5746 * Reads dword (32 bits) from virtual address in guest memory area.
5747 * XXX: is it working for any addresses? swapped out pages?
5748 * @returns read data dword.
5749 * @param env1 CPU environment.
5750 * @param pvAddr GC Virtual address.
5751 */
5752uint32_t read_dword(CPUX86State *env1, target_ulong addr)
5753{
5754 CPUX86State *savedenv = env;
5755 uint32_t u32;
5756 env = env1;
5757 u32 = ldl_kernel(addr);
5758 env = savedenv;
5759 return u32;
5760}
5761
5762/**
5763 * Writes byte to virtual address in guest memory area.
5764 * XXX: is it working for any addresses? swapped out pages?
5766 * @param env1 CPU environment.
5767 * @param pvAddr GC Virtual address.
5768 * @param val byte value
5769 */
5770void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
5771{
5772 CPUX86State *savedenv = env;
5773 env = env1;
5774 stb(addr, val);
5775 env = savedenv;
5776}
5777
5778void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
5779{
5780 CPUX86State *savedenv = env;
5781 env = env1;
5782 stw(addr, val);
5783 env = savedenv;
5784}
5785
5786void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
5787{
5788 CPUX86State *savedenv = env;
5789 env = env1;
5790 stl(addr, val);
5791 env = savedenv;
5792}
5793
5794/**
5795 * Correctly loads selector into segment register with updating internal
5796 * qemu data/caches.
5797 * @param env1 CPU environment.
5798 * @param seg_reg Segment register.
5799 * @param selector Selector to load.
5800 */
5801void sync_seg(CPUX86State *env1, int seg_reg, int selector)
5802{
5803 CPUX86State *savedenv = env;
5804#ifdef FORCE_SEGMENT_SYNC
5805 jmp_buf old_buf;
5806#endif
5807
5808 env = env1;
5809
5810 if ( env->eflags & X86_EFL_VM
5811 || !(env->cr[0] & X86_CR0_PE))
5812 {
5813 load_seg_vm(seg_reg, selector);
5814
5815 env = savedenv;
5816
5817 /* Successful sync. */
5818 Assert(env1->segs[seg_reg].newselector == 0);
5819 }
5820 else
5821 {
5822 /* For some reason it works even without saving/restoring the jump buffer, and as this code is
5823 time critical, let's not do that. */
5824#ifdef FORCE_SEGMENT_SYNC
5825 memcpy(&old_buf, &env1->jmp_env, sizeof(old_buf));
5826#endif
5827 if (setjmp(env1->jmp_env) == 0)
5828 {
5829 if (seg_reg == R_CS)
5830 {
5831 uint32_t e1, e2;
5832 e1 = e2 = 0;
5833 load_segment(&e1, &e2, selector);
5834 cpu_x86_load_seg_cache(env, R_CS, selector,
5835 get_seg_base(e1, e2),
5836 get_seg_limit(e1, e2),
5837 e2);
5838 }
5839 else
5840 helper_load_seg(seg_reg, selector);
5841 /* We used to use tss_load_seg(seg_reg, selector); which, for some reason, ignored
5842 loading 0 selectors, which in turn led to subtle problems like #3588 */
5843
5844 env = savedenv;
5845
5846 /* Successful sync. */
5847 Assert(env1->segs[seg_reg].newselector == 0);
5848 }
5849 else
5850 {
5851 env = savedenv;
5852
5853 /* Postpone sync until the guest uses the selector. */
5854 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
5855 env1->segs[seg_reg].newselector = selector;
5856 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
5857 env1->exception_index = -1;
5858 env1->error_code = 0;
5859 env1->old_exception = -1;
5860 }
5861#ifdef FORCE_SEGMENT_SYNC
5862 memcpy(&env1->jmp_env, &old_buf, sizeof(old_buf));
5863#endif
5864 }
5865
5866}
5867
5868DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
5869{
5870 tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
5871}
5872
5873
5874int emulate_single_instr(CPUX86State *env1)
5875{
5876 TranslationBlock *tb;
5877 TranslationBlock *current;
5878 int flags;
5879 uint8_t *tc_ptr;
5880 target_ulong old_eip;
5881
5882 /* ensures env is loaded! */
5883 CPUX86State *savedenv = env;
5884 env = env1;
5885
5886 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);
5887
5888 current = env->current_tb;
5889 env->current_tb = NULL;
5890 flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
5891
5892 /*
5893 * Translate only one instruction.
5894 */
5895 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
5896 tb = tb_gen_code(env, env->eip + env->segs[R_CS].base,
5897 env->segs[R_CS].base, flags, 0);
5898
5899 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
5900
5901
5902 /* tb_link_phys: */
5903 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
5904 tb->jmp_next[0] = NULL;
5905 tb->jmp_next[1] = NULL;
5906 Assert(tb->jmp_next[0] == NULL);
5907 Assert(tb->jmp_next[1] == NULL);
5908 if (tb->tb_next_offset[0] != 0xffff)
5909 tb_reset_jump(tb, 0);
5910 if (tb->tb_next_offset[1] != 0xffff)
5911 tb_reset_jump(tb, 1);
5912
5913 /*
5914 * Execute it using emulation
5915 */
5916 old_eip = env->eip;
5917 env->current_tb = tb;
5918
5919 /*
5920 * eip remains the same for repeated instructions; it is unclear why qemu does not jump inside the generated code.
5921 * Perhaps not a very safe hack.
5922 */
5923 while (old_eip == env->eip)
5924 {
5925 tc_ptr = tb->tc_ptr;
5926
5927#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
5928 int fake_ret;
5929 tcg_qemu_tb_exec(tc_ptr, fake_ret);
5930#else
5931 tcg_qemu_tb_exec(tc_ptr);
5932#endif
5933
5934 /*
5935 * Exit once we detect an external interrupt and interrupts are enabled
5936 */
5937 if ( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER))
5938 || ( (env->eflags & IF_MASK)
5939 && !(env->hflags & HF_INHIBIT_IRQ_MASK)
5940 && (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) )
5941 )
5942 {
5943 break;
5944 }
5945 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_FLUSH_TLB) {
5946 tlb_flush(env, true);
5947 }
5948 }
5949 env->current_tb = current;
5950
5951 tb_phys_invalidate(tb, -1);
5952 tb_free(tb);
5953/*
5954 Assert(tb->tb_next_offset[0] == 0xffff);
5955 Assert(tb->tb_next_offset[1] == 0xffff);
5956 Assert(tb->tb_next[0] == 0xffff);
5957 Assert(tb->tb_next[1] == 0xffff);
5958 Assert(tb->jmp_next[0] == NULL);
5959 Assert(tb->jmp_next[1] == NULL);
5960 Assert(tb->jmp_first == NULL); */
5961
5962 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
5963
5964 /*
5965 * Execute the next instruction when we encounter instruction fusing.
5966 */
5967 if (env->hflags & HF_INHIBIT_IRQ_MASK)
5968 {
5969 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %RGv\n", env->eip));
5970 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5971 emulate_single_instr(env);
5972 }
5973
5974 env = savedenv;
5975 return 0;
5976}
5977
5978/**
5979 * Correctly loads a new ldtr selector.
5980 *
5981 * @param env1 CPU environment.
5982 * @param selector Selector to load.
5983 */
5984void sync_ldtr(CPUX86State *env1, int selector)
5985{
5986 CPUX86State *saved_env = env;
5987 if (setjmp(env1->jmp_env) == 0)
5988 {
5989 env = env1;
5990 helper_lldt(selector);
5991 env = saved_env;
5992 }
5993 else
5994 {
5995 env = saved_env;
5996#ifdef VBOX_STRICT
5997 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
5998#endif
5999 }
6000}
6001
6002int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
6003 uint32_t *esp_ptr, int dpl)
6004{
6005 int type, index, shift;
6006
6007 CPUX86State *savedenv = env;
6008 env = env1;
6009
6010 if (!(env->tr.flags & DESC_P_MASK))
6011 cpu_abort(env, "invalid tss");
6012 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
6013 if ((type & 7) != 1)
6014 cpu_abort(env, "invalid tss type %d", type);
6015 shift = type >> 3;
6016 index = (dpl * 4 + 2) << shift;
6017 if (index + (4 << shift) - 1 > env->tr.limit)
6018 {
6019 env = savedenv;
6020 return 0;
6021 }
6022 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
6023
6024 if (shift == 0) {
6025 *esp_ptr = lduw_kernel(env->tr.base + index);
6026 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
6027 } else {
6028 *esp_ptr = ldl_kernel(env->tr.base + index);
6029 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
6030 }
6031
6032 env = savedenv;
6033 return 1;
6034}
6035
6036//*****************************************************************************
6037// Needs to be at the bottom of the file (overriding macros)
6038
6039static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
6040{
6041#ifdef USE_X86LDOUBLE
6042 CPU86_LDoubleU tmp;
6043 tmp.l.lower = *(uint64_t const *)ptr;
6044 tmp.l.upper = *(uint16_t const *)(ptr + 8);
6045 return tmp.d;
6046#else
6047# error "Busted FPU saving/restoring!"
6048 return *(CPU86_LDouble *)ptr;
6049#endif
6050}
6051
6052static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
6053{
6054#ifdef USE_X86LDOUBLE
6055 CPU86_LDoubleU tmp;
6056 tmp.d = f;
6057 *(uint64_t *)(ptr + 0) = tmp.l.lower;
6058 *(uint16_t *)(ptr + 8) = tmp.l.upper;
6059 *(uint16_t *)(ptr + 10) = 0;
6060 *(uint32_t *)(ptr + 12) = 0;
6061 AssertCompile(sizeof(long double) > 8);
6062#else
6063# error "Busted FPU saving/restoring!"
6064 *(CPU86_LDouble *)ptr = f;
6065#endif
6066}
6067
6068#undef stw
6069#undef stl
6070#undef stq
6071#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
6072#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
6073#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
6074
6075//*****************************************************************************
6076void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6077{
6078 int fpus, fptag, i, nb_xmm_regs;
6079 CPU86_LDouble tmp;
6080 uint8_t *addr;
6081 int data64 = !!(env->hflags & HF_LMA_MASK);
6082
6083 if (env->cpuid_features & CPUID_FXSR)
6084 {
6085 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6086 fptag = 0;
6087 for(i = 0; i < 8; i++) {
6088 fptag |= (env->fptags[i] << i);
6089 }
6090 stw(ptr, env->fpuc);
6091 stw(ptr + 2, fpus);
6092 stw(ptr + 4, fptag ^ 0xff);
6093
6094 addr = ptr + 0x20;
6095 for(i = 0;i < 8; i++) {
6096 tmp = ST(i);
6097 helper_fstt_raw(tmp, addr);
6098 addr += 16;
6099 }
6100
6101 if (env->cr[4] & CR4_OSFXSR_MASK) {
6102 /* XXX: finish it */
6103 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
6104 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
6105 nb_xmm_regs = 8 << data64;
6106 addr = ptr + 0xa0;
6107 for(i = 0; i < nb_xmm_regs; i++) {
6108#if __GNUC__ < 4
6109 stq(addr, env->xmm_regs[i].XMM_Q(0));
6110 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
6111#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
6112 stl(addr, env->xmm_regs[i].XMM_L(0));
6113 stl(addr + 4, env->xmm_regs[i].XMM_L(1));
6114 stl(addr + 8, env->xmm_regs[i].XMM_L(2));
6115 stl(addr + 12, env->xmm_regs[i].XMM_L(3));
6116#endif
6117 addr += 16;
6118 }
6119 }
6120 }
6121 else
6122 {
6123 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6124 int fptag;
6125
6126 fp->FCW = env->fpuc;
6127 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6128 fptag = 0;
6129 for (i=7; i>=0; i--) {
6130 fptag <<= 2;
6131 if (env->fptags[i]) {
6132 fptag |= 3;
6133 } else {
6134 /* the FPU automatically computes it */
6135 }
6136 }
6137 fp->FTW = fptag;
6138
6139 for(i = 0;i < 8; i++) {
6140 tmp = ST(i);
6141 helper_fstt_raw(tmp, &fp->regs[i].au8[0]);
6142 }
6143 }
6144}
6145
6146//*****************************************************************************
6147#undef lduw
6148#undef ldl
6149#undef ldq
6150#define lduw(a) *(uint16_t *)(a)
6151#define ldl(a) *(uint32_t *)(a)
6152#define ldq(a) *(uint64_t *)(a)
6153//*****************************************************************************
6154void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6155{
6156 int i, fpus, fptag, nb_xmm_regs;
6157 CPU86_LDouble tmp;
6158 uint8_t *addr;
6159 int data64 = !!(env->hflags & HF_LMA_MASK); /* don't use HF_CS64_MASK here as cs hasn't been synced when this function is called. */
6160
6161 if (env->cpuid_features & CPUID_FXSR)
6162 {
6163 env->fpuc = lduw(ptr);
6164 fpus = lduw(ptr + 2);
6165 fptag = lduw(ptr + 4);
6166 env->fpstt = (fpus >> 11) & 7;
6167 env->fpus = fpus & ~0x3800;
6168 fptag ^= 0xff;
6169 for(i = 0;i < 8; i++) {
6170 env->fptags[i] = ((fptag >> i) & 1);
6171 }
6172
6173 addr = ptr + 0x20;
6174 for(i = 0;i < 8; i++) {
6175 tmp = helper_fldt_raw(addr);
6176 ST(i) = tmp;
6177 addr += 16;
6178 }
6179
6180 if (env->cr[4] & CR4_OSFXSR_MASK) {
6181 /* XXX: finish it, endianness */
6182 env->mxcsr = ldl(ptr + 0x18);
6183 //ldl(ptr + 0x1c);
6184 nb_xmm_regs = 8 << data64;
6185 addr = ptr + 0xa0;
6186 for(i = 0; i < nb_xmm_regs; i++) {
6187#if HC_ARCH_BITS == 32
6188 /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
6189 env->xmm_regs[i].XMM_L(0) = ldl(addr);
6190 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
6191 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
6192 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
6193#else
6194 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
6195 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
6196#endif
6197 addr += 16;
6198 }
6199 }
6200 }
6201 else
6202 {
6203 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6204 int fptag, j;
6205
6206 env->fpuc = fp->FCW;
6207 env->fpstt = (fp->FSW >> 11) & 7;
6208 env->fpus = fp->FSW & ~0x3800;
6209 fptag = fp->FTW;
6210 for(i = 0;i < 8; i++) {
6211 env->fptags[i] = ((fptag & 3) == 3);
6212 fptag >>= 2;
6213 }
6214 j = env->fpstt;
6215 for(i = 0;i < 8; i++) {
6216 tmp = helper_fldt_raw(&fp->regs[i].au8[0]);
6217 ST(i) = tmp;
6218 }
6219 }
6220}
6221//*****************************************************************************
6222//*****************************************************************************
6223
6224#endif /* VBOX */
6225
6226/* Secure Virtual Machine helpers */
6227
6228#if defined(CONFIG_USER_ONLY)
6229
6230void helper_vmrun(int aflag, int next_eip_addend)
6231{
6232}
6233void helper_vmmcall(void)
6234{
6235}
6236void helper_vmload(int aflag)
6237{
6238}
6239void helper_vmsave(int aflag)
6240{
6241}
6242void helper_stgi(void)
6243{
6244}
6245void helper_clgi(void)
6246{
6247}
6248void helper_skinit(void)
6249{
6250}
6251void helper_invlpga(int aflag)
6252{
6253}
6254void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6255{
6256}
6257void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6258{
6259}
6260
6261void helper_svm_check_io(uint32_t port, uint32_t param,
6262 uint32_t next_eip_addend)
6263{
6264}
6265#else
6266
6267static inline void svm_save_seg(target_phys_addr_t addr,
6268 const SegmentCache *sc)
6269{
6270 stw_phys(addr + offsetof(struct vmcb_seg, selector),
6271 sc->selector);
6272 stq_phys(addr + offsetof(struct vmcb_seg, base),
6273 sc->base);
6274 stl_phys(addr + offsetof(struct vmcb_seg, limit),
6275 sc->limit);
6276 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
6277 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
6278}
6279
6280static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6281{
6282 unsigned int flags;
6283
6284 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
6285 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
6286 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
6287 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
6288 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
6289}
6290
6291static inline void svm_load_seg_cache(target_phys_addr_t addr,
6292 CPUState *env, int seg_reg)
6293{
6294 SegmentCache sc1, *sc = &sc1;
6295 svm_load_seg(addr, sc);
6296 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
6297 sc->base, sc->limit, sc->flags);
6298}
6299
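/*
 * VMRUN as modelled here: (1) save the host context into the host save area
 * at env->vm_hsave, (2) cache the guest's intercept bitmaps in 'env', (3)
 * load the guest context from the VMCB addressed by rAX, (4) set GIF and a
 * pending virtual IRQ if requested, and (5) inject any event described by
 * EVENTINJ before resuming the guest.
 */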
6300void helper_vmrun(int aflag, int next_eip_addend)
6301{
6302 target_ulong addr;
6303 uint32_t event_inj;
6304 uint32_t int_ctl;
6305
6306 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
6307
6308 if (aflag == 2)
6309 addr = EAX;
6310 else
6311 addr = (uint32_t)EAX;
6312
6313 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
6314
6315 env->vm_vmcb = addr;
6316
6317 /* save the current CPU state in the hsave page */
6318 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6319 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6320
6321 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6322 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6323
6324 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
6325 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
6326 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
6327 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
6328 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
6329 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
6330
6331 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
6332 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
6333
6334 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
6335 &env->segs[R_ES]);
6336 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
6337 &env->segs[R_CS]);
6338 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
6339 &env->segs[R_SS]);
6340 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
6341 &env->segs[R_DS]);
6342
6343 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
6344 EIP + next_eip_addend);
6345 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
6346 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
6347
6348 /* load the interception bitmaps so we do not need to access the
6349 vmcb in svm mode */
6350 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
6351 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
6352 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
6353 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
6354 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
6355 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
6356
6357 /* enable intercepts */
6358 env->hflags |= HF_SVMI_MASK;
6359
6360 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
6361
6362 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
6363 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
6364
6365 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
6366 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
6367
6368 /* clear exit_info_2 so we behave like the real hardware */
6369 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
6370
6371 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
6372 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
6373 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
6374 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
6375 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6376 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6377 if (int_ctl & V_INTR_MASKING_MASK) {
6378 env->v_tpr = int_ctl & V_TPR_MASK;
6379 env->hflags2 |= HF2_VINTR_MASK;
6380 if (env->eflags & IF_MASK)
6381 env->hflags2 |= HF2_HIF_MASK;
6382 }
6383
6384 cpu_load_efer(env,
6385 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
6386 env->eflags = 0;
6387 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
6388 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6389 CC_OP = CC_OP_EFLAGS;
6390
6391 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
6392 env, R_ES);
6393 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6394 env, R_CS);
6395 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6396 env, R_SS);
6397 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6398 env, R_DS);
6399
6400 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
6401 env->eip = EIP;
6402 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
6403 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
6404 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
6405 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
6406 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
6407
6408 /* FIXME: guest state consistency checks */
6409
6410 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
6411 case TLB_CONTROL_DO_NOTHING:
6412 break;
6413 case TLB_CONTROL_FLUSH_ALL_ASID:
6414 /* FIXME: this is not 100% correct but should work for now */
6415 tlb_flush(env, 1);
6416 break;
6417 }
6418
6419 env->hflags2 |= HF2_GIF_MASK;
6420
6421 if (int_ctl & V_IRQ_MASK) {
6422 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
6423 }
6424
6425 /* maybe we need to inject an event */
6426 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
6427 if (event_inj & SVM_EVTINJ_VALID) {
6428 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
6429 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
6430 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
6431
6432 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
6433 /* FIXME: need to implement valid_err */
6434 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
6435 case SVM_EVTINJ_TYPE_INTR:
6436 env->exception_index = vector;
6437 env->error_code = event_inj_err;
6438 env->exception_is_int = 0;
6439 env->exception_next_eip = -1;
6440 qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
6441 /* XXX: is it always correct ? */
6442 do_interrupt(vector, 0, 0, 0, 1);
6443 break;
6444 case SVM_EVTINJ_TYPE_NMI:
6445 env->exception_index = EXCP02_NMI;
6446 env->error_code = event_inj_err;
6447 env->exception_is_int = 0;
6448 env->exception_next_eip = EIP;
6449 qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
6450 cpu_loop_exit();
6451 break;
6452 case SVM_EVTINJ_TYPE_EXEPT:
6453 env->exception_index = vector;
6454 env->error_code = event_inj_err;
6455 env->exception_is_int = 0;
6456 env->exception_next_eip = -1;
6457 qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
6458 cpu_loop_exit();
6459 break;
6460 case SVM_EVTINJ_TYPE_SOFT:
6461 env->exception_index = vector;
6462 env->error_code = event_inj_err;
6463 env->exception_is_int = 1;
6464 env->exception_next_eip = EIP;
6465 qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
6466 cpu_loop_exit();
6467 break;
6468 }
6469 qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
6470 }
6471}
6472
6473void helper_vmmcall(void)
6474{
6475 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
6476 raise_exception(EXCP06_ILLOP);
6477}
6478
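/*
 * VMLOAD pulls the "extra" guest state that VMRUN does not touch - FS, GS,
 * TR, LDTR plus KernelGSBase, STAR/LSTAR/CSTAR/SFMASK and the SYSENTER MSRs -
 * from the VMCB whose physical address is in rAX.
 */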
6479void helper_vmload(int aflag)
6480{
6481 target_ulong addr;
6482 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
6483
6484 if (aflag == 2)
6485 addr = EAX;
6486 else
6487 addr = (uint32_t)EAX;
6488
6489 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6490 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6491 env->segs[R_FS].base);
6492
6493 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
6494 env, R_FS);
6495 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
6496 env, R_GS);
6497 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
6498 &env->tr);
6499 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
6500 &env->ldt);
6501
6502#ifdef TARGET_X86_64
6503 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
6504 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
6505 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
6506 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
6507#endif
6508 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
6509 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
6510 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
6511 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
6512}
6513
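/*
 * VMSAVE is the mirror image of VMLOAD above: it writes the same set of
 * segment registers and MSRs from the current CPU state back into the VMCB
 * addressed by rAX.
 */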
6514void helper_vmsave(int aflag)
6515{
6516 target_ulong addr;
6517 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
6518
6519 if (aflag == 2)
6520 addr = EAX;
6521 else
6522 addr = (uint32_t)EAX;
6523
6524 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6525 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6526 env->segs[R_FS].base);
6527
6528 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
6529 &env->segs[R_FS]);
6530 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
6531 &env->segs[R_GS]);
6532 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
6533 &env->tr);
6534 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
6535 &env->ldt);
6536
6537#ifdef TARGET_X86_64
6538 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
6539 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
6540 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
6541 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
6542#endif
6543 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
6544 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
6545 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
6546 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
6547}
6548
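/*
 * STGI/CLGI toggle the global interrupt flag, tracked here as HF2_GIF_MASK in
 * hflags2; while GIF is clear, interrupts are held pending.
 */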
6549void helper_stgi(void)
6550{
6551 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
6552 env->hflags2 |= HF2_GIF_MASK;
6553}
6554
6555void helper_clgi(void)
6556{
6557 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
6558 env->hflags2 &= ~HF2_GIF_MASK;
6559}
6560
6561void helper_skinit(void)
6562{
6563 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
6564 /* XXX: not implemented */
6565 raise_exception(EXCP06_ILLOP);
6566}
6567
6568void helper_invlpga(int aflag)
6569{
6570 target_ulong addr;
6571 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
6572
6573 if (aflag == 2)
6574 addr = EAX;
6575 else
6576 addr = (uint32_t)EAX;
6577
6578 /* XXX: could use the ASID to see if it is needed to do the
6579 flush */
6580 tlb_flush_page(env, addr);
6581}
6582
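/*
 * Central intercept check used by the translated code: it consults the
 * intercept bitmaps cached in 'env' at VMRUN time and triggers a #VMEXIT via
 * helper_vmexit() when the current operation is intercepted. In the VBOX
 * build the body is replaced by an assertion, as nested SVM is handled
 * differently by HM (see the message below).
 */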
6583void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6584{
6585 if (likely(!(env->hflags & HF_SVMI_MASK)))
6586 return;
6587#ifndef VBOX
6588 switch(type) {
6589 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
6590 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
6591 helper_vmexit(type, param);
6592 }
6593 break;
6594 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
6595 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
6596 helper_vmexit(type, param);
6597 }
6598 break;
6599 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
6600 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
6601 helper_vmexit(type, param);
6602 }
6603 break;
6604 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
6605 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
6606 helper_vmexit(type, param);
6607 }
6608 break;
6609 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
6610 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
6611 helper_vmexit(type, param);
6612 }
6613 break;
6614 case SVM_EXIT_MSR:
6615 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
6616 /* FIXME: this should be read in at vmrun (faster this way?) */
6617 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
6618 uint32_t t0, t1;
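            /* The MSR permission map uses two bits per MSR (read, then
               write), laid out in three 2K byte regions for the MSR ranges
               0..0x1fff, 0xc0000000..0xc0001fff and 0xc0010000..0xc0011fff;
               the code below derives a byte offset (t1) and bit offset (t0)
               into that map and vmexits when the selected bit is set. */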
6619 switch((uint32_t)ECX) {
6620 case 0 ... 0x1fff:
6621 t0 = (ECX * 2) % 8;
6622 t1 = ECX / 8;
6623 break;
6624 case 0xc0000000 ... 0xc0001fff:
6625 t0 = (8192 + ECX - 0xc0000000) * 2;
6626 t1 = (t0 / 8);
6627 t0 %= 8;
6628 break;
6629 case 0xc0010000 ... 0xc0011fff:
6630 t0 = (16384 + ECX - 0xc0010000) * 2;
6631 t1 = (t0 / 8);
6632 t0 %= 8;
6633 break;
6634 default:
6635 helper_vmexit(type, param);
6636 t0 = 0;
6637 t1 = 0;
6638 break;
6639 }
6640 if (ldub_phys(addr + t1) & ((1 << param) << t0))
6641 helper_vmexit(type, param);
6642 }
6643 break;
6644 default:
6645 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
6646 helper_vmexit(type, param);
6647 }
6648 break;
6649 }
6650#else /* VBOX */
6651 AssertMsgFailed(("We shouldn't be here, HM supported differently!"));
6652#endif /* VBOX */
6653}
6654
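/*
 * I/O intercepts use one bit per port in the I/O permission map; 'param'
 * carries the access size in bits 4-6, so the mask below covers every port
 * byte touched by a multi-byte access. On an intercepted access the next EIP
 * is stored in exit_info_2 before vmexiting.
 */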
6655void helper_svm_check_io(uint32_t port, uint32_t param,
6656 uint32_t next_eip_addend)
6657{
6658 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
6659 /* FIXME: this should be read in at vmrun (faster this way?) */
6660 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
6661 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
6662 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
6663 /* next EIP */
6664 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
6665 env->eip + next_eip_addend);
6666 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
6667 }
6668 }
6669}
6670
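/*
 * helper_vmexit() performs the #VMEXIT transition: guest state and exit
 * information are written back to the VMCB, the cached intercepts are
 * dropped, the host state is reloaded from env->vm_hsave, GIF is cleared and
 * control returns to the instruction following VMRUN via cpu_loop_exit().
 * The commented paragraphs further down list architectural side effects that
 * are not modelled here.
 */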
6671/* Note: currently only 32 bits of exit_code are used */
6672void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6673{
6674 uint32_t int_ctl;
6675
6676 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
6677 exit_code, exit_info_1,
6678 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
6679 EIP);
6680
6681 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
6682 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
6683 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6684 } else {
6685 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
6686 }
6687
6688 /* Save the VM state in the vmcb */
6689 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
6690 &env->segs[R_ES]);
6691 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6692 &env->segs[R_CS]);
6693 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6694 &env->segs[R_SS]);
6695 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6696 &env->segs[R_DS]);
6697
6698 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6699 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6700
6701 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6702 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6703
6704 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
6705 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
6706 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
6707 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
6708 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
6709
6710 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6711 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
6712 int_ctl |= env->v_tpr & V_TPR_MASK;
6713 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
6714 int_ctl |= V_IRQ_MASK;
6715 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
6716
6717 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
6718 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
6719 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
6720 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
6721 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
6722 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
6723 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
6724
6725 /* Reload the host state from vm_hsave */
6726 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6727 env->hflags &= ~HF_SVMI_MASK;
6728 env->intercept = 0;
6729 env->intercept_exceptions = 0;
6730 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
6731 env->tsc_offset = 0;
6732
6733 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
6734 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
6735
6736 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
6737 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
6738
6739 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
6740 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
6741 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
6742 /* we need to set the efer after the crs so the hidden flags get
6743 set properly */
6744 cpu_load_efer(env,
6745 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
6746 env->eflags = 0;
6747 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
6748 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6749 CC_OP = CC_OP_EFLAGS;
6750
6751 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
6752 env, R_ES);
6753 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
6754 env, R_CS);
6755 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
6756 env, R_SS);
6757 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
6758 env, R_DS);
6759
6760 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
6761 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
6762 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
6763
6764 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
6765 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
6766
6767 /* other setups */
6768 cpu_x86_set_cpl(env, 0);
6769 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
6770 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
6771
6772 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
6773 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
6774 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
6775 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
6776 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);
6777
6778 env->hflags2 &= ~HF2_GIF_MASK;
6779 /* FIXME: Resets the current ASID register to zero (host ASID). */
6780
6781 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
6782
6783 /* Clears the TSC_OFFSET inside the processor. */
6784
6785 /* If the host is in PAE mode, the processor reloads the host's PDPEs
6786 from the page table indicated by the host's CR3. If the PDPEs contain
6787 illegal state, the processor causes a shutdown. */
6788
6789 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
6790 env->cr[0] |= CR0_PE_MASK;
6791 env->eflags &= ~VM_MASK;
6792
6793 /* Disables all breakpoints in the host DR7 register. */
6794
6795 /* Checks the reloaded host state for consistency. */
6796
6797 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
6798 host's code segment or non-canonical (in the case of long mode), a
6799 #GP fault is delivered inside the host. */
6800
6801 /* remove any pending exception */
6802 env->exception_index = -1;
6803 env->error_code = 0;
6804 env->old_exception = -1;
6805
6806 cpu_loop_exit();
6807}
6808
6809#endif
6810
6811/* MMX/SSE */
6812/* XXX: optimize by storing fptt and fptags in the static cpu state */
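/* fptags[] holds one byte per x87 register, 0 = valid and 1 = empty. MMX
   instructions reset the top of stack and mark every register valid (both
   four-byte groups cleared to 0), while EMMS marks them all empty again
   (0x01010101 per group). */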
6813void helper_enter_mmx(void)
6814{
6815 env->fpstt = 0;
6816 *(uint32_t *)(env->fptags) = 0;
6817 *(uint32_t *)(env->fptags + 4) = 0;
6818}
6819
6820void helper_emms(void)
6821{
6822 /* set to empty state */
6823 *(uint32_t *)(env->fptags) = 0x01010101;
6824 *(uint32_t *)(env->fptags + 4) = 0x01010101;
6825}
6826
6827/* XXX: suppress */
6828void helper_movq(void *d, void *s)
6829{
6830 *(uint64_t *)d = *(uint64_t *)s;
6831}
6832
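/*
 * ops_sse.h and helper_template.h are width-generic templates instantiated by
 * defining SHIFT before each include: for ops_sse.h, SHIFT 0 builds the
 * 64-bit MMX variants and SHIFT 1 the 128-bit SSE variants; for
 * helper_template.h, SHIFT 0..3 builds the 8/16/32/64-bit flag helpers.
 */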
6833#define SHIFT 0
6834#include "ops_sse.h"
6835
6836#define SHIFT 1
6837#include "ops_sse.h"
6838
6839#define SHIFT 0
6840#include "helper_template.h"
6841#undef SHIFT
6842
6843#define SHIFT 1
6844#include "helper_template.h"
6845#undef SHIFT
6846
6847#define SHIFT 2
6848#include "helper_template.h"
6849#undef SHIFT
6850
6851#ifdef TARGET_X86_64
6852
6853#define SHIFT 3
6854#include "helper_template.h"
6855#undef SHIFT
6856
6857#endif
6858
6859/* bit operations */
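/* helper_bsf() returns the index of the lowest set bit and would loop forever
   on a zero input, so the translator presumably only calls it with a non-zero
   operand (architecturally, BSF leaves the destination undefined and sets ZF
   when the source is zero). helper_lzcnt() below doubles as BSR when wordsize
   is 0. */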
6860target_ulong helper_bsf(target_ulong t0)
6861{
6862 int count;
6863 target_ulong res;
6864
6865 res = t0;
6866 count = 0;
6867 while ((res & 1) == 0) {
6868 count++;
6869 res >>= 1;
6870 }
6871 return count;
6872}
6873
6874target_ulong helper_lzcnt(target_ulong t0, int wordsize)
6875{
6876 int count;
6877 target_ulong res, mask;
6878
6879 if (wordsize > 0 && t0 == 0) {
6880 return wordsize;
6881 }
6882 res = t0;
6883 count = TARGET_LONG_BITS - 1;
6884 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
6885 while ((res & mask) == 0) {
6886 count--;
6887 res <<= 1;
6888 }
6889 if (wordsize > 0) {
6890 return wordsize - 1 - count;
6891 }
6892 return count;
6893}
6894
6895target_ulong helper_bsr(target_ulong t0)
6896{
6897 return helper_lzcnt(t0, 0);
6898}
6899
6900static int compute_all_eflags(void)
6901{
6902 return CC_SRC;
6903}
6904
6905static int compute_c_eflags(void)
6906{
6907 return CC_SRC & CC_C;
6908}
6909
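/*
 * Entry points of the lazy flags scheme: the translator records the last
 * flag-setting operation in CC_OP together with its operands in
 * CC_SRC/CC_DST, and the compute_all_xxx() / compute_c_xxx() routines
 * (generated by the helper_template.h instantiations above) reconstruct the
 * full set of condition bits, or just CF, only when they are actually needed.
 */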
6910uint32_t helper_cc_compute_all(int op)
6911{
6912 switch (op) {
6913 default: /* should never happen */ return 0;
6914
6915 case CC_OP_EFLAGS: return compute_all_eflags();
6916
6917 case CC_OP_MULB: return compute_all_mulb();
6918 case CC_OP_MULW: return compute_all_mulw();
6919 case CC_OP_MULL: return compute_all_mull();
6920
6921 case CC_OP_ADDB: return compute_all_addb();
6922 case CC_OP_ADDW: return compute_all_addw();
6923 case CC_OP_ADDL: return compute_all_addl();
6924
6925 case CC_OP_ADCB: return compute_all_adcb();
6926 case CC_OP_ADCW: return compute_all_adcw();
6927 case CC_OP_ADCL: return compute_all_adcl();
6928
6929 case CC_OP_SUBB: return compute_all_subb();
6930 case CC_OP_SUBW: return compute_all_subw();
6931 case CC_OP_SUBL: return compute_all_subl();
6932
6933 case CC_OP_SBBB: return compute_all_sbbb();
6934 case CC_OP_SBBW: return compute_all_sbbw();
6935 case CC_OP_SBBL: return compute_all_sbbl();
6936
6937 case CC_OP_LOGICB: return compute_all_logicb();
6938 case CC_OP_LOGICW: return compute_all_logicw();
6939 case CC_OP_LOGICL: return compute_all_logicl();
6940
6941 case CC_OP_INCB: return compute_all_incb();
6942 case CC_OP_INCW: return compute_all_incw();
6943 case CC_OP_INCL: return compute_all_incl();
6944
6945 case CC_OP_DECB: return compute_all_decb();
6946 case CC_OP_DECW: return compute_all_decw();
6947 case CC_OP_DECL: return compute_all_decl();
6948
6949 case CC_OP_SHLB: return compute_all_shlb();
6950 case CC_OP_SHLW: return compute_all_shlw();
6951 case CC_OP_SHLL: return compute_all_shll();
6952
6953 case CC_OP_SARB: return compute_all_sarb();
6954 case CC_OP_SARW: return compute_all_sarw();
6955 case CC_OP_SARL: return compute_all_sarl();
6956
6957#ifdef TARGET_X86_64
6958 case CC_OP_MULQ: return compute_all_mulq();
6959
6960 case CC_OP_ADDQ: return compute_all_addq();
6961
6962 case CC_OP_ADCQ: return compute_all_adcq();
6963
6964 case CC_OP_SUBQ: return compute_all_subq();
6965
6966 case CC_OP_SBBQ: return compute_all_sbbq();
6967
6968 case CC_OP_LOGICQ: return compute_all_logicq();
6969
6970 case CC_OP_INCQ: return compute_all_incq();
6971
6972 case CC_OP_DECQ: return compute_all_decq();
6973
6974 case CC_OP_SHLQ: return compute_all_shlq();
6975
6976 case CC_OP_SARQ: return compute_all_sarq();
6977#endif
6978 }
6979}
6980
6981uint32_t helper_cc_compute_c(int op)
6982{
6983 switch (op) {
6984 default: /* should never happen */ return 0;
6985
6986 case CC_OP_EFLAGS: return compute_c_eflags();
6987
6988 case CC_OP_MULB: return compute_c_mull();
6989 case CC_OP_MULW: return compute_c_mull();
6990 case CC_OP_MULL: return compute_c_mull();
6991
6992 case CC_OP_ADDB: return compute_c_addb();
6993 case CC_OP_ADDW: return compute_c_addw();
6994 case CC_OP_ADDL: return compute_c_addl();
6995
6996 case CC_OP_ADCB: return compute_c_adcb();
6997 case CC_OP_ADCW: return compute_c_adcw();
6998 case CC_OP_ADCL: return compute_c_adcl();
6999
7000 case CC_OP_SUBB: return compute_c_subb();
7001 case CC_OP_SUBW: return compute_c_subw();
7002 case CC_OP_SUBL: return compute_c_subl();
7003
7004 case CC_OP_SBBB: return compute_c_sbbb();
7005 case CC_OP_SBBW: return compute_c_sbbw();
7006 case CC_OP_SBBL: return compute_c_sbbl();
7007
7008 case CC_OP_LOGICB: return compute_c_logicb();
7009 case CC_OP_LOGICW: return compute_c_logicw();
7010 case CC_OP_LOGICL: return compute_c_logicl();
7011
7012 case CC_OP_INCB: return compute_c_incl();
7013 case CC_OP_INCW: return compute_c_incl();
7014 case CC_OP_INCL: return compute_c_incl();
7015
7016 case CC_OP_DECB: return compute_c_incl();
7017 case CC_OP_DECW: return compute_c_incl();
7018 case CC_OP_DECL: return compute_c_incl();
7019
7020 case CC_OP_SHLB: return compute_c_shlb();
7021 case CC_OP_SHLW: return compute_c_shlw();
7022 case CC_OP_SHLL: return compute_c_shll();
7023
7024 case CC_OP_SARB: return compute_c_sarl();
7025 case CC_OP_SARW: return compute_c_sarl();
7026 case CC_OP_SARL: return compute_c_sarl();
7027
7028#ifdef TARGET_X86_64
7029 case CC_OP_MULQ: return compute_c_mull();
7030
7031 case CC_OP_ADDQ: return compute_c_addq();
7032
7033 case CC_OP_ADCQ: return compute_c_adcq();
7034
7035 case CC_OP_SUBQ: return compute_c_subq();
7036
7037 case CC_OP_SBBQ: return compute_c_sbbq();
7038
7039 case CC_OP_LOGICQ: return compute_c_logicq();
7040
7041 case CC_OP_INCQ: return compute_c_incl();
7042
7043 case CC_OP_DECQ: return compute_c_incl();
7044
7045 case CC_OP_SHLQ: return compute_c_shlq();
7046
7047 case CC_OP_SARQ: return compute_c_sarl();
7048#endif
7049 }
7050}