VirtualBox

source: vbox/trunk/src/recompiler/target-i386/helper.c@ 17637

Last change on this file since 17637 was 17039, checked in by vboxsync, 16 years ago

Old REM: New TR sync.

  • Property svn:eol-style set to native
File size: 135.0 KB
/*
 * i386 helpers
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
 * a choice of LGPL license versions is made available with the language indicating
 * that LGPLv2 or any later version may be used, or where a choice of which version
 * of the LGPL is applied is otherwise unspecified.
 */
#ifdef VBOX
# include <VBox/err.h>
# ifdef VBOX_WITH_VMI
#  include <VBox/parav.h>
# endif
#endif
#include "exec.h"

//#define DEBUG_PCALL

#if 0
#define raise_exception_err(a, b)\
do {\
    if (logfile)\
        fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
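
/* parity_table[b] yields CC_P exactly when byte b has an even number of
   set bits, matching the x86 PF definition. A table-free derivation of
   the same value, as a standalone sketch (illustrative only, compiled
   out like the other #if 0 blocks in this file): */
#if 0
#include <stdint.h>
#include <stdio.h>

/* Fold the byte's bits down so bit 0 becomes the XOR of all 8 bits. */
static int even_parity(uint8_t b)
{
    b ^= b >> 4;
    b ^= b >> 2;
    b ^= b >> 1;
    return !(b & 1);            /* even number of 1 bits => PF set */
}

int main(void)
{
    /* 0x03 has two set bits -> PF set; 0x07 has three -> PF clear. */
    printf("%d %d\n", even_parity(0x03), even_parity(0x07)); /* 1 0 */
    return 0;
}
#endif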

/* modulo 17 table */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9, 10, 11, 12, 13, 14, 15,
    16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9, 10, 11, 12, 13, 14,
};

/* modulo 9 table */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
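
/* RCL rotates through CF, so an 8-bit operand plus CF forms a 9-bit ring
   and a 16-bit operand a 17-bit one; the tables reduce the 5-bit rotate
   count modulo 9/17 without a division. A standalone consistency check
   (illustrative, compiled out; assumes the tables above are in scope): */
#if 0
#include <assert.h>

int main(void)
{
    int c;
    for (c = 0; c < 32; c++) {
        assert(rclb_table[c] == c % 9);   /* 8-bit RCL: 9-bit ring */
        assert(rclw_table[c] == c % 17);  /* 16-bit RCL: 17-bit ring */
    }
    return 0;
}
#endif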

const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L, /*pi*/
    0.30102999566398119523L, /*lg2*/
    0.69314718055994530943L, /*ln2*/
    1.44269504088896340739L, /*l2e*/
    3.32192809488736234781L, /*l2t*/
};

/* thread support */

spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void cpu_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void cpu_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

/* return non-zero on error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}
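
/* The two descriptor words split base and limit across several bit
   fields; a standalone decode of a concrete flat 4 GiB descriptor shows
   the same arithmetic (illustrative, compiled out): */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Flat code segment: base 0, limit 0xfffff, G=1 (4 KiB granular). */
    uint32_t e1 = 0x0000ffff, e2 = 0x00cf9a00;
    uint32_t base  = (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
    uint32_t limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & (1 << 23))                   /* DESC_G_MASK */
        limit = (limit << 12) | 0xfff;
    printf("base=%08x limit=%08x\n", base, limit); /* base=00000000 limit=ffffffff */
    return 0;
}
#endif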

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
#ifdef VBOX
    unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK;

    if (seg == R_CS)
        flags |= DESC_CS_MASK;

    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, flags);
#else
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
#endif
}

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type %d", type);
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
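
/* In a 32-bit TSS the ring-N stack pointers sit at fixed slots:
   ESP0/SS0 at 0x04/0x08, ESP1/SS1 at 0x0c/0x10, ESP2/SS2 at 0x14/0x18,
   which is what (dpl * 4 + 2) << shift computes. A quick standalone
   check (illustrative, compiled out): */
#if 0
#include <assert.h>

int main(void)
{
    int shift = 1; /* 32-bit TSS */
    assert(((0 * 4 + 2) << shift) == 0x04); /* ESP0 */
    assert(((1 * 4 + 2) << shift) == 0x0c); /* ESP1 */
    assert(((2 * 4 + 2) << shift) == 0x14); /* ESP2 */
    return 0;
}
#endif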

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
#endif

#if defined(VBOX) && defined(DEBUG)
    printf("switch_tss %x %x %x %d %08x\n", tss_selector, e1, e2, source, next_eip);
#endif

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
#if defined(VBOX) && defined(DEBUG)
        printf("TSS 32 bits switch\n");
        printf("Saving CS=%08X\n", env->segs[R_CS].selector);
#endif
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* first load the registers that cannot raise an exception, then
       reload the ones that may raise one */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* load just the selectors first, as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
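
/* For reference, the 32-bit TSS field offsets used above follow the
   IA-32 layout: 0x1c CR3, 0x20 EIP, 0x24 EFLAGS, 0x28..0x44 the eight
   GPRs in EAX/ECX/EDX/EBX/ESP/EBP/ESI/EDI order, 0x48..0x5c the six
   segment selectors, 0x60 the LDT selector and 0x64 the debug-trap
   word (with the I/O map base at 0x66); hence the 103-byte minimum
   limit checked above. */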

/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
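
/* The I/O permission bitmap has one bit per port; loading a 16-bit word
   that straddles the byte boundary lets a multi-byte access be tested
   in one shift-and-mask. A standalone sketch of the same test
   (illustrative, compiled out): */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t bitmap[8] = { 0 };
    int addr = 5, size = 2;             /* 16-bit access at port 5 */

    bitmap[0] = 0x40;                   /* deny port 6 only */
    /* little-endian word load, as lduw_kernel does above */
    unsigned val = bitmap[addr >> 3] | (bitmap[(addr >> 3) + 1] << 8);
    val >>= addr & 7;
    unsigned mask = (1u << size) - 1;
    /* the word access touches ports 5 and 6; 6 is denied */
    printf("%s\n", (val & mask) ? "deny" : "allow"); /* deny */
    return 0;
}
#endif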

void check_iob_T0(void)
{
    check_io(T0, 1);
}

void check_iow_T0(void)
{
    check_io(T0, 2);
}

void check_iol_T0(void)
{
    check_io(T0, 4);
}

void check_iob_DX(void)
{
    check_io(EDX & 0xffff, 1);
}

void check_iow_DX(void)
{
    check_io(EDX & 0xffff, 2);
}

void check_iol_DX(void)
{
    check_io(EDX & 0xffff, 4);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}
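
/* sp_mask confines stack arithmetic to 16 bits when SS.D=0, so SP wraps
   inside the 64 KiB segment instead of escaping it. A standalone sketch
   of one masked word push (illustrative, compiled out): */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t esp = 0x10000;     /* low 16 bits are 0 */
    uint32_t sp_mask = 0xffff;  /* SS.D = 0: 16-bit stack */
    esp -= 2;                   /* push one word: SP wraps 0 -> 0xfffe */
    printf("store at offset %#x\n", esp & sp_mask); /* 0xfffe */
    return 0;
}
#endif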

/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip, sp_mask;

#ifdef VBOX
# ifdef VBOX_WITH_VMI
    if (    intno == 6
        &&  PARAVIsBiosCall(env->pVM, (RTRCPTR)next_eip, env->regs[R_EAX]))
    {
        env->exception_index = EXCP_PARAV_CALL;
        cpu_loop_exit();
    }
# endif
    if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
        cpu_loop_exit();
#endif

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
#ifdef VBOX /* See page 3-477 of 253666.pdf */
            raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
#else
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
#endif
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
#if defined(VBOX) && defined(DEBUG)
        printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
#endif
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
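
/* Summary of the frame built above on a stack switch to ring dpl
   (32-bit gate; the last item pushed ends up at the lowest address):
       [GS FS DS ES]    only when interrupting vm86 code
       SS  ESP          old stack pointer
       EFLAGS CS EIP    return state
       [error code]     for the faulting exceptions listed earlier */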

#ifdef VBOX

/* check if VME interrupt redirection is enabled in TSS */
static inline bool is_vme_irq_redirected(int intno)
{
    int io_offset, intredir_offset;
    unsigned char val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    /* the virtual interrupt redirection bitmap is located below the I/O bitmap */
    intredir_offset = io_offset - 0x20;

    intredir_offset += (intno >> 3);
    if ((intredir_offset) > env->tr.limit)
        goto fail;

    val = ldub_kernel(env->tr.base + intredir_offset);
    mask = 1 << (unsigned char)(intno & 7);

    /* a set bit means no redirection. */
    if ((val & mask) != 0) {
        return false;
    }
    return true;

fail:
    raise_exception_err(EXCP0D_GPF, 0);
    return true;
}

/* V86 mode software interrupt with CR4.VME=1 */
static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
{
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eflags;
    uint32_t iopl;

    iopl = ((env->eflags >> IOPL_SHIFT) & 3);

    if (!is_vme_irq_redirected(intno))
    {
        if (iopl == 3)
            /* normal protected mode handler call */
            return do_interrupt_protected(intno, 1, error_code, next_eip, 0);
        else
            raise_exception_err(EXCP0D_GPF, 0);
    }

    /* virtual mode idt is at linear address 0 */
    ptr = 0 + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    old_cs = env->segs[R_CS].selector;

    old_eflags = compute_eflags();
    if (iopl < 3)
    {
        /* copy VIF into IF and set IOPL to 3 */
        if (env->eflags & VIF_MASK)
            old_eflags |= IF_MASK;
        else
            old_eflags &= ~IF_MASK;

        old_eflags |= (3 << IOPL_SHIFT);
    }

    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, old_eflags);
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, next_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(TF_MASK | RF_MASK);

    if (iopl < 3)
        env->eflags &= ~VIF_MASK;
    else
        env->eflags &= ~IF_MASK;
}
#endif /* VBOX */

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}
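
/* In the 64-bit TSS the stack pointers live at 0x04 (RSP0), 0x0c (RSP1),
   0x14 (RSP2) and the IST slots at 0x24 + 8 * (n - 1), so 8 * level + 4
   covers both once the caller passes ist + 3 for IST entries. A quick
   standalone check (illustrative, compiled out): */
#if 0
#include <assert.h>

int main(void)
{
    assert(8 * 0 + 4 == 0x04);       /* RSP0 */
    assert(8 * 2 + 4 == 0x14);       /* RSP2 */
    assert(8 * (1 + 3) + 4 == 0x24); /* IST1 */
    return 0;
}
#endif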

/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

#ifdef VBOX
    if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
        cpu_loop_exit();
#endif

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
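
/* The STAR MSR packs the selector bases used above: bits 47:32 give the
   SYSCALL CS (with SS = CS + 8) and bits 63:48 the SYSRET CS base. A
   standalone decode (illustrative, compiled out; the constant is an
   assumed example value, not taken from this file): */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t star = 0x0023001000000000ULL; /* hypothetical STAR value */
    printf("syscall CS=%04x SS=%04x sysret base=%04x\n",
           (unsigned)((star >> 32) & 0xffff),
           (unsigned)(((star >> 32) & 0xffff) + 8),
           (unsigned)((star >> 48) & 0xffff));
    return 0;
}
#endif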

void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

#ifdef VBOX
/**
 * Checks and processes external VMM events.
 * Called by op_check_external_event() when any of the flags is set and can be serviced.
 */
void helper_external_event(void)
{
#if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
    uintptr_t uESP;
    __asm__ __volatile__("movl %%esp, %0" : "=r" (uESP));
    AssertMsg(!(uESP & 15), ("esp=%#p\n", uESP));
#endif
    if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
    {
        ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_HARD);
        cpu_interrupt(env, CPU_INTERRUPT_HARD);
    }
    if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
    {
        ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_EXIT);
        cpu_interrupt(env, CPU_INTERRUPT_EXIT);
    }
    if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
    {
        ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_DMA);
        remR3DmaRun(env);
    }
    if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
    {
        ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_TIMER);
        remR3TimersRun(env);
    }
}

/* helper for recording call instruction addresses for later scanning */
void helper_record_call(void)
{
    if (    !(env->state & CPU_RAW_RING0)
        &&  (env->cr[0] & CR0_PG_MASK)
        &&  !(env->eflags & X86_EFL_IF))
        remR3RecordCall(env);
}
#endif /* VBOX */

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
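
/* In real mode the IVT is a flat array of 4-byte IP:CS pairs at linear
   address 0, so vector n lives at n * 4. A standalone sketch of the
   fetch (illustrative, compiled out; the vector contents are made up): */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Fake IVT: vector 0x10 -> F000:F065 (a classic BIOS entry point). */
    uint8_t ivt[0x44] = { 0 };
    ivt[0x40] = 0x65; ivt[0x41] = 0xf0;  /* IP, little endian */
    ivt[0x42] = 0x00; ivt[0x43] = 0xf0;  /* CS */
    int intno = 0x10;
    unsigned ip = ivt[intno * 4] | (ivt[intno * 4 + 1] << 8);
    unsigned cs = ivt[intno * 4 + 2] | (ivt[intno * 4 + 3] << 8);
    printf("%04X:%04X\n", cs, ip);       /* F000:F065 */
    return 0;
}
#endif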

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl;
    uint32_t e2;

    dt = &env->idt;
    ptr = dt->base + (intno * 8);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);

    /* Since we emulate only user space, we cannot do more than
       exit the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (loglevel & CPU_LOG_INT) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
            }
            fprintf(logfile, "\n");
            cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                fprintf(logfile, " code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    fprintf(logfile, " %02x", ldub(ptr + i));
                }
                fprintf(logfile, "\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
#ifdef VBOX
            /* int xx *, v86 code and VME enabled? */
            if (    (env->eflags & VM_MASK)
                &&  (env->cr[4] & CR4_VME_MASK)
                &&  is_int
                &&  !is_hw
                &&  env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
               )
                do_soft_interrupt_vme(intno, error_code, next_eip);
            else
#endif /* VBOX */
                do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}

/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     int next_eip_addend)
{
#if defined(VBOX) && defined(DEBUG)
    NOT_DMIK(Log2(("raise_interrupt: %x %x %x %RGv\n", intno, is_int, error_code, env->eip + next_eip_addend)));
#endif
    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* same as raise_exception_err, but do not restore global registers */
static void raise_exception_err_norestore(int exception_index, int error_code)
{
    env->exception_index = exception_index;
    env->error_code = error_code;
    env->exception_is_int = 0;
    env->exception_next_eip = 0;
    longjmp(env->jmp_env, 1);
}

/* shortcuts to generate exceptions */

void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif

void do_smm_enter(void)
{
#ifdef VBOX
    cpu_abort(env, "do_smm_enter");
#else /* !VBOX */
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: enter\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    env->efer = 0;
    env->hflags &= ~HF_LMA_MASK;
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
#endif /* VBOX */
}

void helper_rsm(void)
{
#ifdef VBOX
    cpu_abort(env, "helper_rsm");
#else /* !VBOX */
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    env->efer = ldq_phys(sm_state + 0x7ed0);
    if (env->efer & MSR_EFER_LMA)
        env->hflags |= HF_LMA_MASK;
    else
        env->hflags &= ~HF_LMA_MASK;

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: after RSM\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif /* !VBOX */
}

#endif /* !CONFIG_USER_ONLY */

#ifdef BUGGY_GCC_DIV64
/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
   call it from another function */
uint32_t div32(uint64_t *q_ptr, uint64_t num, uint32_t den)
{
    *q_ptr = num / den;
    return num % den;
}

int32_t idiv32(int64_t *q_ptr, int64_t num, int32_t den)
{
    *q_ptr = num / den;
    return num % den;
}
#endif

void helper_divl_EAX_T0(void)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = T0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = div32(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX_T0(void)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = T0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = idiv32(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}
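
/* DIV raises #DE not only on a zero divisor but also when the quotient
   overflows the destination register, which is what the q > 0xffffffff
   check models. A standalone illustration (compiled out): */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* EDX:EAX = 2^33 divided by 1 -> quotient 2^33 cannot fit in EAX,
       so real hardware faults with #DE instead of truncating. */
    uint64_t num = (uint64_t)2 << 32;
    uint32_t den = 1;
    uint64_t q = num / den;
    printf("q=%llx overflow=%d\n",
           (unsigned long long)q, q > 0xffffffffULL); /* overflow=1 */
    return 0;
}
#endif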

void helper_cmpxchg8b(void)
{
    uint64_t d;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d = ldq(A0);
    if (d == (((uint64_t)EDX << 32) | EAX)) {
        stq(A0, ((uint64_t)ECX << 32) | EBX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(A0, d);
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
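
/* CMPXCHG8B compares EDX:EAX with the 8-byte operand: on a match it
   stores ECX:EBX and sets ZF, otherwise it loads the operand into
   EDX:EAX and clears ZF. A standalone sketch of the semantics
   (illustrative, compiled out; not atomic like the real instruction): */
#if 0
#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the compare-exchange above. */
static int cmpxchg8b(uint64_t *mem, uint64_t *edx_eax, uint64_t ecx_ebx)
{
    if (*mem == *edx_eax) {
        *mem = ecx_ebx;
        return 1;           /* ZF set */
    }
    *edx_eax = *mem;
    return 0;               /* ZF clear */
}

int main(void)
{
    uint64_t mem = 0x1122334455667788ULL;
    uint64_t cmp = 0x1122334455667788ULL;
    printf("zf=%d mem=%llx\n", cmpxchg8b(&mem, &cmp, 42ULL),
           (unsigned long long)mem); /* zf=1 mem=2a */
    return 0;
}
#endif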

void helper_single_step(void)
{
    env->dr[6] |= 0x4000;
    raise_exception(EXCP01_SSTP);
}

void helper_cpuid(void)
{
#ifndef VBOX
    uint32_t index;
    index = (uint32_t)EAX;

    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        EAX = env->cpuid_level;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 1:
        EAX = env->cpuid_version;
        EBX = 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        ECX = env->cpuid_ext_features;
        EDX = env->cpuid_features;
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        EAX = 0x410601;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    case 0x80000000:
        EAX = env->cpuid_xlevel;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 0x80000001:
        EAX = env->cpuid_features;
        EBX = 0;
        ECX = 0;
        EDX = env->cpuid_ext2_features;
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        EAX = 0x01ff01ff;
        EBX = 0x01ff01ff;
        ECX = 0x40020140;
        EDX = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        EAX = 0;
        EBX = 0x42004200;
        ECX = 0x02008140;
        EDX = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        EAX = 0x00003028;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    default:
        /* reserved values: zero */
        EAX = 0;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    }
#else /* VBOX */
    remR3CpuId(env, EAX, &EAX, &EBX, &ECX, &EDX);
#endif /* VBOX */
}
1908
1909void helper_enter_level(int level, int data32)
1910{
1911 target_ulong ssp;
1912 uint32_t esp_mask, esp, ebp;
1913
1914 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1915 ssp = env->segs[R_SS].base;
1916 ebp = EBP;
1917 esp = ESP;
1918 if (data32) {
1919 /* 32 bit */
1920 esp -= 4;
1921 while (--level) {
1922 esp -= 4;
1923 ebp -= 4;
1924 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
1925 }
1926 esp -= 4;
1927 stl(ssp + (esp & esp_mask), T1);
1928 } else {
1929 /* 16 bit */
1930 esp -= 2;
1931 while (--level) {
1932 esp -= 2;
1933 ebp -= 2;
1934 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
1935 }
1936 esp -= 2;
1937 stw(ssp + (esp & esp_mask), T1);
1938 }
1939}
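
/* ENTER with a non-zero nesting level maintains a "display" of enclosing
 * frame pointers: the loop above copies level-1 pointers down from the old
 * frame (addressed via ebp) and finally pushes the value supplied in T1 as
 * the new frame pointer, with every stack access wrapped through
 * ssp + (x & esp_mask) so a 16-bit SS behaves correctly. How the
 * translated code splits the remaining ENTER work (saving EBP, allocating
 * the locals) around this helper is not visible here and is left
 * unstated. */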
1940
1941#ifdef TARGET_X86_64
1942void helper_enter64_level(int level, int data64)
1943{
1944 target_ulong esp, ebp;
1945 ebp = EBP;
1946 esp = ESP;
1947
1948 if (data64) {
1949 /* 64 bit */
1950 esp -= 8;
1951 while (--level) {
1952 esp -= 8;
1953 ebp -= 8;
1954 stq(esp, ldq(ebp));
1955 }
1956 esp -= 8;
1957 stq(esp, T1);
1958 } else {
1959 /* 16 bit */
1960 esp -= 2;
1961 while (--level) {
1962 esp -= 2;
1963 ebp -= 2;
1964 stw(esp, lduw(ebp));
1965 }
1966 esp -= 2;
1967 stw(esp, T1);
1968 }
1969}
1970#endif
1971
1972void helper_lldt_T0(void)
1973{
1974 int selector;
1975 SegmentCache *dt;
1976 uint32_t e1, e2;
1977 int index, entry_limit;
1978 target_ulong ptr;
1979#ifdef VBOX
1980 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%RGv, .limit=%RGv} new=%RTsel\n",
1981 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(T0 & 0xffff)));
1982#endif
1983
1984 selector = T0 & 0xffff;
1985 if ((selector & 0xfffc) == 0) {
1986 /* XXX: NULL selector case: invalid LDT */
1987 env->ldt.base = 0;
1988 env->ldt.limit = 0;
1989 } else {
1990 if (selector & 0x4)
1991 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1992 dt = &env->gdt;
1993 index = selector & ~7;
1994#ifdef TARGET_X86_64
1995 if (env->hflags & HF_LMA_MASK)
1996 entry_limit = 15;
1997 else
1998#endif
1999 entry_limit = 7;
2000 if ((index + entry_limit) > dt->limit)
2001 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2002 ptr = dt->base + index;
2003 e1 = ldl_kernel(ptr);
2004 e2 = ldl_kernel(ptr + 4);
2005 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2006 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2007 if (!(e2 & DESC_P_MASK))
2008 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2009#ifdef TARGET_X86_64
2010 if (env->hflags & HF_LMA_MASK) {
2011 uint32_t e3;
2012 e3 = ldl_kernel(ptr + 8);
2013 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2014 env->ldt.base |= (target_ulong)e3 << 32;
2015 } else
2016#endif
2017 {
2018 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2019 }
2020 }
2021 env->ldt.selector = selector;
2022#ifdef VBOX
2023 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%RGv, .limit=%RGv}\n",
2024 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2025#endif
2026}
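
/* In long mode (HF_LMA_MASK) system descriptors such as the LDT are 16
 * bytes instead of 8, hence entry_limit 15 rather than 7 above: the extra
 * dword e3 read from ptr + 8 supplies bits 63:32 of the base, which is
 * OR-ed into env->ldt.base. helper_ltr_T0 below applies the same
 * 8/16-byte rule to the TSS descriptor. */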
2027
2028void helper_ltr_T0(void)
2029{
2030 int selector;
2031 SegmentCache *dt;
2032 uint32_t e1, e2;
2033 int index, type, entry_limit;
2034 target_ulong ptr;
2035
2036#ifdef VBOX
2037 Log(("helper_ltr_T0: old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2038 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2039 env->tr.flags, (RTSEL)(T0 & 0xffff)));
2040#endif
2041
2042 selector = T0 & 0xffff;
2043 if ((selector & 0xfffc) == 0) {
2044 /* NULL selector case: invalid TR */
2045 env->tr.base = 0;
2046 env->tr.limit = 0;
2047 env->tr.flags = 0;
2048 } else {
2049 if (selector & 0x4)
2050 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2051 dt = &env->gdt;
2052 index = selector & ~7;
2053#ifdef TARGET_X86_64
2054 if (env->hflags & HF_LMA_MASK)
2055 entry_limit = 15;
2056 else
2057#endif
2058 entry_limit = 7;
2059 if ((index + entry_limit) > dt->limit)
2060 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2061 ptr = dt->base + index;
2062 e1 = ldl_kernel(ptr);
2063 e2 = ldl_kernel(ptr + 4);
2064 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2065 if ((e2 & DESC_S_MASK) ||
2066 (type != 1 && type != 9))
2067 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2068 if (!(e2 & DESC_P_MASK))
2069 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2070#ifdef TARGET_X86_64
2071 if (env->hflags & HF_LMA_MASK) {
2072 uint32_t e3;
2073 e3 = ldl_kernel(ptr + 8);
2074 load_seg_cache_raw_dt(&env->tr, e1, e2);
2075 env->tr.base |= (target_ulong)e3 << 32;
2076 } else
2077#endif
2078 {
2079 load_seg_cache_raw_dt(&env->tr, e1, e2);
2080 }
2081 e2 |= DESC_TSS_BUSY_MASK;
2082 stl_kernel(ptr + 4, e2);
2083 }
2084 env->tr.selector = selector;
2085#ifdef VBOX
2086 Log(("helper_ltr_T0: new tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2087 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2088 env->tr.flags, (RTSEL)(T0 & 0xffff)));
2089#endif
2090}
2091
2092/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2093void load_seg(int seg_reg, int selector)
2094{
2095 uint32_t e1, e2;
2096 int cpl, dpl, rpl;
2097 SegmentCache *dt;
2098 int index;
2099 target_ulong ptr;
2100
2101 selector &= 0xffff;
2102 cpl = env->hflags & HF_CPL_MASK;
2103
2104#ifdef VBOX
2105    /* Trying to load a selector with RPL=1? (cpl is 0 and raw ring 0 is active here) */
2106 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2107 {
2108 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
2109 selector = selector & 0xfffc;
2110 }
2111#endif
2112
2113 if ((selector & 0xfffc) == 0) {
2114 /* null selector case */
2115 if (seg_reg == R_SS
2116#ifdef TARGET_X86_64
2117 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2118#endif
2119 )
2120 raise_exception_err(EXCP0D_GPF, 0);
2121 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2122 } else {
2123
2124 if (selector & 0x4)
2125 dt = &env->ldt;
2126 else
2127 dt = &env->gdt;
2128 index = selector & ~7;
2129 if ((index + 7) > dt->limit)
2130 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2131 ptr = dt->base + index;
2132 e1 = ldl_kernel(ptr);
2133 e2 = ldl_kernel(ptr + 4);
2134
2135 if (!(e2 & DESC_S_MASK))
2136 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2137 rpl = selector & 3;
2138 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2139 if (seg_reg == R_SS) {
2140 /* must be writable segment */
2141 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2142 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2143 if (rpl != cpl || dpl != cpl)
2144 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2145 } else {
2146 /* must be readable segment */
2147 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2148 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2149
2150 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2151 /* if not conforming code, test rights */
2152 if (dpl < cpl || dpl < rpl)
2153 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2154 }
2155 }
2156
2157 if (!(e2 & DESC_P_MASK)) {
2158 if (seg_reg == R_SS)
2159 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2160 else
2161 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2162 }
2163
2164 /* set the access bit if not already set */
2165 if (!(e2 & DESC_A_MASK)) {
2166 e2 |= DESC_A_MASK;
2167 stl_kernel(ptr + 4, e2);
2168 }
2169
2170 cpu_x86_load_seg_cache(env, seg_reg, selector,
2171 get_seg_base(e1, e2),
2172 get_seg_limit(e1, e2),
2173 e2);
2174#if 0
2175 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2176            selector, (unsigned long)get_seg_base(e1, e2), get_seg_limit(e1, e2), e2);
2177#endif
2178 }
2179}
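
/* Privilege-check example for the rules coded above (illustration only):
 * with CPL=3, loading DS with an RPL=3 selector whose descriptor is a data
 * segment with DPL=3 succeeds, while DPL=0 fails the
 * "dpl < cpl || dpl < rpl" test and raises #GP with the selector as the
 * error code. A conforming code segment skips that test entirely, matching
 * the architectural exemption for conforming segments. */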
2180
2181/* protected mode jump */
2182void helper_ljmp_protected_T0_T1(int next_eip_addend)
2183{
2184 int new_cs, gate_cs, type;
2185 uint32_t e1, e2, cpl, dpl, rpl, limit;
2186 target_ulong new_eip, next_eip;
2187
2188 new_cs = T0;
2189 new_eip = T1;
2190 if ((new_cs & 0xfffc) == 0)
2191 raise_exception_err(EXCP0D_GPF, 0);
2192 if (load_segment(&e1, &e2, new_cs) != 0)
2193 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2194 cpl = env->hflags & HF_CPL_MASK;
2195 if (e2 & DESC_S_MASK) {
2196 if (!(e2 & DESC_CS_MASK))
2197 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2198 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2199 if (e2 & DESC_C_MASK) {
2200 /* conforming code segment */
2201 if (dpl > cpl)
2202 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2203 } else {
2204 /* non conforming code segment */
2205 rpl = new_cs & 3;
2206 if (rpl > cpl)
2207 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2208 if (dpl != cpl)
2209 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2210 }
2211 if (!(e2 & DESC_P_MASK))
2212 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2213 limit = get_seg_limit(e1, e2);
2214 if (new_eip > limit &&
2215 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2216 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2217 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2218 get_seg_base(e1, e2), limit, e2);
2219 EIP = new_eip;
2220 } else {
2221 /* jump to call or task gate */
2222 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2223 rpl = new_cs & 3;
2224 cpl = env->hflags & HF_CPL_MASK;
2225 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2226 switch(type) {
2227 case 1: /* 286 TSS */
2228 case 9: /* 386 TSS */
2229 case 5: /* task gate */
2230 if (dpl < cpl || dpl < rpl)
2231 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2232 next_eip = env->eip + next_eip_addend;
2233 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2234 CC_OP = CC_OP_EFLAGS;
2235 break;
2236 case 4: /* 286 call gate */
2237 case 12: /* 386 call gate */
2238 if ((dpl < cpl) || (dpl < rpl))
2239 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2240 if (!(e2 & DESC_P_MASK))
2241 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2242 gate_cs = e1 >> 16;
2243 new_eip = (e1 & 0xffff);
2244 if (type == 12)
2245 new_eip |= (e2 & 0xffff0000);
2246 if (load_segment(&e1, &e2, gate_cs) != 0)
2247 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2248 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2249 /* must be code segment */
2250 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2251 (DESC_S_MASK | DESC_CS_MASK)))
2252 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2253 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2254 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2255 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2256 if (!(e2 & DESC_P_MASK))
2257#ifdef VBOX /* See page 3-514 of 253666.pdf */
2258 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2259#else
2260 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2261#endif
2262 limit = get_seg_limit(e1, e2);
2263 if (new_eip > limit)
2264 raise_exception_err(EXCP0D_GPF, 0);
2265 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2266 get_seg_base(e1, e2), limit, e2);
2267 EIP = new_eip;
2268 break;
2269 default:
2270 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2271 break;
2272 }
2273 }
2274}
2275
2276/* real mode call */
2277void helper_lcall_real_T0_T1(int shift, int next_eip)
2278{
2279 int new_cs, new_eip;
2280 uint32_t esp, esp_mask;
2281 target_ulong ssp;
2282
2283 new_cs = T0;
2284 new_eip = T1;
2285 esp = ESP;
2286 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2287 ssp = env->segs[R_SS].base;
2288 if (shift) {
2289 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2290 PUSHL(ssp, esp, esp_mask, next_eip);
2291 } else {
2292 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2293 PUSHW(ssp, esp, esp_mask, next_eip);
2294 }
2295
2296 SET_ESP(esp, esp_mask);
2297 env->eip = new_eip;
2298 env->segs[R_CS].selector = new_cs;
2299 env->segs[R_CS].base = (new_cs << 4);
2300}
2301
2302/* protected mode call */
2303void helper_lcall_protected_T0_T1(int shift, int next_eip_addend)
2304{
2305 int new_cs, new_stack, i;
2306 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2307 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
2308 uint32_t val, limit, old_sp_mask;
2309 target_ulong ssp, old_ssp, next_eip, new_eip;
2310
2311 new_cs = T0;
2312 new_eip = T1;
2313 next_eip = env->eip + next_eip_addend;
2314#ifdef DEBUG_PCALL
2315 if (loglevel & CPU_LOG_PCALL) {
2316 fprintf(logfile, "lcall %04x:%08x s=%d\n",
2317 new_cs, (uint32_t)new_eip, shift);
2318 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2319 }
2320#endif
2321 if ((new_cs & 0xfffc) == 0)
2322 raise_exception_err(EXCP0D_GPF, 0);
2323 if (load_segment(&e1, &e2, new_cs) != 0)
2324 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2325 cpl = env->hflags & HF_CPL_MASK;
2326#ifdef DEBUG_PCALL
2327 if (loglevel & CPU_LOG_PCALL) {
2328 fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
2329 }
2330#endif
2331 if (e2 & DESC_S_MASK) {
2332 if (!(e2 & DESC_CS_MASK))
2333 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2334 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2335 if (e2 & DESC_C_MASK) {
2336 /* conforming code segment */
2337 if (dpl > cpl)
2338 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2339 } else {
2340 /* non conforming code segment */
2341 rpl = new_cs & 3;
2342 if (rpl > cpl)
2343 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2344 if (dpl != cpl)
2345 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2346 }
2347 if (!(e2 & DESC_P_MASK))
2348 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2349
2350#ifdef TARGET_X86_64
2351 /* XXX: check 16/32 bit cases in long mode */
2352 if (shift == 2) {
2353 target_ulong rsp;
2354 /* 64 bit case */
2355 rsp = ESP;
2356 PUSHQ(rsp, env->segs[R_CS].selector);
2357 PUSHQ(rsp, next_eip);
2358 /* from this point, not restartable */
2359 ESP = rsp;
2360 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2361 get_seg_base(e1, e2),
2362 get_seg_limit(e1, e2), e2);
2363 EIP = new_eip;
2364 } else
2365#endif
2366 {
2367 sp = ESP;
2368 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2369 ssp = env->segs[R_SS].base;
2370 if (shift) {
2371 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2372 PUSHL(ssp, sp, sp_mask, next_eip);
2373 } else {
2374 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2375 PUSHW(ssp, sp, sp_mask, next_eip);
2376 }
2377
2378 limit = get_seg_limit(e1, e2);
2379 if (new_eip > limit)
2380 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2381 /* from this point, not restartable */
2382 SET_ESP(sp, sp_mask);
2383 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2384 get_seg_base(e1, e2), limit, e2);
2385 EIP = new_eip;
2386 }
2387 } else {
2388 /* check gate type */
2389 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2390 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2391 rpl = new_cs & 3;
2392 switch(type) {
2393 case 1: /* available 286 TSS */
2394 case 9: /* available 386 TSS */
2395 case 5: /* task gate */
2396 if (dpl < cpl || dpl < rpl)
2397 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2398 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2399 CC_OP = CC_OP_EFLAGS;
2400 return;
2401 case 4: /* 286 call gate */
2402 case 12: /* 386 call gate */
2403 break;
2404 default:
2405 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2406 break;
2407 }
2408 shift = type >> 3;
2409
2410 if (dpl < cpl || dpl < rpl)
2411 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2412 /* check valid bit */
2413 if (!(e2 & DESC_P_MASK))
2414 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2415 selector = e1 >> 16;
2416 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2417 param_count = e2 & 0x1f;
2418 if ((selector & 0xfffc) == 0)
2419 raise_exception_err(EXCP0D_GPF, 0);
2420
2421 if (load_segment(&e1, &e2, selector) != 0)
2422 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2423 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2424 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2425 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2426 if (dpl > cpl)
2427 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2428 if (!(e2 & DESC_P_MASK))
2429 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2430
2431 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2432        /* to inner privilege */
2433 get_ss_esp_from_tss(&ss, &sp, dpl);
2434#ifdef DEBUG_PCALL
2435 if (loglevel & CPU_LOG_PCALL)
2436 fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2437 ss, sp, param_count, ESP);
2438#endif
2439 if ((ss & 0xfffc) == 0)
2440 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2441 if ((ss & 3) != dpl)
2442 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2443 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2444 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2445 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2446 if (ss_dpl != dpl)
2447 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2448 if (!(ss_e2 & DESC_S_MASK) ||
2449 (ss_e2 & DESC_CS_MASK) ||
2450 !(ss_e2 & DESC_W_MASK))
2451 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2452 if (!(ss_e2 & DESC_P_MASK))
2453#ifdef VBOX /* See page 3-99 of 253666.pdf */
2454 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
2455#else
2456 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2457#endif
2458
2459 // push_size = ((param_count * 2) + 8) << shift;
2460
2461 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2462 old_ssp = env->segs[R_SS].base;
2463
2464 sp_mask = get_sp_mask(ss_e2);
2465 ssp = get_seg_base(ss_e1, ss_e2);
2466 if (shift) {
2467 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2468 PUSHL(ssp, sp, sp_mask, ESP);
2469 for(i = param_count - 1; i >= 0; i--) {
2470 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2471 PUSHL(ssp, sp, sp_mask, val);
2472 }
2473 } else {
2474 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2475 PUSHW(ssp, sp, sp_mask, ESP);
2476 for(i = param_count - 1; i >= 0; i--) {
2477 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2478 PUSHW(ssp, sp, sp_mask, val);
2479 }
2480 }
2481 new_stack = 1;
2482 } else {
2483        /* to same privilege */
2484 sp = ESP;
2485 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2486 ssp = env->segs[R_SS].base;
2487 // push_size = (4 << shift);
2488 new_stack = 0;
2489 }
2490
2491 if (shift) {
2492 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2493 PUSHL(ssp, sp, sp_mask, next_eip);
2494 } else {
2495 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2496 PUSHW(ssp, sp, sp_mask, next_eip);
2497 }
2498
2499 /* from this point, not restartable */
2500
2501 if (new_stack) {
2502 ss = (ss & ~3) | dpl;
2503 cpu_x86_load_seg_cache(env, R_SS, ss,
2504 ssp,
2505 get_seg_limit(ss_e1, ss_e2),
2506 ss_e2);
2507 }
2508
2509 selector = (selector & ~3) | dpl;
2510 cpu_x86_load_seg_cache(env, R_CS, selector,
2511 get_seg_base(e1, e2),
2512 get_seg_limit(e1, e2),
2513 e2);
2514 cpu_x86_set_cpl(env, dpl);
2515 SET_ESP(sp, sp_mask);
2516 EIP = offset;
2517 }
2518#ifdef USE_KQEMU
2519 if (kqemu_is_ok(env)) {
2520 env->exception_index = -1;
2521 cpu_loop_exit();
2522 }
2523#endif
2524}
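
/* Call-gate walk-through for the inner-privilege path above (illustration
 * only): a CPL=3 caller far-calls through a 386 call gate with DPL=3 whose
 * target code segment has DPL=0. The ring-0 SS:ESP is fetched from the
 * TSS, the old SS:ESP is pushed onto the new stack, param_count dwords are
 * copied across from the old stack, the caller's CS:EIP is pushed, and CPL
 * drops to 0. The "not restartable" comments mark the point after which a
 * fault can no longer roll the instruction back cleanly. */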
2525
2526/* real and vm86 mode iret */
2527void helper_iret_real(int shift)
2528{
2529 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2530 target_ulong ssp;
2531 int eflags_mask;
2532#ifdef VBOX
2533 bool fVME = false;
2534
2535 remR3TrapClear(env->pVM);
2536#endif /* VBOX */
2537
2538    sp_mask = 0xffff; /* XXX: use SS segment size? */
2539 sp = ESP;
2540 ssp = env->segs[R_SS].base;
2541 if (shift == 1) {
2542 /* 32 bits */
2543 POPL(ssp, sp, sp_mask, new_eip);
2544 POPL(ssp, sp, sp_mask, new_cs);
2545 new_cs &= 0xffff;
2546 POPL(ssp, sp, sp_mask, new_eflags);
2547 } else {
2548 /* 16 bits */
2549 POPW(ssp, sp, sp_mask, new_eip);
2550 POPW(ssp, sp, sp_mask, new_cs);
2551 POPW(ssp, sp, sp_mask, new_eflags);
2552 }
2553#ifdef VBOX
2554 if ( (env->eflags & VM_MASK)
2555 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
2556 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
2557 {
2558 fVME = true;
2559 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
2560 /* if TF will be set -> #GP */
2561 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
2562 || (new_eflags & TF_MASK))
2563 raise_exception(EXCP0D_GPF);
2564 }
2565#endif /* VBOX */
2566
2567 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2568 load_seg_vm(R_CS, new_cs);
2569 env->eip = new_eip;
2570#ifdef VBOX
2571 if (fVME)
2572 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2573 else
2574#endif
2575 if (env->eflags & VM_MASK)
2576 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
2577 else
2578 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
2579 if (shift == 0)
2580 eflags_mask &= 0xffff;
2581 load_eflags(new_eflags, eflags_mask);
2582
2583#ifdef VBOX
2584 if (fVME)
2585 {
2586 if (new_eflags & IF_MASK)
2587 env->eflags |= VIF_MASK;
2588 else
2589 env->eflags &= ~VIF_MASK;
2590 }
2591#endif /* VBOX */
2592}
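
/* VME summary for the VBox-specific block above (illustration only): with
 * CR4.VME set and IOPL < 3 in V86 mode, IRET completes without faulting;
 * the IF image popped from the stack is redirected into EFLAGS.VIF (note
 * that IF_MASK is dropped from eflags_mask in the fVME case), and #GP is
 * raised only when the popped flags would set TF, or would enable
 * interrupts while VIP is pending. */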
2593
2594static inline void validate_seg(int seg_reg, int cpl)
2595{
2596 int dpl;
2597 uint32_t e2;
2598
2599 /* XXX: on x86_64, we do not want to nullify FS and GS because
2600 they may still contain a valid base. I would be interested to
2601 know how a real x86_64 CPU behaves */
2602 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2603 (env->segs[seg_reg].selector & 0xfffc) == 0)
2604 return;
2605
2606 e2 = env->segs[seg_reg].flags;
2607 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2608 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2609 /* data or non conforming code segment */
2610 if (dpl < cpl) {
2611 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2612 }
2613 }
2614}
2615
2616/* protected mode iret */
2617static inline void helper_ret_protected(int shift, int is_iret, int addend)
2618{
2619 uint32_t new_cs, new_eflags, new_ss;
2620 uint32_t new_es, new_ds, new_fs, new_gs;
2621 uint32_t e1, e2, ss_e1, ss_e2;
2622 int cpl, dpl, rpl, eflags_mask, iopl;
2623 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2624
2625#ifdef TARGET_X86_64
2626 if (shift == 2)
2627 sp_mask = -1;
2628 else
2629#endif
2630 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2631 sp = ESP;
2632 ssp = env->segs[R_SS].base;
2633 new_eflags = 0; /* avoid warning */
2634#ifdef TARGET_X86_64
2635 if (shift == 2) {
2636 POPQ(sp, new_eip);
2637 POPQ(sp, new_cs);
2638 new_cs &= 0xffff;
2639 if (is_iret) {
2640 POPQ(sp, new_eflags);
2641 }
2642 } else
2643#endif
2644 if (shift == 1) {
2645 /* 32 bits */
2646 POPL(ssp, sp, sp_mask, new_eip);
2647 POPL(ssp, sp, sp_mask, new_cs);
2648 new_cs &= 0xffff;
2649 if (is_iret) {
2650 POPL(ssp, sp, sp_mask, new_eflags);
2651#if defined(VBOX) && defined(DEBUG)
2652 printf("iret: new CS %04X\n", new_cs);
2653 printf("iret: new EIP %08X\n", new_eip);
2654 printf("iret: new EFLAGS %08X\n", new_eflags);
2655 printf("iret: EAX=%08x\n", EAX);
2656#endif
2657
2658 if (new_eflags & VM_MASK)
2659 goto return_to_vm86;
2660 }
2661#ifdef VBOX
2662 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
2663 {
2664#ifdef DEBUG
2665 printf("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc);
2666#endif
2667 new_cs = new_cs & 0xfffc;
2668 }
2669#endif
2670 } else {
2671 /* 16 bits */
2672 POPW(ssp, sp, sp_mask, new_eip);
2673 POPW(ssp, sp, sp_mask, new_cs);
2674 if (is_iret)
2675 POPW(ssp, sp, sp_mask, new_eflags);
2676 }
2677#ifdef DEBUG_PCALL
2678 if (loglevel & CPU_LOG_PCALL) {
2679 fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2680 new_cs, new_eip, shift, addend);
2681 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2682 }
2683#endif
2684 if ((new_cs & 0xfffc) == 0)
2685 {
2686#if defined(VBOX) && defined(DEBUG)
2687 printf("new_cs & 0xfffc) == 0\n");
2688#endif
2689 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2690 }
2691 if (load_segment(&e1, &e2, new_cs) != 0)
2692 {
2693#if defined(VBOX) && defined(DEBUG)
2694 printf("load_segment failed\n");
2695#endif
2696 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2697 }
2698 if (!(e2 & DESC_S_MASK) ||
2699 !(e2 & DESC_CS_MASK))
2700 {
2701#if defined(VBOX) && defined(DEBUG)
2702 printf("e2 mask %08x\n", e2);
2703#endif
2704 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2705 }
2706 cpl = env->hflags & HF_CPL_MASK;
2707 rpl = new_cs & 3;
2708 if (rpl < cpl)
2709 {
2710#if defined(VBOX) && defined(DEBUG)
2711 printf("rpl < cpl (%d vs %d)\n", rpl, cpl);
2712#endif
2713 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2714 }
2715 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2716 if (e2 & DESC_C_MASK) {
2717 if (dpl > rpl)
2718 {
2719#if defined(VBOX) && defined(DEBUG)
2720 printf("dpl > rpl (%d vs %d)\n", dpl, rpl);
2721#endif
2722 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2723 }
2724 } else {
2725 if (dpl != rpl)
2726 {
2727#if defined(VBOX) && defined(DEBUG)
2728 printf("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2);
2729#endif
2730 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2731 }
2732 }
2733 if (!(e2 & DESC_P_MASK))
2734 {
2735#if defined(VBOX) && defined(DEBUG)
2736 printf("DESC_P_MASK e2=%08x\n", e2);
2737#endif
2738 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2739 }
2740 sp += addend;
2741 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2742 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2743        /* return to same privilege level */
2744 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2745 get_seg_base(e1, e2),
2746 get_seg_limit(e1, e2),
2747 e2);
2748 } else {
2749        /* return to different privilege level */
2750#ifdef TARGET_X86_64
2751 if (shift == 2) {
2752 POPQ(sp, new_esp);
2753 POPQ(sp, new_ss);
2754 new_ss &= 0xffff;
2755 } else
2756#endif
2757 if (shift == 1) {
2758 /* 32 bits */
2759 POPL(ssp, sp, sp_mask, new_esp);
2760 POPL(ssp, sp, sp_mask, new_ss);
2761 new_ss &= 0xffff;
2762 } else {
2763 /* 16 bits */
2764 POPW(ssp, sp, sp_mask, new_esp);
2765 POPW(ssp, sp, sp_mask, new_ss);
2766 }
2767#ifdef DEBUG_PCALL
2768 if (loglevel & CPU_LOG_PCALL) {
2769 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
2770 new_ss, new_esp);
2771 }
2772#endif
2773 if ((new_ss & 0xfffc) == 0) {
2774#ifdef TARGET_X86_64
2775            /* NULL ss is allowed in long mode if cpl != 3 */
2776 /* XXX: test CS64 ? */
2777 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2778 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2779 0, 0xffffffff,
2780 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2781 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2782 DESC_W_MASK | DESC_A_MASK);
2783 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
2784 } else
2785#endif
2786 {
2787 raise_exception_err(EXCP0D_GPF, 0);
2788 }
2789 } else {
2790 if ((new_ss & 3) != rpl)
2791 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2792 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
2793 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2794 if (!(ss_e2 & DESC_S_MASK) ||
2795 (ss_e2 & DESC_CS_MASK) ||
2796 !(ss_e2 & DESC_W_MASK))
2797 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2798 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2799 if (dpl != rpl)
2800 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2801 if (!(ss_e2 & DESC_P_MASK))
2802 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
2803 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2804 get_seg_base(ss_e1, ss_e2),
2805 get_seg_limit(ss_e1, ss_e2),
2806 ss_e2);
2807 }
2808
2809 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2810 get_seg_base(e1, e2),
2811 get_seg_limit(e1, e2),
2812 e2);
2813 cpu_x86_set_cpl(env, rpl);
2814 sp = new_esp;
2815#ifdef TARGET_X86_64
2816 if (env->hflags & HF_CS64_MASK)
2817 sp_mask = -1;
2818 else
2819#endif
2820 sp_mask = get_sp_mask(ss_e2);
2821
2822 /* validate data segments */
2823 validate_seg(R_ES, rpl);
2824 validate_seg(R_DS, rpl);
2825 validate_seg(R_FS, rpl);
2826 validate_seg(R_GS, rpl);
2827
2828 sp += addend;
2829 }
2830 SET_ESP(sp, sp_mask);
2831 env->eip = new_eip;
2832 if (is_iret) {
2833 /* NOTE: 'cpl' is the _old_ CPL */
2834 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2835 if (cpl == 0)
2836#ifdef VBOX
2837 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
2838#else
2839 eflags_mask |= IOPL_MASK;
2840#endif
2841 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2842 if (cpl <= iopl)
2843 eflags_mask |= IF_MASK;
2844 if (shift == 0)
2845 eflags_mask &= 0xffff;
2846 load_eflags(new_eflags, eflags_mask);
2847 }
2848 return;
2849
2850 return_to_vm86:
2851
2852#if 0 // defined(VBOX) && defined(DEBUG)
2853 printf("V86: new CS %04X\n", new_cs);
2854 printf("V86: Descriptor %08X:%08X\n", e2, e1);
2855 printf("V86: new EIP %08X\n", new_eip);
2856 printf("V86: new EFLAGS %08X\n", new_eflags);
2857#endif
2858
2859 POPL(ssp, sp, sp_mask, new_esp);
2860 POPL(ssp, sp, sp_mask, new_ss);
2861 POPL(ssp, sp, sp_mask, new_es);
2862 POPL(ssp, sp, sp_mask, new_ds);
2863 POPL(ssp, sp, sp_mask, new_fs);
2864 POPL(ssp, sp, sp_mask, new_gs);
2865
2866 /* modify processor state */
2867 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
2868 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
2869 load_seg_vm(R_CS, new_cs & 0xffff);
2870 cpu_x86_set_cpl(env, 3);
2871 load_seg_vm(R_SS, new_ss & 0xffff);
2872 load_seg_vm(R_ES, new_es & 0xffff);
2873 load_seg_vm(R_DS, new_ds & 0xffff);
2874 load_seg_vm(R_FS, new_fs & 0xffff);
2875 load_seg_vm(R_GS, new_gs & 0xffff);
2876
2877 env->eip = new_eip & 0xffff;
2878 ESP = new_esp;
2879}
2880
2881void helper_iret_protected(int shift, int next_eip)
2882{
2883 int tss_selector, type;
2884 uint32_t e1, e2;
2885
2886#ifdef VBOX
2887 remR3TrapClear(env->pVM);
2888#endif
2889
2890 /* specific case for TSS */
2891 if (env->eflags & NT_MASK) {
2892#ifdef TARGET_X86_64
2893 if (env->hflags & HF_LMA_MASK)
2894 raise_exception_err(EXCP0D_GPF, 0);
2895#endif
2896 tss_selector = lduw_kernel(env->tr.base + 0);
2897 if (tss_selector & 4)
2898 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2899 if (load_segment(&e1, &e2, tss_selector) != 0)
2900 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2901 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2902 /* NOTE: we check both segment and busy TSS */
2903 if (type != 3)
2904 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2905 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2906 } else {
2907 helper_ret_protected(shift, 1, 0);
2908 }
2909#ifdef USE_KQEMU
2910 if (kqemu_is_ok(env)) {
2911 CC_OP = CC_OP_EFLAGS;
2912 env->exception_index = -1;
2913 cpu_loop_exit();
2914 }
2915#endif
2916}
2917
2918void helper_lret_protected(int shift, int addend)
2919{
2920 helper_ret_protected(shift, 0, addend);
2921#ifdef USE_KQEMU
2922 if (kqemu_is_ok(env)) {
2923 env->exception_index = -1;
2924 cpu_loop_exit();
2925 }
2926#endif
2927}
2928
2929void helper_sysenter(void)
2930{
2931 if (env->sysenter_cs == 0) {
2932 raise_exception_err(EXCP0D_GPF, 0);
2933 }
2934 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2935 cpu_x86_set_cpl(env, 0);
2936 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2937 0, 0xffffffff,
2938 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2939 DESC_S_MASK |
2940 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2941 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2942 0, 0xffffffff,
2943 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2944 DESC_S_MASK |
2945 DESC_W_MASK | DESC_A_MASK);
2946 ESP = env->sysenter_esp;
2947 EIP = env->sysenter_eip;
2948}
2949
2950void helper_sysexit(void)
2951{
2952 int cpl;
2953
2954 cpl = env->hflags & HF_CPL_MASK;
2955 if (env->sysenter_cs == 0 || cpl != 0) {
2956 raise_exception_err(EXCP0D_GPF, 0);
2957 }
2958 cpu_x86_set_cpl(env, 3);
2959 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
2960 0, 0xffffffff,
2961 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2962 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2963 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2964 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
2965 0, 0xffffffff,
2966 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2967 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2968 DESC_W_MASK | DESC_A_MASK);
2969 ESP = ECX;
2970 EIP = EDX;
2971#ifdef USE_KQEMU
2972 if (kqemu_is_ok(env)) {
2973 env->exception_index = -1;
2974 cpu_loop_exit();
2975 }
2976#endif
2977}
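
/* MSR-derived selector layout used by the two helpers above: SYSENTER
 * loads CS from IA32_SYSENTER_CS and SS from CS+8 as flat ring-0 segments;
 * SYSEXIT returns through CS+16 and SS+24 with the RPL forced to 3.
 * Example (illustration only): sysenter_cs = 0x0100 yields CS=0x0100 and
 * SS=0x0108 on entry, CS=0x0113 and SS=0x011B on exit. */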
2978
2979void helper_movl_crN_T0(int reg)
2980{
2981#if !defined(CONFIG_USER_ONLY)
2982 switch(reg) {
2983 case 0:
2984 cpu_x86_update_cr0(env, T0);
2985 break;
2986 case 3:
2987 cpu_x86_update_cr3(env, T0);
2988 break;
2989 case 4:
2990 cpu_x86_update_cr4(env, T0);
2991 break;
2992 case 8:
2993 cpu_set_apic_tpr(env, T0);
2994 break;
2995 default:
2996 env->cr[reg] = T0;
2997 break;
2998 }
2999#endif
3000}
3001
3002/* XXX: do more */
3003void helper_movl_drN_T0(int reg)
3004{
3005 env->dr[reg] = T0;
3006}
3007
3008void helper_invlpg(target_ulong addr)
3009{
3010 cpu_x86_flush_tlb(env, addr);
3011}
3012
3013void helper_rdtsc(void)
3014{
3015 uint64_t val;
3016
3017 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3018 raise_exception(EXCP0D_GPF);
3019 }
3020 val = cpu_get_tsc(env);
3021 EAX = (uint32_t)(val);
3022 EDX = (uint32_t)(val >> 32);
3023}
3024
3025#ifdef VBOX
3026void helper_rdtscp(void)
3027{
3028 uint64_t val;
3029
3030 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3031 raise_exception(EXCP0D_GPF);
3032 }
3033
3034 val = cpu_get_tsc(env);
3035 EAX = (uint32_t)(val);
3036 EDX = (uint32_t)(val >> 32);
3037 ECX = cpu_rdmsr(env, MSR_K8_TSC_AUX);
3038}
3039#endif
3040
3041#if defined(CONFIG_USER_ONLY)
3042void helper_wrmsr(void)
3043{
3044}
3045
3046void helper_rdmsr(void)
3047{
3048}
3049#else
3050void helper_wrmsr(void)
3051{
3052 uint64_t val;
3053
3054 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3055
3056 switch((uint32_t)ECX) {
3057 case MSR_IA32_SYSENTER_CS:
3058 env->sysenter_cs = val & 0xffff;
3059 break;
3060 case MSR_IA32_SYSENTER_ESP:
3061 env->sysenter_esp = val;
3062 break;
3063 case MSR_IA32_SYSENTER_EIP:
3064 env->sysenter_eip = val;
3065 break;
3066 case MSR_IA32_APICBASE:
3067 cpu_set_apic_base(env, val);
3068 break;
3069 case MSR_EFER:
3070 {
3071 uint64_t update_mask;
3072 update_mask = 0;
3073 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3074 update_mask |= MSR_EFER_SCE;
3075 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3076 update_mask |= MSR_EFER_LME;
3077 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3078 update_mask |= MSR_EFER_FFXSR;
3079 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3080 update_mask |= MSR_EFER_NXE;
3081 env->efer = (env->efer & ~update_mask) |
3082 (val & update_mask);
3083 }
3084 break;
3085 case MSR_STAR:
3086 env->star = val;
3087 break;
3088 case MSR_PAT:
3089 env->pat = val;
3090 break;
3091#ifdef TARGET_X86_64
3092 case MSR_LSTAR:
3093 env->lstar = val;
3094 break;
3095 case MSR_CSTAR:
3096 env->cstar = val;
3097 break;
3098 case MSR_FMASK:
3099 env->fmask = val;
3100 break;
3101 case MSR_FSBASE:
3102 env->segs[R_FS].base = val;
3103 break;
3104 case MSR_GSBASE:
3105 env->segs[R_GS].base = val;
3106 break;
3107 case MSR_KERNELGSBASE:
3108 env->kernelgsbase = val;
3109 break;
3110#endif
3111 default:
3112#ifndef VBOX
3113 /* XXX: exception ? */
3114 break;
3115#else /* VBOX */
3116 {
3117 uint32_t ecx = (uint32_t)ECX;
3118        /* In the X2APIC specification this range is reserved for APIC control. */
3119 if (ecx >= MSR_APIC_RANGE_START && ecx < MSR_APIC_RANGE_END)
3120 cpu_apic_wrmsr(env, ecx, val);
3121 /** @todo else exception? */
3122 break;
3123 }
3124 case MSR_K8_TSC_AUX:
3125 cpu_wrmsr(env, MSR_K8_TSC_AUX, val);
3126 break;
3127#endif /* VBOX */
3128 }
3129}
3130
3131void helper_rdmsr(void)
3132{
3133 uint64_t val;
3134 switch((uint32_t)ECX) {
3135 case MSR_IA32_SYSENTER_CS:
3136 val = env->sysenter_cs;
3137 break;
3138 case MSR_IA32_SYSENTER_ESP:
3139 val = env->sysenter_esp;
3140 break;
3141 case MSR_IA32_SYSENTER_EIP:
3142 val = env->sysenter_eip;
3143 break;
3144 case MSR_IA32_APICBASE:
3145 val = cpu_get_apic_base(env);
3146 break;
3147 case MSR_EFER:
3148 val = env->efer;
3149 break;
3150 case MSR_STAR:
3151 val = env->star;
3152 break;
3153 case MSR_PAT:
3154 val = env->pat;
3155 break;
3156#ifdef TARGET_X86_64
3157 case MSR_LSTAR:
3158 val = env->lstar;
3159 break;
3160 case MSR_CSTAR:
3161 val = env->cstar;
3162 break;
3163 case MSR_FMASK:
3164 val = env->fmask;
3165 break;
3166 case MSR_FSBASE:
3167 val = env->segs[R_FS].base;
3168 break;
3169 case MSR_GSBASE:
3170 val = env->segs[R_GS].base;
3171 break;
3172 case MSR_KERNELGSBASE:
3173 val = env->kernelgsbase;
3174 break;
3175#endif
3176 default:
3177#ifndef VBOX
3178 /* XXX: exception ? */
3179 val = 0;
3180 break;
3181#else /* VBOX */
3182 {
3183 uint32_t ecx = (uint32_t)ECX;
3184        /* In the X2APIC specification this range is reserved for APIC control. */
3185 if (ecx >= MSR_APIC_RANGE_START && ecx < MSR_APIC_RANGE_END)
3186 val = cpu_apic_rdmsr(env, ecx);
3187 else
3188 val = 0; /** @todo else exception? */
3189 break;
3190 }
3191 case MSR_K8_TSC_AUX:
3192 val = cpu_rdmsr(env, MSR_K8_TSC_AUX);
3193 break;
3194#endif /* VBOX */
3195 }
3196 EAX = (uint32_t)(val);
3197 EDX = (uint32_t)(val >> 32);
3198}
3199#endif
3200
3201void helper_lsl(void)
3202{
3203 unsigned int selector, limit;
3204 uint32_t e1, e2, eflags;
3205 int rpl, dpl, cpl, type;
3206
3207 eflags = cc_table[CC_OP].compute_all();
3208 selector = T0 & 0xffff;
3209 if (load_segment(&e1, &e2, selector) != 0)
3210 goto fail;
3211 rpl = selector & 3;
3212 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3213 cpl = env->hflags & HF_CPL_MASK;
3214 if (e2 & DESC_S_MASK) {
3215 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3216 /* conforming */
3217 } else {
3218 if (dpl < cpl || dpl < rpl)
3219 goto fail;
3220 }
3221 } else {
3222 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3223 switch(type) {
3224 case 1:
3225 case 2:
3226 case 3:
3227 case 9:
3228 case 11:
3229 break;
3230 default:
3231 goto fail;
3232 }
3233 if (dpl < cpl || dpl < rpl) {
3234 fail:
3235 CC_SRC = eflags & ~CC_Z;
3236 return;
3237 }
3238 }
3239 limit = get_seg_limit(e1, e2);
3240 T1 = limit;
3241 CC_SRC = eflags | CC_Z;
3242}
3243
3244void helper_lar(void)
3245{
3246 unsigned int selector;
3247 uint32_t e1, e2, eflags;
3248 int rpl, dpl, cpl, type;
3249
3250 eflags = cc_table[CC_OP].compute_all();
3251 selector = T0 & 0xffff;
3252 if ((selector & 0xfffc) == 0)
3253 goto fail;
3254 if (load_segment(&e1, &e2, selector) != 0)
3255 goto fail;
3256 rpl = selector & 3;
3257 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3258 cpl = env->hflags & HF_CPL_MASK;
3259 if (e2 & DESC_S_MASK) {
3260 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3261 /* conforming */
3262 } else {
3263 if (dpl < cpl || dpl < rpl)
3264 goto fail;
3265 }
3266 } else {
3267 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3268 switch(type) {
3269 case 1:
3270 case 2:
3271 case 3:
3272 case 4:
3273 case 5:
3274 case 9:
3275 case 11:
3276 case 12:
3277 break;
3278 default:
3279 goto fail;
3280 }
3281 if (dpl < cpl || dpl < rpl) {
3282 fail:
3283 CC_SRC = eflags & ~CC_Z;
3284 return;
3285 }
3286 }
3287 T1 = e2 & 0x00f0ff00;
3288 CC_SRC = eflags | CC_Z;
3289}
3290
3291void helper_verr(void)
3292{
3293 unsigned int selector;
3294 uint32_t e1, e2, eflags;
3295 int rpl, dpl, cpl;
3296
3297 eflags = cc_table[CC_OP].compute_all();
3298 selector = T0 & 0xffff;
3299 if ((selector & 0xfffc) == 0)
3300 goto fail;
3301 if (load_segment(&e1, &e2, selector) != 0)
3302 goto fail;
3303 if (!(e2 & DESC_S_MASK))
3304 goto fail;
3305 rpl = selector & 3;
3306 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3307 cpl = env->hflags & HF_CPL_MASK;
3308 if (e2 & DESC_CS_MASK) {
3309 if (!(e2 & DESC_R_MASK))
3310 goto fail;
3311 if (!(e2 & DESC_C_MASK)) {
3312 if (dpl < cpl || dpl < rpl)
3313 goto fail;
3314 }
3315 } else {
3316 if (dpl < cpl || dpl < rpl) {
3317 fail:
3318 CC_SRC = eflags & ~CC_Z;
3319 return;
3320 }
3321 }
3322 CC_SRC = eflags | CC_Z;
3323}
3324
3325void helper_verw(void)
3326{
3327 unsigned int selector;
3328 uint32_t e1, e2, eflags;
3329 int rpl, dpl, cpl;
3330
3331 eflags = cc_table[CC_OP].compute_all();
3332 selector = T0 & 0xffff;
3333 if ((selector & 0xfffc) == 0)
3334 goto fail;
3335 if (load_segment(&e1, &e2, selector) != 0)
3336 goto fail;
3337 if (!(e2 & DESC_S_MASK))
3338 goto fail;
3339 rpl = selector & 3;
3340 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3341 cpl = env->hflags & HF_CPL_MASK;
3342 if (e2 & DESC_CS_MASK) {
3343 goto fail;
3344 } else {
3345 if (dpl < cpl || dpl < rpl)
3346 goto fail;
3347 if (!(e2 & DESC_W_MASK)) {
3348 fail:
3349 CC_SRC = eflags & ~CC_Z;
3350 return;
3351 }
3352 }
3353 CC_SRC = eflags | CC_Z;
3354}
3355
3356/* FPU helpers */
3357
3358void helper_fldt_ST0_A0(void)
3359{
3360 int new_fpstt;
3361 new_fpstt = (env->fpstt - 1) & 7;
3362 env->fpregs[new_fpstt].d = helper_fldt(A0);
3363 env->fpstt = new_fpstt;
3364 env->fptags[new_fpstt] = 0; /* validate stack entry */
3365}
3366
3367void helper_fstt_ST0_A0(void)
3368{
3369 helper_fstt(ST0, A0);
3370}
3371
3372void fpu_set_exception(int mask)
3373{
3374 env->fpus |= mask;
3375 if (env->fpus & (~env->fpuc & FPUC_EM))
3376 env->fpus |= FPUS_SE | FPUS_B;
3377}
3378
3379CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
3380{
3381 if (b == 0.0)
3382 fpu_set_exception(FPUS_ZE);
3383 return a / b;
3384}
3385
3386void fpu_raise_exception(void)
3387{
3388 if (env->cr[0] & CR0_NE_MASK) {
3389 raise_exception(EXCP10_COPR);
3390 }
3391#if !defined(CONFIG_USER_ONLY)
3392 else {
3393 cpu_set_ferr(env);
3394 }
3395#endif
3396}
3397
3398/* BCD ops */
3399
3400void helper_fbld_ST0_A0(void)
3401{
3402 CPU86_LDouble tmp;
3403 uint64_t val;
3404 unsigned int v;
3405 int i;
3406
3407 val = 0;
3408 for(i = 8; i >= 0; i--) {
3409 v = ldub(A0 + i);
3410 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
3411 }
3412 tmp = val;
3413 if (ldub(A0 + 9) & 0x80)
3414 tmp = -tmp;
3415 fpush();
3416 ST0 = tmp;
3417}
3418
3419void helper_fbst_ST0_A0(void)
3420{
3421 int v;
3422 target_ulong mem_ref, mem_end;
3423 int64_t val;
3424
3425 val = floatx_to_int64(ST0, &env->fp_status);
3426 mem_ref = A0;
3427 mem_end = mem_ref + 9;
3428 if (val < 0) {
3429 stb(mem_end, 0x80);
3430 val = -val;
3431 } else {
3432 stb(mem_end, 0x00);
3433 }
3434 while (mem_ref < mem_end) {
3435 if (val == 0)
3436 break;
3437 v = val % 100;
3438 val = val / 100;
3439 v = ((v / 10) << 4) | (v % 10);
3440 stb(mem_ref++, v);
3441 }
3442 while (mem_ref < mem_end) {
3443 stb(mem_ref++, 0);
3444 }
3445}
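
/* The ten-byte packed-BCD format handled by the two helpers above: bytes
 * 0..8 hold two decimal digits each, least significant pair first, and bit
 * 7 of byte 9 is the sign. A minimal host-side decoding sketch follows
 * (illustration only, never compiled; the function name bcd_decode is
 * hypothetical, the types come from the headers this file already
 * includes): */
#if 0
static int64_t bcd_decode(const uint8_t buf[10])
{
    int64_t val = 0;
    int i;
    for (i = 8; i >= 0; i--)              /* most significant byte first */
        val = val * 100 + (buf[i] >> 4) * 10 + (buf[i] & 0xf);
    return (buf[9] & 0x80) ? -val : val;  /* sign lives in byte 9, bit 7 */
}
/* Example: {0x23, 0x01, 0, 0, 0, 0, 0, 0, 0, 0x80} decodes to -123. */
#endif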
3446
3447void helper_f2xm1(void)
3448{
3449 ST0 = pow(2.0,ST0) - 1.0;
3450}
3451
3452void helper_fyl2x(void)
3453{
3454 CPU86_LDouble fptemp;
3455
3456 fptemp = ST0;
3457 if (fptemp>0.0){
3458 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
3459 ST1 *= fptemp;
3460 fpop();
3461 } else {
3462 env->fpus &= (~0x4700);
3463 env->fpus |= 0x400;
3464 }
3465}
3466
3467void helper_fptan(void)
3468{
3469 CPU86_LDouble fptemp;
3470
3471 fptemp = ST0;
3472 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3473 env->fpus |= 0x400;
3474 } else {
3475 ST0 = tan(fptemp);
3476 fpush();
3477 ST0 = 1.0;
3478 env->fpus &= (~0x400); /* C2 <-- 0 */
3479 /* the above code is for |arg| < 2**52 only */
3480 }
3481}
3482
3483void helper_fpatan(void)
3484{
3485 CPU86_LDouble fptemp, fpsrcop;
3486
3487 fpsrcop = ST1;
3488 fptemp = ST0;
3489 ST1 = atan2(fpsrcop,fptemp);
3490 fpop();
3491}
3492
3493void helper_fxtract(void)
3494{
3495 CPU86_LDoubleU temp;
3496 unsigned int expdif;
3497
3498 temp.d = ST0;
3499 expdif = EXPD(temp) - EXPBIAS;
3500 /*DP exponent bias*/
3501 ST0 = expdif;
3502 fpush();
3503 BIASEXPONENT(temp);
3504 ST0 = temp.d;
3505}
3506
3507void helper_fprem1(void)
3508{
3509 CPU86_LDouble dblq, fpsrcop, fptemp;
3510 CPU86_LDoubleU fpsrcop1, fptemp1;
3511 int expdif;
3512 int q;
3513
3514 fpsrcop = ST0;
3515 fptemp = ST1;
3516 fpsrcop1.d = fpsrcop;
3517 fptemp1.d = fptemp;
3518 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3519 if (expdif < 53) {
3520 dblq = fpsrcop / fptemp;
3521 dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
3522 ST0 = fpsrcop - fptemp*dblq;
3523 q = (int)dblq; /* cutting off top bits is assumed here */
3524 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3525 /* (C0,C1,C3) <-- (q2,q1,q0) */
3526 env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
3527 env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
3528 env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
3529 } else {
3530 env->fpus |= 0x400; /* C2 <-- 1 */
3531 fptemp = pow(2.0, expdif-50);
3532 fpsrcop = (ST0 / ST1) / fptemp;
3533 /* fpsrcop = integer obtained by rounding to the nearest */
3534 fpsrcop = (fpsrcop-floor(fpsrcop) < ceil(fpsrcop)-fpsrcop)?
3535 floor(fpsrcop): ceil(fpsrcop);
3536 ST0 -= (ST1 * fpsrcop * fptemp);
3537 }
3538}
3539
3540void helper_fprem(void)
3541{
3542 CPU86_LDouble dblq, fpsrcop, fptemp;
3543 CPU86_LDoubleU fpsrcop1, fptemp1;
3544 int expdif;
3545 int q;
3546
3547 fpsrcop = ST0;
3548 fptemp = ST1;
3549 fpsrcop1.d = fpsrcop;
3550 fptemp1.d = fptemp;
3551 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3552 if ( expdif < 53 ) {
3553 dblq = fpsrcop / fptemp;
3554 dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
3555 ST0 = fpsrcop - fptemp*dblq;
3556 q = (int)dblq; /* cutting off top bits is assumed here */
3557 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3558 /* (C0,C1,C3) <-- (q2,q1,q0) */
3559 env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
3560 env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
3561 env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
3562 } else {
3563 env->fpus |= 0x400; /* C2 <-- 1 */
3564 fptemp = pow(2.0, expdif-50);
3565 fpsrcop = (ST0 / ST1) / fptemp;
3566 /* fpsrcop = integer obtained by chopping */
3567 fpsrcop = (fpsrcop < 0.0)?
3568 -(floor(fabs(fpsrcop))): floor(fpsrcop);
3569 ST0 -= (ST1 * fpsrcop * fptemp);
3570 }
3571}
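
/* For both FPREM variants above: when the exponents differ by less than 53
 * the remainder is exact, C2 is cleared and the low three quotient bits
 * are reported as (C0,C1,C3) = (q2,q1,q0). Otherwise only a partial
 * remainder is produced and C2 is set, telling software to execute the
 * instruction again. The sole difference between FPREM1 and FPREM is
 * round-to-nearest versus truncation when forming the partial quotient. */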
3572
3573void helper_fyl2xp1(void)
3574{
3575 CPU86_LDouble fptemp;
3576
3577 fptemp = ST0;
3578 if ((fptemp+1.0)>0.0) {
3579 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
3580 ST1 *= fptemp;
3581 fpop();
3582 } else {
3583 env->fpus &= (~0x4700);
3584 env->fpus |= 0x400;
3585 }
3586}
3587
3588void helper_fsqrt(void)
3589{
3590 CPU86_LDouble fptemp;
3591
3592 fptemp = ST0;
3593 if (fptemp<0.0) {
3594 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3595 env->fpus |= 0x400;
3596 }
3597 ST0 = sqrt(fptemp);
3598}
3599
3600void helper_fsincos(void)
3601{
3602 CPU86_LDouble fptemp;
3603
3604 fptemp = ST0;
3605 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3606 env->fpus |= 0x400;
3607 } else {
3608 ST0 = sin(fptemp);
3609 fpush();
3610 ST0 = cos(fptemp);
3611 env->fpus &= (~0x400); /* C2 <-- 0 */
3612 /* the above code is for |arg| < 2**63 only */
3613 }
3614}
3615
3616void helper_frndint(void)
3617{
3618 ST0 = floatx_round_to_int(ST0, &env->fp_status);
3619}
3620
3621void helper_fscale(void)
3622{
3623 ST0 = ldexp (ST0, (int)(ST1));
3624}
3625
3626void helper_fsin(void)
3627{
3628 CPU86_LDouble fptemp;
3629
3630 fptemp = ST0;
3631 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3632 env->fpus |= 0x400;
3633 } else {
3634 ST0 = sin(fptemp);
3635 env->fpus &= (~0x400); /* C2 <-- 0 */
3636 /* the above code is for |arg| < 2**53 only */
3637 }
3638}
3639
3640void helper_fcos(void)
3641{
3642 CPU86_LDouble fptemp;
3643
3644 fptemp = ST0;
3645 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3646 env->fpus |= 0x400;
3647 } else {
3648 ST0 = cos(fptemp);
3649 env->fpus &= (~0x400); /* C2 <-- 0 */
3650        /* the above code is for |arg| < 2**63 only */
3651 }
3652}
3653
3654void helper_fxam_ST0(void)
3655{
3656 CPU86_LDoubleU temp;
3657 int expdif;
3658
3659 temp.d = ST0;
3660
3661 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3662 if (SIGND(temp))
3663 env->fpus |= 0x200; /* C1 <-- 1 */
3664
3665 /* XXX: test fptags too */
3666 expdif = EXPD(temp);
3667 if (expdif == MAXEXPD) {
3668#ifdef USE_X86LDOUBLE
3669 if (MANTD(temp) == 0x8000000000000000ULL)
3670#else
3671 if (MANTD(temp) == 0)
3672#endif
3673 env->fpus |= 0x500 /*Infinity*/;
3674 else
3675 env->fpus |= 0x100 /*NaN*/;
3676 } else if (expdif == 0) {
3677 if (MANTD(temp) == 0)
3678 env->fpus |= 0x4000 /*Zero*/;
3679 else
3680 env->fpus |= 0x4400 /*Denormal*/;
3681 } else {
3682 env->fpus |= 0x400;
3683 }
3684}
3685
3686void helper_fstenv(target_ulong ptr, int data32)
3687{
3688 int fpus, fptag, exp, i;
3689 uint64_t mant;
3690 CPU86_LDoubleU tmp;
3691
3692 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3693 fptag = 0;
3694 for (i=7; i>=0; i--) {
3695 fptag <<= 2;
3696 if (env->fptags[i]) {
3697 fptag |= 3;
3698 } else {
3699 tmp.d = env->fpregs[i].d;
3700 exp = EXPD(tmp);
3701 mant = MANTD(tmp);
3702 if (exp == 0 && mant == 0) {
3703 /* zero */
3704 fptag |= 1;
3705 } else if (exp == 0 || exp == MAXEXPD
3706#ifdef USE_X86LDOUBLE
3707 || (mant & (1LL << 63)) == 0
3708#endif
3709 ) {
3710 /* NaNs, infinity, denormal */
3711 fptag |= 2;
3712 }
3713 }
3714 }
3715 if (data32) {
3716 /* 32 bit */
3717 stl(ptr, env->fpuc);
3718 stl(ptr + 4, fpus);
3719 stl(ptr + 8, fptag);
3720 stl(ptr + 12, 0); /* fpip */
3721 stl(ptr + 16, 0); /* fpcs */
3722 stl(ptr + 20, 0); /* fpoo */
3723 stl(ptr + 24, 0); /* fpos */
3724 } else {
3725 /* 16 bit */
3726 stw(ptr, env->fpuc);
3727 stw(ptr + 2, fpus);
3728 stw(ptr + 4, fptag);
3729 stw(ptr + 6, 0);
3730 stw(ptr + 8, 0);
3731 stw(ptr + 10, 0);
3732 stw(ptr + 12, 0);
3733 }
3734}
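
/* Tag-word encoding produced above: two bits per physical register, 00
 * valid, 01 zero, 10 special (NaN, infinity, denormal), 11 empty. Example
 * (illustration only): with fpregs[0] holding a normal nonzero value and
 * the rest empty the loop yields fptag = 0xFFFC; a fully empty stack gives
 * 0xFFFF. */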
3735
3736void helper_fldenv(target_ulong ptr, int data32)
3737{
3738 int i, fpus, fptag;
3739
3740 if (data32) {
3741 env->fpuc = lduw(ptr);
3742 fpus = lduw(ptr + 4);
3743 fptag = lduw(ptr + 8);
3744 }
3745 else {
3746 env->fpuc = lduw(ptr);
3747 fpus = lduw(ptr + 2);
3748 fptag = lduw(ptr + 4);
3749 }
3750 env->fpstt = (fpus >> 11) & 7;
3751 env->fpus = fpus & ~0x3800;
3752 for(i = 0;i < 8; i++) {
3753 env->fptags[i] = ((fptag & 3) == 3);
3754 fptag >>= 2;
3755 }
3756}
3757
3758void helper_fsave(target_ulong ptr, int data32)
3759{
3760 CPU86_LDouble tmp;
3761 int i;
3762
3763 helper_fstenv(ptr, data32);
3764
3765 ptr += (14 << data32);
3766 for(i = 0;i < 8; i++) {
3767 tmp = ST(i);
3768 helper_fstt(tmp, ptr);
3769 ptr += 10;
3770 }
3771
3772 /* fninit */
3773 env->fpus = 0;
3774 env->fpstt = 0;
3775 env->fpuc = 0x37f;
3776 env->fptags[0] = 1;
3777 env->fptags[1] = 1;
3778 env->fptags[2] = 1;
3779 env->fptags[3] = 1;
3780 env->fptags[4] = 1;
3781 env->fptags[5] = 1;
3782 env->fptags[6] = 1;
3783 env->fptags[7] = 1;
3784}
3785
3786void helper_frstor(target_ulong ptr, int data32)
3787{
3788 CPU86_LDouble tmp;
3789 int i;
3790
3791 helper_fldenv(ptr, data32);
3792 ptr += (14 << data32);
3793
3794 for(i = 0;i < 8; i++) {
3795 tmp = helper_fldt(ptr);
3796 ST(i) = tmp;
3797 ptr += 10;
3798 }
3799}
3800
3801void helper_fxsave(target_ulong ptr, int data64)
3802{
3803 int fpus, fptag, i, nb_xmm_regs;
3804 CPU86_LDouble tmp;
3805 target_ulong addr;
3806
3807 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3808 fptag = 0;
3809 for(i = 0; i < 8; i++) {
3810 fptag |= (env->fptags[i] << i);
3811 }
3812 stw(ptr, env->fpuc);
3813 stw(ptr + 2, fpus);
3814 stw(ptr + 4, fptag ^ 0xff);
3815
3816 addr = ptr + 0x20;
3817 for(i = 0;i < 8; i++) {
3818 tmp = ST(i);
3819 helper_fstt(tmp, addr);
3820 addr += 16;
3821 }
3822
3823 if (env->cr[4] & CR4_OSFXSR_MASK) {
3824 /* XXX: finish it */
3825 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
3826 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
3827 nb_xmm_regs = 8 << data64;
3828 addr = ptr + 0xa0;
3829 for(i = 0; i < nb_xmm_regs; i++) {
3830 stq(addr, env->xmm_regs[i].XMM_Q(0));
3831 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
3832 addr += 16;
3833 }
3834 }
3835}
3836
3837void helper_fxrstor(target_ulong ptr, int data64)
3838{
3839 int i, fpus, fptag, nb_xmm_regs;
3840 CPU86_LDouble tmp;
3841 target_ulong addr;
3842
3843 env->fpuc = lduw(ptr);
3844 fpus = lduw(ptr + 2);
3845 fptag = lduw(ptr + 4);
3846 env->fpstt = (fpus >> 11) & 7;
3847 env->fpus = fpus & ~0x3800;
3848 fptag ^= 0xff;
3849 for(i = 0;i < 8; i++) {
3850 env->fptags[i] = ((fptag >> i) & 1);
3851 }
3852
3853 addr = ptr + 0x20;
3854 for(i = 0;i < 8; i++) {
3855 tmp = helper_fldt(addr);
3856 ST(i) = tmp;
3857 addr += 16;
3858 }
3859
3860 if (env->cr[4] & CR4_OSFXSR_MASK) {
3861 /* XXX: finish it */
3862 env->mxcsr = ldl(ptr + 0x18);
3863 //ldl(ptr + 0x1c);
3864 nb_xmm_regs = 8 << data64;
3865 addr = ptr + 0xa0;
3866 for(i = 0; i < nb_xmm_regs; i++) {
3867#if !defined(VBOX) || __GNUC__ < 4
3868 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
3869 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
3870#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
3871# if 1
3872 env->xmm_regs[i].XMM_L(0) = ldl(addr);
3873 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
3874 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
3875 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
3876# else
3877 /* this works fine on Mac OS X, gcc 4.0.1 */
3878 uint64_t u64 = ldq(addr);
3879 env->xmm_regs[i].XMM_Q(0);
3880 u64 = ldq(addr + 4);
3881 env->xmm_regs[i].XMM_Q(1) = u64;
3882# endif
3883#endif
3884 addr += 16;
3885 }
3886 }
3887}
3888
3889#ifndef USE_X86LDOUBLE
3890
3891void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
3892{
3893 CPU86_LDoubleU temp;
3894 int e;
3895
3896 temp.d = f;
3897 /* mantissa */
3898 *pmant = (MANTD(temp) << 11) | (1LL << 63);
3899 /* exponent + sign */
3900 e = EXPD(temp) - EXPBIAS + 16383;
3901 e |= SIGND(temp) >> 16;
3902 *pexp = e;
3903}
3904
3905CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
3906{
3907 CPU86_LDoubleU temp;
3908 int e;
3909 uint64_t ll;
3910
3911 /* XXX: handle overflow ? */
3912 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
3913 e |= (upper >> 4) & 0x800; /* sign */
3914 ll = (mant >> 11) & ((1LL << 52) - 1);
3915#ifdef __arm__
3916 temp.l.upper = (e << 20) | (ll >> 32);
3917 temp.l.lower = ll;
3918#else
3919 temp.ll = ll | ((uint64_t)e << 52);
3920#endif
3921 return temp.d;
3922}
3923
3924#else
3925
3926void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
3927{
3928 CPU86_LDoubleU temp;
3929
3930 temp.d = f;
3931 *pmant = temp.l.lower;
3932 *pexp = temp.l.upper;
3933}
3934
3935CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
3936{
3937 CPU86_LDoubleU temp;
3938
3939 temp.l.upper = upper;
3940 temp.l.lower = mant;
3941 return temp.d;
3942}
3943#endif
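
/* The conversion pairs above translate between the guest's 80-bit x87
 * format (15-bit exponent biased by 16383, explicit integer bit at
 * mantissa position 63) and the host representation. In the
 * !USE_X86LDOUBLE variant a double's 52-bit fraction is shifted left by 11
 * and the integer bit OR-ed in, so 1.0 maps to mant = 0x8000000000000000
 * with upper = 0x3fff; with USE_X86LDOUBLE the copy is bit-exact. */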
3944
3945#ifdef TARGET_X86_64
3946
3947//#define DEBUG_MULDIV
3948
3949static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
3950{
3951 *plow += a;
3952 /* carry test */
3953 if (*plow < a)
3954 (*phigh)++;
3955 *phigh += b;
3956}
3957
3958static void neg128(uint64_t *plow, uint64_t *phigh)
3959{
3960 *plow = ~ *plow;
3961 *phigh = ~ *phigh;
3962 add128(plow, phigh, 1, 0);
3963}
3964
3965static void mul64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
3966{
3967 uint32_t a0, a1, b0, b1;
3968 uint64_t v;
3969
3970 a0 = a;
3971 a1 = a >> 32;
3972
3973 b0 = b;
3974 b1 = b >> 32;
3975
3976 v = (uint64_t)a0 * (uint64_t)b0;
3977 *plow = v;
3978 *phigh = 0;
3979
3980 v = (uint64_t)a0 * (uint64_t)b1;
3981 add128(plow, phigh, v << 32, v >> 32);
3982
3983 v = (uint64_t)a1 * (uint64_t)b0;
3984 add128(plow, phigh, v << 32, v >> 32);
3985
3986 v = (uint64_t)a1 * (uint64_t)b1;
3987 *phigh += v;
3988#ifdef DEBUG_MULDIV
3989 printf("mul: 0x%016" PRIx64 " * 0x%016" PRIx64 " = 0x%016" PRIx64 "%016" PRIx64 "\n",
3990 a, b, *phigh, *plow);
3991#endif
3992}
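
/* mul64() evaluates a*b = a1*b1*2^64 + (a1*b0 + a0*b1)*2^32 + a0*b0 using
 * four 32x32->64 partial products, with add128() propagating the carry out
 * of the low half. A self-check sketch (illustration only, never compiled;
 * the function name is hypothetical): (2^64 - 1)^2 = (2^64 - 2)*2^64 + 1.
 */
#if 0
static void mul64_selfcheck(void)
{
    uint64_t lo, hi;
    mul64(&lo, &hi, 0xFFFFFFFFFFFFFFFFULL, 0xFFFFFFFFFFFFFFFFULL);
    /* expect hi == 0xFFFFFFFFFFFFFFFEULL and lo == 1 */
}
#endif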
3993
3994static void imul64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b)
3995{
3996 int sa, sb;
3997 sa = (a < 0);
3998 if (sa)
3999 a = -a;
4000 sb = (b < 0);
4001 if (sb)
4002 b = -b;
4003 mul64(plow, phigh, a, b);
4004 if (sa ^ sb) {
4005 neg128(plow, phigh);
4006 }
4007}
4008
4009/* return TRUE if overflow */
4010static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
4011{
4012 uint64_t q, r, a1, a0;
4013 int i, qb, ab;
4014
4015 a0 = *plow;
4016 a1 = *phigh;
4017 if (a1 == 0) {
4018 q = a0 / b;
4019 r = a0 % b;
4020 *plow = q;
4021 *phigh = r;
4022 } else {
4023 if (a1 >= b)
4024 return 1;
4025 /* XXX: use a better algorithm */
4026 for(i = 0; i < 64; i++) {
4027 ab = a1 >> 63;
4028 a1 = (a1 << 1) | (a0 >> 63);
4029 if (ab || a1 >= b) {
4030 a1 -= b;
4031 qb = 1;
4032 } else {
4033 qb = 0;
4034 }
4035 a0 = (a0 << 1) | qb;
4036 }
4037#if defined(DEBUG_MULDIV)
4038 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
4039 *phigh, *plow, b, a0, a1);
4040#endif
4041 *plow = a0;
4042 *phigh = a1;
4043 }
4044 return 0;
4045}
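
/* The a1 != 0 path above is plain restoring division: 64 iterations shift
 * the 128-bit dividend left one bit through a1, subtracting b whenever the
 * running remainder reaches it, while the quotient bits are shifted into
 * a0 from the right. The a1 >= b pre-check is exactly the condition under
 * which the true quotient would not fit in 64 bits, so it doubles as the
 * overflow test. */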
4046
4047/* return TRUE if overflow */
4048static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
4049{
4050 int sa, sb;
4051 sa = ((int64_t)*phigh < 0);
4052 if (sa)
4053 neg128(plow, phigh);
4054 sb = (b < 0);
4055 if (sb)
4056 b = -b;
4057 if (div64(plow, phigh, b) != 0)
4058 return 1;
4059 if (sa ^ sb) {
4060 if (*plow > (1ULL << 63))
4061 return 1;
4062 *plow = - *plow;
4063 } else {
4064 if (*plow >= (1ULL << 63))
4065 return 1;
4066 }
4067 if (sa)
4068 *phigh = - *phigh;
4069 return 0;
4070}
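
/* idiv64() reduces to the unsigned case via sign-magnitude. Note the
 * asymmetric overflow bounds: a negative quotient may reach magnitude 2^63
 * (hence "> (1ULL << 63)") while a positive one must stay below it (hence
 * ">= (1ULL << 63)"), matching the int64_t range. */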

void helper_mulq_EAX_T0(void)
{
    uint64_t r0, r1;

    mul64(&r0, &r1, EAX, T0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

void helper_imulq_EAX_T0(void)
{
    uint64_t r0, r1;

    imul64(&r0, &r1, EAX, T0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

void helper_imulq_T0_T1(void)
{
    uint64_t r0, r1;

    imul64(&r0, &r1, T0, T1);
    T0 = r0;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}
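
/* Sketch of the overflow test used by the IMUL helpers above: the signed
   128-bit product fits in 64 bits exactly when the high half equals the
   sign extension of the low half, so CC_SRC is non-zero precisely when
   CF/OF must be set. A hypothetical stand-alone form (not in the original
   source), assuming arithmetic right shift on signed values, as the
   helpers above already do: */
#ifdef DEBUG_MULDIV
static int imul64_overflowed(int64_t a, int64_t b)
{
    uint64_t r0, r1;
    imul64(&r0, &r1, a, b);
    return (int64_t)r1 != ((int64_t)r0 >> 63);
}
#endif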

void helper_divq_EAX_T0(void)
{
    uint64_t r0, r1;
    if (T0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, T0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_idivq_EAX_T0(void)
{
    uint64_t r0, r1;
    if (T0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, T0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_bswapq_T0(void)
{
    T0 = bswap64(T0);
}
#endif

void helper_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->hflags |= HF_HALTED_MASK;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}

void helper_monitor(void)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
}

void helper_mwait(void)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
#ifdef VBOX
    helper_hlt();
#else
    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        helper_hlt();
    }
#endif
}

float approx_rsqrt(float a)
{
    return 1.0 / sqrt(a);
}

float approx_rcp(float a)
{
    return 1.0 / a;
}

void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
#ifdef FLOATX80
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
#endif
}
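
/* Field map for update_fp_status() above, as a compiled-out sketch: the x87
   control word keeps the rounding control in bits 10-11 (RC_MASK) and the
   precision control in bits 8-9 (00 = 32-bit, 10 = 64-bit, 11 = 80-bit,
   01 is reserved). These accessor names are hypothetical, for illustration
   only. */
#if 0
static int fpuc_rounding_control(uint16_t fpuc)
{
    return (fpuc >> 10) & 3;   /* 0 = nearest, 1 = down, 2 = up, 3 = chop */
}

static int fpuc_precision_control(uint16_t fpuc)
{
    return (fpuc >> 8) & 3;    /* matches the FLOATX80 switch above */
}
#endif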

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu
#define GETPC() (__builtin_return_address(0))

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

/* try to fill the TLB and raise an exception on error. If retaddr is
   NULL, the function was called from C code (i.e. not from generated
   code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int is_user, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, is_user, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        if (retaddr)
            raise_exception_err(env->exception_index, env->error_code);
        else
            raise_exception_err_norestore(env->exception_index, env->error_code);
    }
    env = saved_env;
}

#ifdef VBOX

/**
 * Correctly computes the eflags.
 * @returns eflags.
 * @param   env1    CPU environment.
 */
uint32_t raw_compute_eflags(CPUX86State *env1)
{
    CPUX86State *savedenv = env;
    env = env1;
    uint32_t efl = compute_eflags();
    env = savedenv;
    return efl;
}

/**
 * Reads a byte from the given virtual address in the guest memory area.
 * XXX: does it work for any address? swapped out pages?
 * @returns the data byte read.
 * @param   env1    CPU environment.
 * @param   addr    GC virtual address.
 */
uint8_t read_byte(CPUX86State *env1, target_ulong addr)
{
    CPUX86State *savedenv = env;
    env = env1;
    uint8_t u8 = ldub_kernel(addr);
    env = savedenv;
    return u8;
}

/**
 * Reads a word (16 bits) from the given virtual address in the guest memory area.
 * XXX: does it work for any address? swapped out pages?
 * @returns the data word read.
 * @param   env1    CPU environment.
 * @param   addr    GC virtual address.
 */
uint16_t read_word(CPUX86State *env1, target_ulong addr)
{
    CPUX86State *savedenv = env;
    env = env1;
    uint16_t u16 = lduw_kernel(addr);
    env = savedenv;
    return u16;
}

/**
 * Reads a dword (32 bits) from the given virtual address in the guest memory area.
 * XXX: does it work for any address? swapped out pages?
 * @returns the data dword read.
 * @param   env1    CPU environment.
 * @param   addr    GC virtual address.
 */
uint32_t read_dword(CPUX86State *env1, target_ulong addr)
{
    CPUX86State *savedenv = env;
    env = env1;
    uint32_t u32 = ldl_kernel(addr);
    env = savedenv;
    return u32;
}

/**
 * Writes a byte to the given virtual address in the guest memory area.
 * XXX: does it work for any address? swapped out pages?
 * @param   env1    CPU environment.
 * @param   addr    GC virtual address.
 * @param   val     byte value.
 */
void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
{
    CPUX86State *savedenv = env;
    env = env1;
    stb(addr, val);
    env = savedenv;
}

void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
{
    CPUX86State *savedenv = env;
    env = env1;
    stw(addr, val);
    env = savedenv;
}

void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
{
    CPUX86State *savedenv = env;
    env = env1;
    stl(addr, val);
    env = savedenv;
}
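
/* Usage sketch for the accessors above (compiled out, with a made-up guest
   address): each helper temporarily installs the caller's CPUX86State as
   the global 'env' expected by the softmmu load/store macros, performs the
   access and restores the previous pointer. Like any softmmu access, they
   may longjmp on a guest fault. */
#if 0
static void rw_helpers_roundtrip(CPUX86State *env1)
{
    write_dword(env1, 0x1000, 0xdeadbeefU);     /* hypothetical GC address */
    Assert(read_dword(env1, 0x1000) == 0xdeadbeefU);
}
#endif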

/**
 * Correctly loads a selector into a segment register, updating the internal
 * qemu data structures/caches.
 * @param   env1        CPU environment.
 * @param   seg_reg     Segment register.
 * @param   selector    Selector to load.
 */
void sync_seg(CPUX86State *env1, int seg_reg, int selector)
{
    CPUX86State *savedenv = env;
    env = env1;

    if (    env->eflags & X86_EFL_VM
        || !(env->cr[0] & X86_CR0_PE))
    {
        load_seg_vm(seg_reg, selector);

        env = savedenv;

        /* Successful sync. */
        env1->segs[seg_reg].newselector = 0;
    }
    else
    {
        if (setjmp(env1->jmp_env) == 0)
        {
            if (seg_reg == R_CS)
            {
                uint32_t e1, e2;
                load_segment(&e1, &e2, selector);
                cpu_x86_load_seg_cache(env, R_CS, selector,
                                       get_seg_base(e1, e2),
                                       get_seg_limit(e1, e2),
                                       e2);
            }
            else
                load_seg(seg_reg, selector);
            env = savedenv;

            /* Successful sync. */
            env1->segs[seg_reg].newselector = 0;
        }
        else
        {
            env = savedenv;

            /* Postpone sync until the guest uses the selector. */
            env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
            env1->segs[seg_reg].newselector = selector;
            Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
        }
    }
}


/**
 * Correctly loads a new ldtr selector.
 *
 * @param   env1        CPU environment.
 * @param   selector    Selector to load.
 */
void sync_ldtr(CPUX86State *env1, int selector)
{
    CPUX86State *saved_env = env;
    target_ulong saved_T0 = T0;
    if (setjmp(env1->jmp_env) == 0)
    {
        env = env1;
        T0 = selector;
        helper_lldt_T0();
        T0 = saved_T0;
        env = saved_env;
    }
    else
    {
        T0 = saved_T0;
        env = saved_env;
#ifdef VBOX_STRICT
        cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
#endif
    }
}

int emulate_single_instr(CPUX86State *env1)
{
#if 1 /* single stepping is broken when using a static tb... feel free to figure out why. :-) */
    /* This has to be static because it needs to be addressable
       using 32-bit immediate addresses on 64-bit machines. This
       is dictated by the gcc code model used when building this
       module / op.o. Using a static here pushes the problem
       onto the module loader. */
    static TranslationBlock tb_temp;
#endif
    TranslationBlock *tb;
    TranslationBlock *current;
    int csize;
    void (*gen_func)(void);
    uint8_t *tc_ptr;
    target_ulong old_eip;

    /* ensures env is loaded in ebp! */
    CPUX86State *savedenv = env;
    env = env1;

    RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);

#if 1 /* see above */
    tc_ptr = env->pvCodeBuffer;
#else
    tc_ptr = code_gen_ptr;
#endif

    /*
     * Setup temporary translation block.
     */
    /* tb_alloc: */
#if 1 /* see above */
    tb = &tb_temp;
    tb->pc = env->segs[R_CS].base + env->eip;
    tb->cflags = 0;
#else
    tb = tb_alloc(env->segs[R_CS].base + env->eip);
    if (!tb)
    {
        tb_flush(env);
        tb = tb_alloc(env->segs[R_CS].base + env->eip);
    }
#endif

    /* tb_find_slow: */
    tb->tc_ptr = tc_ptr;
    tb->cs_base = env->segs[R_CS].base;
    tb->flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));

    /* Initialize the rest with sensible values. */
    tb->size = 0;
    tb->phys_hash_next = NULL;
    tb->page_next[0] = NULL;
    tb->page_next[1] = NULL;
    tb->page_addr[0] = 0;
    tb->page_addr[1] = 0;
    tb->tb_next_offset[0] = 0xffff;
    tb->tb_next_offset[1] = 0xffff;
    tb->tb_next[0] = 0xffff;
    tb->tb_next[1] = 0xffff;
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
    tb->jmp_first = NULL;

    current = env->current_tb;
    env->current_tb = NULL;

    /*
     * Translate only one instruction.
     */
    ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
    if (cpu_gen_code(env, tb, env->cbCodeBuffer, &csize) < 0)
    {
        AssertFailed();
        RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
        ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
        env = savedenv;
        return -1;
    }
#ifdef DEBUG
    if(csize > env->cbCodeBuffer)
    {
        RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
        AssertFailed();
        ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
        env = savedenv;
        return -1;
    }
    if (tb->tc_ptr != tc_ptr)
    {
        RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
        AssertFailed();
        ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
        env = savedenv;
        return -1;
    }
#endif
    ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);

    /* tb_link_phys: */
    tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
    Assert(tb->jmp_next[0] == NULL); Assert(tb->jmp_next[1] == NULL);
    if (tb->tb_next_offset[0] != 0xffff)
        tb_set_jmp_target(tb, 0, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[0]));
    if (tb->tb_next_offset[1] != 0xffff)
        tb_set_jmp_target(tb, 1, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[1]));

    /*
     * Execute it using emulation
     */
    old_eip = env->eip;
    gen_func = (void *)tb->tc_ptr;
    env->current_tb = tb;

    // eip remains the same for repeated instructions; no idea why qemu doesn't
    // do a jump inside the generated code. Perhaps not a very safe hack.
    while(old_eip == env->eip)
    {
        gen_func();
        /*
         * Exit once we detect an external interrupt and interrupts are enabled
         */
        if( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT|CPU_INTERRUPT_EXTERNAL_TIMER)) ||
            ( (env->eflags & IF_MASK) &&
              !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
              (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) ) )
        {
            break;
        }
    }
    env->current_tb = current;

    Assert(tb->phys_hash_next == NULL);
    Assert(tb->page_next[0] == NULL);
    Assert(tb->page_next[1] == NULL);
    Assert(tb->page_addr[0] == 0);
    Assert(tb->page_addr[1] == 0);
/*
    Assert(tb->tb_next_offset[0] == 0xffff);
    Assert(tb->tb_next_offset[1] == 0xffff);
    Assert(tb->tb_next[0] == 0xffff);
    Assert(tb->tb_next[1] == 0xffff);
    Assert(tb->jmp_next[0] == NULL);
    Assert(tb->jmp_next[1] == NULL);
    Assert(tb->jmp_first == NULL); */

    RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);

    /*
     * Execute the next instruction when we encounter instruction fusing.
     */
    if (env->hflags & HF_INHIBIT_IRQ_MASK)
    {
        Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %RGv\n", env->eip));
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
        emulate_single_instr(env);
    }

    env = savedenv;
    return 0;
}

int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
                            uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

    CPUX86State *savedenv = env;
    env = env1;

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type %d", type);
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
    {
        env = savedenv;
        return 0;
    }
    //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);

    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }

    env = savedenv;
    return 1;
}
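
/* Layout note for get_ss_esp_from_tss_raw() above: index = (dpl * 4 + 2)
   << shift addresses the stack-pointer pair for the given privilege level,
   i.e. ESPn at offset 4 + dpl * 8 in a 32-bit TSS (shift = 1) and SPn at
   offset 2 + dpl * 4 in a 16-bit TSS (shift = 0). A compiled-out,
   illustrative check (not in the original source): */
#if 0
static void tss_stack_index_example(void)
{
    int dpl = 0, shift = 1;                    /* ring 0, 32-bit TSS */
    Assert(((dpl * 4 + 2) << shift) == 4);     /* ESP0 at 4, SS0 at 8 */
}
#endif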

//*****************************************************************************
// Needs to be at the bottom of the file (overriding macros)

static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
{
    return *(CPU86_LDouble *)ptr;
}

static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
{
    *(CPU86_LDouble *)ptr = f;
}

#undef stw
#undef stl
#undef stq
#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
#define data64 0

//*****************************************************************************
void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    uint8_t *addr;

    if (env->cpuid_features & CPUID_FXSR)
    {
        fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= (env->fptags[i] << i);
        }
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag ^ 0xff);

        addr = ptr + 0x20;
        for(i = 0; i < 8; i++) {
            tmp = ST(i);
            helper_fstt_raw(tmp, addr);
            addr += 16;
        }

        if (env->cr[4] & CR4_OSFXSR_MASK) {
            /* XXX: finish it */
            stl(ptr + 0x18, env->mxcsr); /* mxcsr */
            stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
            nb_xmm_regs = 8 << data64;
            addr = ptr + 0xa0;
            for(i = 0; i < nb_xmm_regs; i++) {
#if __GNUC__ < 4
                stq(addr, env->xmm_regs[i].XMM_Q(0));
                stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
                stl(addr, env->xmm_regs[i].XMM_L(0));
                stl(addr + 4, env->xmm_regs[i].XMM_L(1));
                stl(addr + 8, env->xmm_regs[i].XMM_L(2));
                stl(addr + 12, env->xmm_regs[i].XMM_L(3));
#endif
                addr += 16;
            }
        }
    }
    else
    {
        PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
        int fptag;

        fp->FCW = env->fpuc;
        fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
        fptag = 0;
        for (i = 7; i >= 0; i--) {
            fptag <<= 2;
            if (env->fptags[i]) {
                fptag |= 3;
            } else {
                /* the FPU automatically computes it */
            }
        }
        fp->FTW = fptag;

        for(i = 0; i < 8; i++) {
            tmp = ST(i);
            helper_fstt_raw(tmp, &fp->regs[i].reg[0]);
        }
    }
}

//*****************************************************************************
#undef lduw
#undef ldl
#undef ldq
#define lduw(a) *(uint16_t *)(a)
#define ldl(a) *(uint32_t *)(a)
#define ldq(a) *(uint64_t *)(a)
//*****************************************************************************
void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    uint8_t *addr;

    if (env->cpuid_features & CPUID_FXSR)
    {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
        env->fpstt = (fpus >> 11) & 7;
        env->fpus = fpus & ~0x3800;
        fptag ^= 0xff;
        for(i = 0; i < 8; i++) {
            env->fptags[i] = ((fptag >> i) & 1);
        }

        addr = ptr + 0x20;
        for(i = 0; i < 8; i++) {
            tmp = helper_fldt_raw(addr);
            ST(i) = tmp;
            addr += 16;
        }

        if (env->cr[4] & CR4_OSFXSR_MASK) {
            /* XXX: finish it, endianness */
            env->mxcsr = ldl(ptr + 0x18);
            //ldl(ptr + 0x1c);
            nb_xmm_regs = 8 << data64;
            addr = ptr + 0xa0;
            for(i = 0; i < nb_xmm_regs; i++) {
#if HC_ARCH_BITS == 32
                /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
                env->xmm_regs[i].XMM_L(0) = ldl(addr);
                env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
                env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
                env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
#else
                env->xmm_regs[i].XMM_Q(0) = ldq(addr);
                env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
#endif
                addr += 16;
            }
        }
    }
    else
    {
        PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
        int fptag, j;

        env->fpuc = fp->FCW;
        env->fpstt = (fp->FSW >> 11) & 7;
        env->fpus = fp->FSW & ~0x3800;
        fptag = fp->FTW;
        for(i = 0; i < 8; i++) {
            env->fptags[i] = ((fptag & 3) == 3);
            fptag >>= 2;
        }
        j = env->fpstt;
        for(i = 0; i < 8; i++) {
            tmp = helper_fldt_raw(&fp->regs[i].reg[0]);
            ST(i) = tmp;
        }
    }
}
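
/* Layout note for the two FXSR paths above (offsets into the 512-byte
   FXSAVE image): 0x00 FCW, 0x02 FSW, 0x04 abridged tag word, 0x18 MXCSR,
   0x1c MXCSR_MASK, 0x20 ST0-ST7 (16 bytes per register), 0xa0 the XMM
   registers (16 bytes each). The abridged tag keeps one valid/empty bit
   per register, hence the fptag ^ 0xff conversions. A compiled-out,
   illustrative consistency check (not in the original source): */
#if 0
static void fxsave_layout_example(void)
{
    Assert(0x20 + 8 * 16 == 0xa0);   /* XMM area starts right after ST0-ST7 */
}
#endif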
//*****************************************************************************
//*****************************************************************************

#endif /* VBOX */