VirtualBox

source: vbox/trunk/src/recompiler/cpu-exec.c@ 19365

Last change on this file since 19365 was 19365, checked in by vboxsync, 16 years ago

recompiler: gcc-4.4 fix, see internal ticket 3883

  • Property svn:eol-style set to native
File size: 60.4 KB
1/*
2 * i386 emulator main execution loop
3 *
4 * Copyright (c) 2003-2005 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29#include "config.h"
30#define CPU_NO_GLOBAL_REGS
31#include "exec.h"
32#include "disas.h"
33#include "tcg.h"
34
35#if !defined(CONFIG_SOFTMMU)
36#undef EAX
37#undef ECX
38#undef EDX
39#undef EBX
40#undef ESP
41#undef EBP
42#undef ESI
43#undef EDI
44#undef EIP
45#include <signal.h>
46#include <sys/ucontext.h>
47#endif
48
49#if defined(__sparc__) && !defined(HOST_SOLARIS)
50// Work around ugly bugs in glibc that mangle global register contents
51#undef env
52#define env cpu_single_env
53#endif
54
55int tb_invalidated_flag;
56
57//#define DEBUG_EXEC
58//#define DEBUG_SIGNAL
59
60
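/* Unwind from generated code or a helper back to the setjmp(env->jmp_env) point
   inside cpu_exec(); env->exception_index tells the main loop what happened. */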
61void cpu_loop_exit(void)
62{
63 /* NOTE: the registers at this point must be saved by hand because
64 longjmp restores them */
65 regs_to_env();
66 longjmp(env->jmp_env, 1);
67}
68
69#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
70#define reg_T2
71#endif
72
73/* exit the current TB from a signal handler. The host registers are
74 restored in a state compatible with the CPU emulator
75 */
76void cpu_resume_from_signal(CPUState *env1, void *puc)
77{
78#if !defined(CONFIG_SOFTMMU)
79 struct ucontext *uc = puc;
80#endif
81
82 env = env1;
83
84 /* XXX: restore cpu registers saved in host registers */
85
86#if !defined(CONFIG_SOFTMMU)
87 if (puc) {
88 /* XXX: use siglongjmp ? */
89 sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
90 }
91#endif
92 longjmp(env->jmp_env, 1);
93}
94
95/* Execute the code without caching the generated code. An interpreter
96 could be used if available. */
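/* Note: this path is used by the instruction-counter handling further down in
   cpu_exec(): when a TB would overrun the remaining icount budget, the leftover
   instructions are run through a temporary, uncached TB so that the pending
   event is raised at the right instruction boundary. */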
97static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
98{
99 unsigned long next_tb;
100 TranslationBlock *tb;
101
102 /* Should never happen.
103 We only end up here when an existing TB is too long. */
104 if (max_cycles > CF_COUNT_MASK)
105 max_cycles = CF_COUNT_MASK;
106
107 tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
108 max_cycles);
109 env->current_tb = tb;
110 /* execute the generated code */
111#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
112 tcg_qemu_tb_exec(tb->tc_ptr, next_tb);
113#else
114 next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
115#endif
116
117 if ((next_tb & 3) == 2) {
118 /* Restore PC. This may happen if an async event occurs before
119 the TB starts executing. */
120 CPU_PC_FROM_TB(env, tb);
121 }
122 tb_phys_invalidate(tb, -1);
123 tb_free(tb);
124}
125
126static TranslationBlock *tb_find_slow(target_ulong pc,
127 target_ulong cs_base,
128 uint64_t flags)
129{
130 TranslationBlock *tb, **ptb1;
131 unsigned int h;
132 target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
133
134 tb_invalidated_flag = 0;
135
136 regs_to_env(); /* XXX: do it just before cpu_gen_code() */
137
138 /* find translated block using physical mappings */
139 phys_pc = get_phys_addr_code(env, pc);
140 phys_page1 = phys_pc & TARGET_PAGE_MASK;
141 phys_page2 = -1;
142 h = tb_phys_hash_func(phys_pc);
143 ptb1 = &tb_phys_hash[h];
144 for(;;) {
145 tb = *ptb1;
146 if (!tb)
147 goto not_found;
148 if (tb->pc == pc &&
149 tb->page_addr[0] == phys_page1 &&
150 tb->cs_base == cs_base &&
151 tb->flags == flags) {
152 /* check next page if needed */
153 if (tb->page_addr[1] != -1) {
154 virt_page2 = (pc & TARGET_PAGE_MASK) +
155 TARGET_PAGE_SIZE;
156 phys_page2 = get_phys_addr_code(env, virt_page2);
157 if (tb->page_addr[1] == phys_page2)
158 goto found;
159 } else {
160 goto found;
161 }
162 }
163 ptb1 = &tb->phys_hash_next;
164 }
165 not_found:
166 /* if no translated code available, then translate it now */
167 tb = tb_gen_code(env, pc, cs_base, flags, 0);
168
169 found:
170 /* we add the TB in the virtual pc hash table */
171 env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
172 return tb;
173}
174
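/* tb_find_fast() consults the virtual-PC jump cache (env->tb_jmp_cache) first;
   only on a miss, or when pc/cs_base/flags no longer match, does it fall back to
   tb_find_slow(), which walks the physical-address hash chain and translates a
   fresh block if nothing suitable exists yet. */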
175#ifndef VBOX
176static inline TranslationBlock *tb_find_fast(void)
177#else
178DECLINLINE(TranslationBlock *) tb_find_fast(void)
179#endif
180{
181 TranslationBlock *tb;
182 target_ulong cs_base, pc;
183 uint64_t flags;
184
185 /* we record a subset of the CPU state. It will
186 always be the same before a given translated block
187 is executed. */
188#if defined(TARGET_I386)
189 flags = env->hflags;
190 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
191 cs_base = env->segs[R_CS].base;
192 pc = cs_base + env->eip;
193#elif defined(TARGET_ARM)
194 flags = env->thumb | (env->vfp.vec_len << 1)
195 | (env->vfp.vec_stride << 4);
196 if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
197 flags |= (1 << 6);
198 if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
199 flags |= (1 << 7);
200 flags |= (env->condexec_bits << 8);
201 cs_base = 0;
202 pc = env->regs[15];
203#elif defined(TARGET_SPARC)
204#ifdef TARGET_SPARC64
205 // AM . Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
206 flags = ((env->pstate & PS_AM) << 2)
207 | (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
208 | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
209#else
210 // FPU enable . Supervisor
211 flags = (env->psref << 4) | env->psrs;
212#endif
213 cs_base = env->npc;
214 pc = env->pc;
215#elif defined(TARGET_PPC)
216 flags = env->hflags;
217 cs_base = 0;
218 pc = env->nip;
219#elif defined(TARGET_MIPS)
220 flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
221 cs_base = 0;
222 pc = env->active_tc.PC;
223#elif defined(TARGET_M68K)
224 flags = (env->fpcr & M68K_FPCR_PREC) /* Bit 6 */
225 | (env->sr & SR_S) /* Bit 13 */
226 | ((env->macsr >> 4) & 0xf); /* Bits 0-3 */
227 cs_base = 0;
228 pc = env->pc;
229#elif defined(TARGET_SH4)
230 flags = (env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL
231 | DELAY_SLOT_TRUE | DELAY_SLOT_CLEARME)) /* Bits 0- 3 */
232 | (env->fpscr & (FPSCR_FR | FPSCR_SZ | FPSCR_PR)) /* Bits 19-21 */
233 | (env->sr & (SR_MD | SR_RB)); /* Bits 29-30 */
234 cs_base = 0;
235 pc = env->pc;
236#elif defined(TARGET_ALPHA)
237 flags = env->ps;
238 cs_base = 0;
239 pc = env->pc;
240#elif defined(TARGET_CRIS)
241 flags = env->pregs[PR_CCS] & (S_FLAG | P_FLAG | U_FLAG | X_FLAG);
242 flags |= env->dslot;
243 cs_base = 0;
244 pc = env->pc;
245#else
246#error unsupported CPU
247#endif
248 tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
249 if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
250 tb->flags != flags)) {
251 tb = tb_find_slow(pc, cs_base, flags);
252 }
253 return tb;
254}
255
256/* main execution loop */
257
258#ifdef VBOX
259
260int cpu_exec(CPUState *env1)
261{
262#define DECLARE_HOST_REGS 1
263#include "hostregs_helper.h"
264 int ret = 0, interrupt_request;
265 TranslationBlock *tb;
266 uint8_t *tc_ptr;
267 unsigned long next_tb;
268
269 cpu_single_env = env1;
270
271 /* first we save global registers */
272#define SAVE_HOST_REGS 1
273#include "hostregs_helper.h"
274 env = env1;
275
276 env_to_regs();
277#if defined(TARGET_I386)
278 /* put eflags in CPU temporary format */
279 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
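    /* EFLAGS.DF is bit 10; fold it into the +1/-1 direction value that the
       translated string operations use internally. */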
280 DF = 1 - (2 * ((env->eflags >> 10) & 1));
281 CC_OP = CC_OP_EFLAGS;
282 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
283#elif defined(TARGET_SPARC)
284#elif defined(TARGET_M68K)
285 env->cc_op = CC_OP_FLAGS;
286 env->cc_dest = env->sr & 0xf;
287 env->cc_x = (env->sr >> 4) & 1;
288#elif defined(TARGET_ALPHA)
289#elif defined(TARGET_ARM)
290#elif defined(TARGET_PPC)
291#elif defined(TARGET_MIPS)
292#elif defined(TARGET_SH4)
293#elif defined(TARGET_CRIS)
294 /* XXXXX */
295#else
296#error unsupported target CPU
297#endif
298#ifndef VBOX /* VBOX: We need to raise traps and suchlike from the outside. */
299 env->exception_index = -1;
300#endif
301
302 /* prepare setjmp context for exception handling */
303 for(;;) {
304 if (setjmp(env->jmp_env) == 0)
305 {
306 env->current_tb = NULL;
307 VMMR3Unlock(env->pVM);
308 VMMR3Lock(env->pVM);
309
310 /*
311 * Check for fatal errors first
312 */
313 if (env->interrupt_request & CPU_INTERRUPT_RC) {
314 env->exception_index = EXCP_RC;
315 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC);
316 ret = env->exception_index;
317 cpu_loop_exit();
318 }
319
320 /* if an exception is pending, we execute it here */
321 if (env->exception_index >= 0) {
322 Assert(!env->user_mode_only);
323 if (env->exception_index >= EXCP_INTERRUPT) {
324 /* exit request from the cpu execution loop */
325 ret = env->exception_index;
326 break;
327 } else {
328 /* simulate a real cpu exception. On i386, it can
329 trigger new exceptions, but we do not handle
330 double or triple faults yet. */
331 RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
332 Log(("do_interrupt %d %d %RGv\n", env->exception_index, env->exception_is_int, env->exception_next_eip));
333 do_interrupt(env->exception_index,
334 env->exception_is_int,
335 env->error_code,
336 env->exception_next_eip, 0);
337 /* successfully delivered */
338 env->old_exception = -1;
339 RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
340 }
341 env->exception_index = -1;
342 }
343
344 next_tb = 0; /* force lookup of first TB */
345 for(;;)
346 {
347 interrupt_request = env->interrupt_request;
348#ifndef VBOX
349 if (__builtin_expect(interrupt_request, 0))
350#else
351 if (RT_UNLIKELY(interrupt_request != 0))
352#endif
353 {
354 /** @todo: reconcile with what QEMU really does */
355
356 /* Single instruction exec request, we execute it and return (one way or the other).
357 The caller will always reschedule after doing this operation! */
358 if (interrupt_request & CPU_INTERRUPT_SINGLE_INSTR)
359 {
360 /* not in flight are we? (if we are, we trapped) */
361 if (!(env->interrupt_request & CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
362 {
363 ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
364 env->exception_index = EXCP_SINGLE_INSTR;
365 if (emulate_single_instr(env) == -1)
366 AssertMsgFailed(("REM: emulate_single_instr failed for EIP=%RGv!!\n", env->eip));
367
368 /* When we receive an external interrupt during execution of this single
369 instruction, we should stay here. We will leave when we're ready
370 for raw-mode or when interrupted by pending EMT requests. */
371 interrupt_request = env->interrupt_request; /* reload this! */
372 if ( !(interrupt_request & CPU_INTERRUPT_HARD)
373 || !(env->eflags & IF_MASK)
374 || (env->hflags & HF_INHIBIT_IRQ_MASK)
375 || (env->state & CPU_RAW_HWACC)
376 )
377 {
378 env->exception_index = ret = EXCP_SINGLE_INSTR;
379 cpu_loop_exit();
380 }
381 }
382 /* Clear CPU_INTERRUPT_SINGLE_INSTR and leave CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT set. */
383 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_SINGLE_INSTR);
384 }
385
386 RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
387 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
388 !(env->hflags & HF_SMM_MASK)) {
389 env->interrupt_request &= ~CPU_INTERRUPT_SMI;
390 do_smm_enter();
391 next_tb = 0;
392 }
393 else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
394 (env->eflags & IF_MASK) &&
395 !(env->hflags & HF_INHIBIT_IRQ_MASK))
396 {
397 /* if hardware interrupt pending, we execute it */
398 int intno;
399 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_HARD);
400 intno = cpu_get_pic_interrupt(env);
401 if (intno >= 0)
402 {
403 Log(("do_interrupt %d\n", intno));
404 do_interrupt(intno, 0, 0, 0, 1);
405 }
406 /* ensure that no TB jump will be modified as
407 the program flow was changed */
408 next_tb = 0;
409 }
410 if (env->interrupt_request & CPU_INTERRUPT_EXITTB)
411 {
412 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXITTB);
413 /* ensure that no TB jump will be modified as
414 the program flow was changed */
415 next_tb = 0;
416 }
417 RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
418 if (interrupt_request & CPU_INTERRUPT_EXIT)
419 {
420 env->exception_index = EXCP_INTERRUPT;
421 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXIT);
422 ret = env->exception_index;
423 cpu_loop_exit();
424 }
425 if (interrupt_request & CPU_INTERRUPT_RC)
426 {
427 env->exception_index = EXCP_RC;
428 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC);
429 ret = env->exception_index;
430 cpu_loop_exit();
431 }
432 }
433
434 /*
435 * Check if the CPU state allows us to execute the code in raw-mode.
436 */
437 RAWEx_ProfileStart(env, STATS_RAW_CHECK);
438 if (remR3CanExecuteRaw(env,
439 env->eip + env->segs[R_CS].base,
440 env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK)),
441 &env->exception_index))
442 {
443 RAWEx_ProfileStop(env, STATS_RAW_CHECK);
444 ret = env->exception_index;
445 cpu_loop_exit();
446 }
447 RAWEx_ProfileStop(env, STATS_RAW_CHECK);
448
449 RAWEx_ProfileStart(env, STATS_TLB_LOOKUP);
450 spin_lock(&tb_lock);
451 tb = tb_find_fast();
452 /* Note: we do it here to avoid a gcc bug on Mac OS X when
453 doing it in tb_find_slow */
454 if (tb_invalidated_flag) {
455 /* as some TB could have been invalidated because
456 of memory exceptions while generating the code, we
457 must recompute the hash index here */
458 next_tb = 0;
459 tb_invalidated_flag = 0;
460 }
461
462 /* see if we can patch the calling TB. When the TB
463 spans two pages, we cannot safely do a direct
464 jump. */
465 if (next_tb != 0
466 && !(tb->cflags & CF_RAW_MODE)
467 && tb->page_addr[1] == -1)
468 {
469 tb_add_jump((TranslationBlock *)(long)(next_tb & ~3), next_tb & 3, tb);
470 }
471 spin_unlock(&tb_lock);
472 RAWEx_ProfileStop(env, STATS_TLB_LOOKUP);
473
474 env->current_tb = tb;
475 while (env->current_tb) {
476 tc_ptr = tb->tc_ptr;
477 /* execute the generated code */
478 RAWEx_ProfileStart(env, STATS_QEMU_RUN_EMULATED_CODE);
479#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
480 tcg_qemu_tb_exec(tc_ptr, next_tb);
481#else
482 next_tb = tcg_qemu_tb_exec(tc_ptr);
483#endif
484 RAWEx_ProfileStop(env, STATS_QEMU_RUN_EMULATED_CODE);
485 env->current_tb = NULL;
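    /* The two low bits of next_tb encode how the generated code was left:
       0/1 give the jump slot to patch for TB chaining (see tb_add_jump above),
       2 means the block was cut short, e.g. because the instruction counter
       expired; the remaining bits are the TB pointer itself. */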
486 if ((next_tb & 3) == 2) {
487 /* Instruction counter expired. */
488 int insns_left;
489 tb = (TranslationBlock *)(long)(next_tb & ~3);
490 /* Restore PC. */
491 CPU_PC_FROM_TB(env, tb);
492 insns_left = env->icount_decr.u32;
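    /* Only the low 16 bits of icount_decr act as the per-TB decrementer, so a
       larger instruction budget is parked in icount_extra and refilled in
       chunks of at most 0xffff. */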
493 if (env->icount_extra && insns_left >= 0) {
494 /* Refill decrementer and continue execution. */
495 env->icount_extra += insns_left;
496 if (env->icount_extra > 0xffff) {
497 insns_left = 0xffff;
498 } else {
499 insns_left = env->icount_extra;
500 }
501 env->icount_extra -= insns_left;
502 env->icount_decr.u16.low = insns_left;
503 } else {
504 if (insns_left > 0) {
505 /* Execute remaining instructions. */
506 cpu_exec_nocache(insns_left, tb);
507 }
508 env->exception_index = EXCP_INTERRUPT;
509 next_tb = 0;
510 cpu_loop_exit();
511 }
512 }
513 }
514
515 /* reset soft MMU for next block (it can currently
516 only be set by a memory fault) */
517#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
518 if (env->hflags & HF_SOFTMMU_MASK) {
519 env->hflags &= ~HF_SOFTMMU_MASK;
520 /* do not allow linking to another block */
521 next_tb = 0;
522 }
523#endif
524 } /* for(;;) */
525 } else {
526 env_to_regs();
527 }
528#ifdef VBOX_HIGH_RES_TIMERS_HACK
529 /* NULL the current_tb here so cpu_interrupt() doesn't do
530 anything unnecessary (like crashing during single-instruction emulation). */
531 env->current_tb = NULL;
532 /* don't use env1->pVM here, the code wouldn't run with gcc-4.4/amd64
533 * anymore, see #3883 */
534 TMTimerPoll(env->pVM);
535#endif
536 } /* for(;;) */
537
538#if defined(TARGET_I386)
539 /* restore flags in standard format */
540 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
541#else
542#error unsupported target CPU
543#endif
544#include "hostregs_helper.h"
545 return ret;
546}
547
548#else /* !VBOX */
549int cpu_exec(CPUState *env1)
550{
551#define DECLARE_HOST_REGS 1
552#include "hostregs_helper.h"
553 int ret, interrupt_request;
554 TranslationBlock *tb;
555 uint8_t *tc_ptr;
556 unsigned long next_tb;
557
558 if (cpu_halted(env1) == EXCP_HALTED)
559 return EXCP_HALTED;
560
561 cpu_single_env = env1;
562
563 /* first we save global registers */
564#define SAVE_HOST_REGS 1
565#include "hostregs_helper.h"
566 env = env1;
567
568 env_to_regs();
569#if defined(TARGET_I386)
570 /* put eflags in CPU temporary format */
571 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
572 DF = 1 - (2 * ((env->eflags >> 10) & 1));
573 CC_OP = CC_OP_EFLAGS;
574 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
575#elif defined(TARGET_SPARC)
576#elif defined(TARGET_M68K)
577 env->cc_op = CC_OP_FLAGS;
578 env->cc_dest = env->sr & 0xf;
579 env->cc_x = (env->sr >> 4) & 1;
580#elif defined(TARGET_ALPHA)
581#elif defined(TARGET_ARM)
582#elif defined(TARGET_PPC)
583#elif defined(TARGET_MIPS)
584#elif defined(TARGET_SH4)
585#elif defined(TARGET_CRIS)
586 /* XXXXX */
587#else
588#error unsupported target CPU
589#endif
590 env->exception_index = -1;
591
592 /* prepare setjmp context for exception handling */
593 for(;;) {
594 if (setjmp(env->jmp_env) == 0) {
595 env->current_tb = NULL;
596 /* if an exception is pending, we execute it here */
597 if (env->exception_index >= 0) {
598 if (env->exception_index >= EXCP_INTERRUPT) {
599 /* exit request from the cpu execution loop */
600 ret = env->exception_index;
601 break;
602 } else if (env->user_mode_only) {
603 /* if user mode only, we simulate a fake exception
604 which will be handled outside the cpu execution
605 loop */
606#if defined(TARGET_I386)
607 do_interrupt_user(env->exception_index,
608 env->exception_is_int,
609 env->error_code,
610 env->exception_next_eip);
611 /* successfully delivered */
612 env->old_exception = -1;
613#endif
614 ret = env->exception_index;
615 break;
616 } else {
617#if defined(TARGET_I386)
618 /* simulate a real cpu exception. On i386, it can
619 trigger new exceptions, but we do not handle
620 double or triple faults yet. */
621 do_interrupt(env->exception_index,
622 env->exception_is_int,
623 env->error_code,
624 env->exception_next_eip, 0);
625 /* successfully delivered */
626 env->old_exception = -1;
627#elif defined(TARGET_PPC)
628 do_interrupt(env);
629#elif defined(TARGET_MIPS)
630 do_interrupt(env);
631#elif defined(TARGET_SPARC)
632 do_interrupt(env);
633#elif defined(TARGET_ARM)
634 do_interrupt(env);
635#elif defined(TARGET_SH4)
636 do_interrupt(env);
637#elif defined(TARGET_ALPHA)
638 do_interrupt(env);
639#elif defined(TARGET_CRIS)
640 do_interrupt(env);
641#elif defined(TARGET_M68K)
642 do_interrupt(0);
643#endif
644 }
645 env->exception_index = -1;
646 }
647#ifdef USE_KQEMU
648 if (kqemu_is_ok(env) && env->interrupt_request == 0) {
649 int ret;
650 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
651 ret = kqemu_cpu_exec(env);
652 /* put eflags in CPU temporary format */
653 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
654 DF = 1 - (2 * ((env->eflags >> 10) & 1));
655 CC_OP = CC_OP_EFLAGS;
656 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
657 if (ret == 1) {
658 /* exception */
659 longjmp(env->jmp_env, 1);
660 } else if (ret == 2) {
661 /* softmmu execution needed */
662 } else {
663 if (env->interrupt_request != 0) {
664 /* hardware interrupt will be executed just after */
665 } else {
666 /* otherwise, we restart */
667 longjmp(env->jmp_env, 1);
668 }
669 }
670 }
671#endif
672
673 next_tb = 0; /* force lookup of first TB */
674 for(;;) {
675 interrupt_request = env->interrupt_request;
676 if (unlikely(interrupt_request) &&
677 likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
678 if (interrupt_request & CPU_INTERRUPT_DEBUG) {
679 env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
680 env->exception_index = EXCP_DEBUG;
681 cpu_loop_exit();
682 }
683#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
684 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
685 if (interrupt_request & CPU_INTERRUPT_HALT) {
686 env->interrupt_request &= ~CPU_INTERRUPT_HALT;
687 env->halted = 1;
688 env->exception_index = EXCP_HLT;
689 cpu_loop_exit();
690 }
691#endif
692#if defined(TARGET_I386)
693 if (env->hflags2 & HF2_GIF_MASK) {
694 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
695 !(env->hflags & HF_SMM_MASK)) {
696 svm_check_intercept(SVM_EXIT_SMI);
697 env->interrupt_request &= ~CPU_INTERRUPT_SMI;
698 do_smm_enter();
699 next_tb = 0;
700 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
701 !(env->hflags2 & HF2_NMI_MASK)) {
702 env->interrupt_request &= ~CPU_INTERRUPT_NMI;
703 env->hflags2 |= HF2_NMI_MASK;
704 do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
705 next_tb = 0;
706 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
707 (((env->hflags2 & HF2_VINTR_MASK) &&
708 (env->hflags2 & HF2_HIF_MASK)) ||
709 (!(env->hflags2 & HF2_VINTR_MASK) &&
710 (env->eflags & IF_MASK &&
711 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
712 int intno;
713 svm_check_intercept(SVM_EXIT_INTR);
714 env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
715 intno = cpu_get_pic_interrupt(env);
716 if (loglevel & CPU_LOG_TB_IN_ASM) {
717 fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
718 }
719 do_interrupt(intno, 0, 0, 0, 1);
720 /* ensure that no TB jump will be modified as
721 the program flow was changed */
722 next_tb = 0;
723#if !defined(CONFIG_USER_ONLY)
724 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
725 (env->eflags & IF_MASK) &&
726 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
727 int intno;
728 /* FIXME: this should respect TPR */
729 svm_check_intercept(SVM_EXIT_VINTR);
730 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
731 intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
732 if (loglevel & CPU_LOG_TB_IN_ASM)
733 fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
734 do_interrupt(intno, 0, 0, 0, 1);
735 next_tb = 0;
736#endif
737 }
738 }
739#elif defined(TARGET_PPC)
740#if 0
741 if ((interrupt_request & CPU_INTERRUPT_RESET)) {
742 cpu_ppc_reset(env);
743 }
744#endif
745 if (interrupt_request & CPU_INTERRUPT_HARD) {
746 ppc_hw_interrupt(env);
747 if (env->pending_interrupts == 0)
748 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
749 next_tb = 0;
750 }
751#elif defined(TARGET_MIPS)
752 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
753 (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
754 (env->CP0_Status & (1 << CP0St_IE)) &&
755 !(env->CP0_Status & (1 << CP0St_EXL)) &&
756 !(env->CP0_Status & (1 << CP0St_ERL)) &&
757 !(env->hflags & MIPS_HFLAG_DM)) {
758 /* Raise it */
759 env->exception_index = EXCP_EXT_INTERRUPT;
760 env->error_code = 0;
761 do_interrupt(env);
762 next_tb = 0;
763 }
764#elif defined(TARGET_SPARC)
765 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
766 (env->psret != 0)) {
767 int pil = env->interrupt_index & 15;
768 int type = env->interrupt_index & 0xf0;
769
770 if (((type == TT_EXTINT) &&
771 (pil == 15 || pil > env->psrpil)) ||
772 type != TT_EXTINT) {
773 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
774 env->exception_index = env->interrupt_index;
775 do_interrupt(env);
776 env->interrupt_index = 0;
777#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
778 cpu_check_irqs(env);
779#endif
780 next_tb = 0;
781 }
782 } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
783 //do_interrupt(0, 0, 0, 0, 0);
784 env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
785 }
786#elif defined(TARGET_ARM)
787 if (interrupt_request & CPU_INTERRUPT_FIQ
788 && !(env->uncached_cpsr & CPSR_F)) {
789 env->exception_index = EXCP_FIQ;
790 do_interrupt(env);
791 next_tb = 0;
792 }
793 /* ARMv7-M interrupt return works by loading a magic value
794 into the PC. On real hardware the load causes the
795 return to occur. The qemu implementation performs the
796 jump normally, then does the exception return when the
797 CPU tries to execute code at the magic address.
798 This will cause the magic PC value to be pushed to
799 the stack if an interrupt occurred at the wrong time.
800 We avoid this by disabling interrupts when
801 pc contains a magic address. */
802 if (interrupt_request & CPU_INTERRUPT_HARD
803 && ((IS_M(env) && env->regs[15] < 0xfffffff0)
804 || !(env->uncached_cpsr & CPSR_I))) {
805 env->exception_index = EXCP_IRQ;
806 do_interrupt(env);
807 next_tb = 0;
808 }
809#elif defined(TARGET_SH4)
810 if (interrupt_request & CPU_INTERRUPT_HARD) {
811 do_interrupt(env);
812 next_tb = 0;
813 }
814#elif defined(TARGET_ALPHA)
815 if (interrupt_request & CPU_INTERRUPT_HARD) {
816 do_interrupt(env);
817 next_tb = 0;
818 }
819#elif defined(TARGET_CRIS)
820 if (interrupt_request & CPU_INTERRUPT_HARD
821 && (env->pregs[PR_CCS] & I_FLAG)) {
822 env->exception_index = EXCP_IRQ;
823 do_interrupt(env);
824 next_tb = 0;
825 }
826 if (interrupt_request & CPU_INTERRUPT_NMI
827 && (env->pregs[PR_CCS] & M_FLAG)) {
828 env->exception_index = EXCP_NMI;
829 do_interrupt(env);
830 next_tb = 0;
831 }
832#elif defined(TARGET_M68K)
833 if (interrupt_request & CPU_INTERRUPT_HARD
834 && ((env->sr & SR_I) >> SR_I_SHIFT)
835 < env->pending_level) {
836 /* Real hardware gets the interrupt vector via an
837 IACK cycle at this point. Current emulated
838 hardware doesn't rely on this, so we
839 provide/save the vector when the interrupt is
840 first signalled. */
841 env->exception_index = env->pending_vector;
842 do_interrupt(1);
843 next_tb = 0;
844 }
845#endif
846 /* Don't use the cached interrupt_request value;
847 do_interrupt may have updated the EXITTB flag. */
848 if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
849 env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
850 /* ensure that no TB jump will be modified as
851 the program flow was changed */
852 next_tb = 0;
853 }
854 if (interrupt_request & CPU_INTERRUPT_EXIT) {
855 env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
856 env->exception_index = EXCP_INTERRUPT;
857 cpu_loop_exit();
858 }
859 }
860#ifdef DEBUG_EXEC
861 if ((loglevel & CPU_LOG_TB_CPU)) {
862 /* restore flags in standard format */
863 regs_to_env();
864#if defined(TARGET_I386)
865 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
866 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
867 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
868#elif defined(TARGET_ARM)
869 cpu_dump_state(env, logfile, fprintf, 0);
870#elif defined(TARGET_SPARC)
871 cpu_dump_state(env, logfile, fprintf, 0);
872#elif defined(TARGET_PPC)
873 cpu_dump_state(env, logfile, fprintf, 0);
874#elif defined(TARGET_M68K)
875 cpu_m68k_flush_flags(env, env->cc_op);
876 env->cc_op = CC_OP_FLAGS;
877 env->sr = (env->sr & 0xffe0)
878 | env->cc_dest | (env->cc_x << 4);
879 cpu_dump_state(env, logfile, fprintf, 0);
880#elif defined(TARGET_MIPS)
881 cpu_dump_state(env, logfile, fprintf, 0);
882#elif defined(TARGET_SH4)
883 cpu_dump_state(env, logfile, fprintf, 0);
884#elif defined(TARGET_ALPHA)
885 cpu_dump_state(env, logfile, fprintf, 0);
886#elif defined(TARGET_CRIS)
887 cpu_dump_state(env, logfile, fprintf, 0);
888#else
889#error unsupported target CPU
890#endif
891 }
892#endif
893 spin_lock(&tb_lock);
894 tb = tb_find_fast();
895 /* Note: we do it here to avoid a gcc bug on Mac OS X when
896 doing it in tb_find_slow */
897 if (tb_invalidated_flag) {
898 /* as some TB could have been invalidated because
899 of memory exceptions while generating the code, we
900 must recompute the hash index here */
901 next_tb = 0;
902 tb_invalidated_flag = 0;
903 }
904#ifdef DEBUG_EXEC
905 if ((loglevel & CPU_LOG_EXEC)) {
906 fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
907 (long)tb->tc_ptr, tb->pc,
908 lookup_symbol(tb->pc));
909 }
910#endif
911 /* see if we can patch the calling TB. When the TB
912 spans two pages, we cannot safely do a direct
913 jump. */
914 {
915 if (next_tb != 0 &&
916#ifdef USE_KQEMU
917 (env->kqemu_enabled != 2) &&
918#endif
919 tb->page_addr[1] == -1) {
920 tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
921 }
922 }
923 spin_unlock(&tb_lock);
924 env->current_tb = tb;
925 while (env->current_tb) {
926 tc_ptr = tb->tc_ptr;
927 /* execute the generated code */
928#if defined(__sparc__) && !defined(HOST_SOLARIS)
929#undef env
930 env = cpu_single_env;
931#define env cpu_single_env
932#endif
933 next_tb = tcg_qemu_tb_exec(tc_ptr);
934 env->current_tb = NULL;
935 if ((next_tb & 3) == 2) {
936 /* Instruction counter expired. */
937 int insns_left;
938 tb = (TranslationBlock *)(long)(next_tb & ~3);
939 /* Restore PC. */
940 CPU_PC_FROM_TB(env, tb);
941 insns_left = env->icount_decr.u32;
942 if (env->icount_extra && insns_left >= 0) {
943 /* Refill decrementer and continue execution. */
944 env->icount_extra += insns_left;
945 if (env->icount_extra > 0xffff) {
946 insns_left = 0xffff;
947 } else {
948 insns_left = env->icount_extra;
949 }
950 env->icount_extra -= insns_left;
951 env->icount_decr.u16.low = insns_left;
952 } else {
953 if (insns_left > 0) {
954 /* Execute remaining instructions. */
955 cpu_exec_nocache(insns_left, tb);
956 }
957 env->exception_index = EXCP_INTERRUPT;
958 next_tb = 0;
959 cpu_loop_exit();
960 }
961 }
962 }
963 /* reset soft MMU for next block (it can currently
964 only be set by a memory fault) */
965#if defined(USE_KQEMU)
966#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
967 if (kqemu_is_ok(env) &&
968 (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
969 cpu_loop_exit();
970 }
971#endif
972 } /* for(;;) */
973 } else {
974 env_to_regs();
975 }
976 } /* for(;;) */
977
978
979#if defined(TARGET_I386)
980 /* restore flags in standard format */
981 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
982#elif defined(TARGET_ARM)
983 /* XXX: Save/restore host fpu exception state? */
984#elif defined(TARGET_SPARC)
985#elif defined(TARGET_PPC)
986#elif defined(TARGET_M68K)
987 cpu_m68k_flush_flags(env, env->cc_op);
988 env->cc_op = CC_OP_FLAGS;
989 env->sr = (env->sr & 0xffe0)
990 | env->cc_dest | (env->cc_x << 4);
991#elif defined(TARGET_MIPS)
992#elif defined(TARGET_SH4)
993#elif defined(TARGET_ALPHA)
994#elif defined(TARGET_CRIS)
995 /* XXXXX */
996#else
997#error unsupported target CPU
998#endif
999
1000 /* restore global registers */
1001#include "hostregs_helper.h"
1002
1003 /* fail safe : never use cpu_single_env outside cpu_exec() */
1004 cpu_single_env = NULL;
1005 return ret;
1006}
1007#endif /* !VBOX */
1008
1009/* must only be called from the generated code as an exception can be
1010 generated */
1011void tb_invalidate_page_range(target_ulong start, target_ulong end)
1012{
1013 /* XXX: cannot enable it yet because it yields to MMU exception
1014 where NIP != read address on PowerPC */
1015#if 0
1016 target_ulong phys_addr;
1017 phys_addr = get_phys_addr_code(env, start);
1018 tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
1019#endif
1020}
1021
1022#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
1023
1024void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
1025{
1026 CPUX86State *saved_env;
1027
1028 saved_env = env;
1029 env = s;
1030 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
1031 selector &= 0xffff;
1032 cpu_x86_load_seg_cache(env, seg_reg, selector,
1033 (selector << 4), 0xffff, 0);
1034 } else {
1035 load_seg(seg_reg, selector);
1036 }
1037 env = saved_env;
1038}
1039
1040void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32)
1041{
1042 CPUX86State *saved_env;
1043
1044 saved_env = env;
1045 env = s;
1046
1047 helper_fsave((target_ulong)ptr, data32);
1048
1049 env = saved_env;
1050}
1051
1052void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32)
1053{
1054 CPUX86State *saved_env;
1055
1056 saved_env = env;
1057 env = s;
1058
1059 helper_frstor((target_ulong)ptr, data32);
1060
1061 env = saved_env;
1062}
1063
1064#endif /* TARGET_I386 */
1065
1066#if !defined(CONFIG_SOFTMMU)
1067
1068#if defined(TARGET_I386)
1069
1070/* 'pc' is the host PC at which the exception was raised. 'address' is
1071 the effective address of the memory exception. 'is_write' is 1 if a
1072 write caused the exception, 0 otherwise. 'old_set' is the
1073 signal set which should be restored */
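/* Returns 1 when the fault was dealt with here (the page was unprotected or the
   guest fault was delivered) and 0 when it was not a guest MMU fault at all, in
   which case the host must handle the signal normally. */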
1074static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1075 int is_write, sigset_t *old_set,
1076 void *puc)
1077{
1078 TranslationBlock *tb;
1079 int ret;
1080
1081 if (cpu_single_env)
1082 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1083#if defined(DEBUG_SIGNAL)
1084 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1085 pc, address, is_write, *(unsigned long *)old_set);
1086#endif
1087 /* XXX: locking issue */
1088 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1089 return 1;
1090 }
1091
1092 /* see if it is an MMU fault */
1093 ret = cpu_x86_handle_mmu_fault(env, address, is_write,
1094 ((env->hflags & HF_CPL_MASK) == 3), 0);
1095 if (ret < 0)
1096 return 0; /* not an MMU fault */
1097 if (ret == 0)
1098 return 1; /* the MMU fault was handled without causing real CPU fault */
1099 /* now we have a real cpu fault */
1100 tb = tb_find_pc(pc);
1101 if (tb) {
1102 /* the PC is inside the translated code. It means that we have
1103 a virtual CPU fault */
1104 cpu_restore_state(tb, env, pc, puc);
1105 }
1106 if (ret == 1) {
1107#if 0
1108 printf("PF exception: EIP=0x%RGv CR2=0x%RGv error=0x%x\n",
1109 env->eip, env->cr[2], env->error_code);
1110#endif
1111 /* we restore the process signal mask as the sigreturn should
1112 do it (XXX: use sigsetjmp) */
1113 sigprocmask(SIG_SETMASK, old_set, NULL);
1114 raise_exception_err(env->exception_index, env->error_code);
1115 } else {
1116 /* activate soft MMU for this block */
1117 env->hflags |= HF_SOFTMMU_MASK;
1118 cpu_resume_from_signal(env, puc);
1119 }
1120 /* never comes here */
1121 return 1;
1122}
1123
1124#elif defined(TARGET_ARM)
1125static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1126 int is_write, sigset_t *old_set,
1127 void *puc)
1128{
1129 TranslationBlock *tb;
1130 int ret;
1131
1132 if (cpu_single_env)
1133 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1134#if defined(DEBUG_SIGNAL)
1135 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1136 pc, address, is_write, *(unsigned long *)old_set);
1137#endif
1138 /* XXX: locking issue */
1139 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1140 return 1;
1141 }
1142 /* see if it is an MMU fault */
1143 ret = cpu_arm_handle_mmu_fault(env, address, is_write, 1, 0);
1144 if (ret < 0)
1145 return 0; /* not an MMU fault */
1146 if (ret == 0)
1147 return 1; /* the MMU fault was handled without causing real CPU fault */
1148 /* now we have a real cpu fault */
1149 tb = tb_find_pc(pc);
1150 if (tb) {
1151 /* the PC is inside the translated code. It means that we have
1152 a virtual CPU fault */
1153 cpu_restore_state(tb, env, pc, puc);
1154 }
1155 /* we restore the process signal mask as the sigreturn should
1156 do it (XXX: use sigsetjmp) */
1157 sigprocmask(SIG_SETMASK, old_set, NULL);
1158 cpu_loop_exit();
1159}
1160#elif defined(TARGET_SPARC)
1161static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1162 int is_write, sigset_t *old_set,
1163 void *puc)
1164{
1165 TranslationBlock *tb;
1166 int ret;
1167
1168 if (cpu_single_env)
1169 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1170#if defined(DEBUG_SIGNAL)
1171 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1172 pc, address, is_write, *(unsigned long *)old_set);
1173#endif
1174 /* XXX: locking issue */
1175 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1176 return 1;
1177 }
1178 /* see if it is an MMU fault */
1179 ret = cpu_sparc_handle_mmu_fault(env, address, is_write, 1, 0);
1180 if (ret < 0)
1181 return 0; /* not an MMU fault */
1182 if (ret == 0)
1183 return 1; /* the MMU fault was handled without causing real CPU fault */
1184 /* now we have a real cpu fault */
1185 tb = tb_find_pc(pc);
1186 if (tb) {
1187 /* the PC is inside the translated code. It means that we have
1188 a virtual CPU fault */
1189 cpu_restore_state(tb, env, pc, puc);
1190 }
1191 /* we restore the process signal mask as the sigreturn should
1192 do it (XXX: use sigsetjmp) */
1193 sigprocmask(SIG_SETMASK, old_set, NULL);
1194 cpu_loop_exit();
1195}
1196#elif defined (TARGET_PPC)
1197static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1198 int is_write, sigset_t *old_set,
1199 void *puc)
1200{
1201 TranslationBlock *tb;
1202 int ret;
1203
1204 if (cpu_single_env)
1205 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1206#if defined(DEBUG_SIGNAL)
1207 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1208 pc, address, is_write, *(unsigned long *)old_set);
1209#endif
1210 /* XXX: locking issue */
1211 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1212 return 1;
1213 }
1214
1215 /* see if it is an MMU fault */
1216 ret = cpu_ppc_handle_mmu_fault(env, address, is_write, msr_pr, 0);
1217 if (ret < 0)
1218 return 0; /* not an MMU fault */
1219 if (ret == 0)
1220 return 1; /* the MMU fault was handled without causing real CPU fault */
1221
1222 /* now we have a real cpu fault */
1223 tb = tb_find_pc(pc);
1224 if (tb) {
1225 /* the PC is inside the translated code. It means that we have
1226 a virtual CPU fault */
1227 cpu_restore_state(tb, env, pc, puc);
1228 }
1229 if (ret == 1) {
1230#if 0
1231 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1232 env->nip, env->error_code, tb);
1233#endif
1234 /* we restore the process signal mask as the sigreturn should
1235 do it (XXX: use sigsetjmp) */
1236 sigprocmask(SIG_SETMASK, old_set, NULL);
1237 do_raise_exception_err(env->exception_index, env->error_code);
1238 } else {
1239 /* activate soft MMU for this block */
1240 cpu_resume_from_signal(env, puc);
1241 }
1242 /* never comes here */
1243 return 1;
1244}
1245
1246#elif defined(TARGET_M68K)
1247static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1248 int is_write, sigset_t *old_set,
1249 void *puc)
1250{
1251 TranslationBlock *tb;
1252 int ret;
1253
1254 if (cpu_single_env)
1255 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1256#if defined(DEBUG_SIGNAL)
1257 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1258 pc, address, is_write, *(unsigned long *)old_set);
1259#endif
1260 /* XXX: locking issue */
1261 if (is_write && page_unprotect(address, pc, puc)) {
1262 return 1;
1263 }
1264 /* see if it is an MMU fault */
1265 ret = cpu_m68k_handle_mmu_fault(env, address, is_write, 1, 0);
1266 if (ret < 0)
1267 return 0; /* not an MMU fault */
1268 if (ret == 0)
1269 return 1; /* the MMU fault was handled without causing real CPU fault */
1270 /* now we have a real cpu fault */
1271 tb = tb_find_pc(pc);
1272 if (tb) {
1273 /* the PC is inside the translated code. It means that we have
1274 a virtual CPU fault */
1275 cpu_restore_state(tb, env, pc, puc);
1276 }
1277 /* we restore the process signal mask as the sigreturn should
1278 do it (XXX: use sigsetjmp) */
1279 sigprocmask(SIG_SETMASK, old_set, NULL);
1280 cpu_loop_exit();
1281 /* never comes here */
1282 return 1;
1283}
1284
1285#elif defined (TARGET_MIPS)
1286static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1287 int is_write, sigset_t *old_set,
1288 void *puc)
1289{
1290 TranslationBlock *tb;
1291 int ret;
1292
1293 if (cpu_single_env)
1294 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1295#if defined(DEBUG_SIGNAL)
1296 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1297 pc, address, is_write, *(unsigned long *)old_set);
1298#endif
1299 /* XXX: locking issue */
1300 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1301 return 1;
1302 }
1303
1304 /* see if it is an MMU fault */
1305 ret = cpu_mips_handle_mmu_fault(env, address, is_write, 1, 0);
1306 if (ret < 0)
1307 return 0; /* not an MMU fault */
1308 if (ret == 0)
1309 return 1; /* the MMU fault was handled without causing real CPU fault */
1310
1311 /* now we have a real cpu fault */
1312 tb = tb_find_pc(pc);
1313 if (tb) {
1314 /* the PC is inside the translated code. It means that we have
1315 a virtual CPU fault */
1316 cpu_restore_state(tb, env, pc, puc);
1317 }
1318 if (ret == 1) {
1319#if 0
1320 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1321 env->nip, env->error_code, tb);
1322#endif
1323 /* we restore the process signal mask as the sigreturn should
1324 do it (XXX: use sigsetjmp) */
1325 sigprocmask(SIG_SETMASK, old_set, NULL);
1326 do_raise_exception_err(env->exception_index, env->error_code);
1327 } else {
1328 /* activate soft MMU for this block */
1329 cpu_resume_from_signal(env, puc);
1330 }
1331 /* never comes here */
1332 return 1;
1333}
1334
1335#elif defined (TARGET_SH4)
1336static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1337 int is_write, sigset_t *old_set,
1338 void *puc)
1339{
1340 TranslationBlock *tb;
1341 int ret;
1342
1343 if (cpu_single_env)
1344 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1345#if defined(DEBUG_SIGNAL)
1346 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1347 pc, address, is_write, *(unsigned long *)old_set);
1348#endif
1349 /* XXX: locking issue */
1350 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1351 return 1;
1352 }
1353
1354 /* see if it is an MMU fault */
1355 ret = cpu_sh4_handle_mmu_fault(env, address, is_write, 1, 0);
1356 if (ret < 0)
1357 return 0; /* not an MMU fault */
1358 if (ret == 0)
1359 return 1; /* the MMU fault was handled without causing real CPU fault */
1360
1361 /* now we have a real cpu fault */
1362 tb = tb_find_pc(pc);
1363 if (tb) {
1364 /* the PC is inside the translated code. It means that we have
1365 a virtual CPU fault */
1366 cpu_restore_state(tb, env, pc, puc);
1367 }
1368#if 0
1369 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1370 env->nip, env->error_code, tb);
1371#endif
1372 /* we restore the process signal mask as the sigreturn should
1373 do it (XXX: use sigsetjmp) */
1374 sigprocmask(SIG_SETMASK, old_set, NULL);
1375 cpu_loop_exit();
1376 /* never comes here */
1377 return 1;
1378}
1379#else
1380#error unsupported target CPU
1381#endif
1382
1383#if defined(__i386__)
1384
1385#if defined(__APPLE__)
1386# include <sys/ucontext.h>
1387
1388# define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
1389# define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
1390# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
1391#else
1392# define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
1393# define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
1394# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
1395#endif
1396
1397int cpu_signal_handler(int host_signum, void *pinfo,
1398 void *puc)
1399{
1400 siginfo_t *info = pinfo;
1401 struct ucontext *uc = puc;
1402 unsigned long pc;
1403 int trapno;
1404
1405#ifndef REG_EIP
1406/* for glibc 2.1 */
1407#define REG_EIP EIP
1408#define REG_ERR ERR
1409#define REG_TRAPNO TRAPNO
1410#endif
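    /* Trap number 0xe is the x86 page fault; bit 1 of its error code indicates
       whether the faulting access was a write. */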
1411 pc = uc->uc_mcontext.gregs[REG_EIP];
1412 trapno = uc->uc_mcontext.gregs[REG_TRAPNO];
1413#if defined(TARGET_I386) && defined(USE_CODE_COPY)
1414 if (trapno == 0x00 || trapno == 0x05) {
1415 /* send division by zero or bound exception */
1416 cpu_send_trap(pc, trapno, uc);
1417 return 1;
1418 } else
1419#endif
1420 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1421 trapno == 0xe ?
1422 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
1423 &uc->uc_sigmask, puc);
1424}
1425
1426#elif defined(__x86_64__)
1427
1428int cpu_signal_handler(int host_signum, void *pinfo,
1429 void *puc)
1430{
1431 siginfo_t *info = pinfo;
1432 struct ucontext *uc = puc;
1433 unsigned long pc;
1434
1435 pc = uc->uc_mcontext.gregs[REG_RIP];
1436 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1437 uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
1438 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
1439 &uc->uc_sigmask, puc);
1440}
1441
1442#elif defined(__powerpc__)
1443
1444/***********************************************************************
1445 * signal context platform-specific definitions
1446 * From Wine
1447 */
1448#ifdef linux
1449/* All Registers access - only for local access */
1450# define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
1451/* Gpr Registers access */
1452# define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
1453# define IAR_sig(context) REG_sig(nip, context) /* Program counter */
1454# define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
1455# define CTR_sig(context) REG_sig(ctr, context) /* Count register */
1456# define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1457# define LR_sig(context) REG_sig(link, context) /* Link register */
1458# define CR_sig(context) REG_sig(ccr, context) /* Condition register */
1459/* Float Registers access */
1460# define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
1461# define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
1462/* Exception Registers access */
1463# define DAR_sig(context) REG_sig(dar, context)
1464# define DSISR_sig(context) REG_sig(dsisr, context)
1465# define TRAP_sig(context) REG_sig(trap, context)
1466#endif /* linux */
1467
1468#ifdef __APPLE__
1469# include <sys/ucontext.h>
1470typedef struct ucontext SIGCONTEXT;
1471/* All Registers access - only for local access */
1472# define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
1473# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
1474# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
1475# define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
1476/* Gpr Registers access */
1477# define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
1478# define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
1479# define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
1480# define CTR_sig(context) REG_sig(ctr, context)
1481# define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1482# define LR_sig(context) REG_sig(lr, context) /* Link register */
1483# define CR_sig(context) REG_sig(cr, context) /* Condition register */
1484/* Float Registers access */
1485# define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
1486# define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
1487/* Exception Registers access */
1488# define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
1489# define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
1490# define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
1491#endif /* __APPLE__ */
1492
1493int cpu_signal_handler(int host_signum, void *pinfo,
1494 void *puc)
1495{
1496 siginfo_t *info = pinfo;
1497 struct ucontext *uc = puc;
1498 unsigned long pc;
1499 int is_write;
1500
1501 pc = IAR_sig(uc);
1502 is_write = 0;
1503#if 0
1504 /* ppc 4xx case */
1505 if (DSISR_sig(uc) & 0x00800000)
1506 is_write = 1;
1507#else
1508 if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
1509 is_write = 1;
1510#endif
1511 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1512 is_write, &uc->uc_sigmask, puc);
1513}
1514
1515#elif defined(__alpha__)
1516
1517int cpu_signal_handler(int host_signum, void *pinfo,
1518 void *puc)
1519{
1520 siginfo_t *info = pinfo;
1521 struct ucontext *uc = puc;
1522 uint32_t *pc = uc->uc_mcontext.sc_pc;
1523 uint32_t insn = *pc;
1524 int is_write = 0;
1525
1526 /* XXX: need kernel patch to get write flag faster */
1527 switch (insn >> 26) {
1528 case 0x0d: // stw
1529 case 0x0e: // stb
1530 case 0x0f: // stq_u
1531 case 0x24: // stf
1532 case 0x25: // stg
1533 case 0x26: // sts
1534 case 0x27: // stt
1535 case 0x2c: // stl
1536 case 0x2d: // stq
1537 case 0x2e: // stl_c
1538 case 0x2f: // stq_c
1539 is_write = 1;
1540 }
1541
1542 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1543 is_write, &uc->uc_sigmask, puc);
1544}
1545#elif defined(__sparc__)
1546
1547int cpu_signal_handler(int host_signum, void *pinfo,
1548 void *puc)
1549{
1550 siginfo_t *info = pinfo;
1551 uint32_t *regs = (uint32_t *)(info + 1);
1552 void *sigmask = (regs + 20);
1553 unsigned long pc;
1554 int is_write;
1555 uint32_t insn;
1556
1557 /* XXX: is there a standard glibc define ? */
1558 pc = regs[1];
1559 /* XXX: need kernel patch to get write flag faster */
1560 is_write = 0;
1561 insn = *(uint32_t *)pc;
1562 if ((insn >> 30) == 3) {
1563 switch((insn >> 19) & 0x3f) {
1564 case 0x05: // stb
1565 case 0x06: // sth
1566 case 0x04: // st
1567 case 0x07: // std
1568 case 0x24: // stf
1569 case 0x27: // stdf
1570 case 0x25: // stfsr
1571 is_write = 1;
1572 break;
1573 }
1574 }
1575 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1576 is_write, sigmask, NULL);
1577}
1578
1579#elif defined(__arm__)
1580
1581int cpu_signal_handler(int host_signum, void *pinfo,
1582 void *puc)
1583{
1584 siginfo_t *info = pinfo;
1585 struct ucontext *uc = puc;
1586 unsigned long pc;
1587 int is_write;
1588
1589 pc = uc->uc_mcontext.gregs[R15];
1590 /* XXX: compute is_write */
1591 is_write = 0;
1592 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1593 is_write,
1594 &uc->uc_sigmask, puc);
1595}
1596
1597#elif defined(__mc68000)
1598
1599int cpu_signal_handler(int host_signum, void *pinfo,
1600 void *puc)
1601{
1602 siginfo_t *info = pinfo;
1603 struct ucontext *uc = puc;
1604 unsigned long pc;
1605 int is_write;
1606
1607 pc = uc->uc_mcontext.gregs[16];
1608 /* XXX: compute is_write */
1609 is_write = 0;
1610 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1611 is_write,
1612 &uc->uc_sigmask, puc);
1613}
1614
1615#elif defined(__ia64)
1616
1617#ifndef __ISR_VALID
1618 /* This ought to be in <bits/siginfo.h>... */
1619# define __ISR_VALID 1
1620#endif
1621
1622int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
1623{
1624 siginfo_t *info = pinfo;
1625 struct ucontext *uc = puc;
1626 unsigned long ip;
1627 int is_write = 0;
1628
1629 ip = uc->uc_mcontext.sc_ip;
1630 switch (host_signum) {
1631 case SIGILL:
1632 case SIGFPE:
1633 case SIGSEGV:
1634 case SIGBUS:
1635 case SIGTRAP:
1636 if (info->si_code && (info->si_segvflags & __ISR_VALID))
1637 /* ISR.W (write-access) is bit 33: */
1638 is_write = (info->si_isr >> 33) & 1;
1639 break;
1640
1641 default:
1642 break;
1643 }
1644 return handle_cpu_signal(ip, (unsigned long)info->si_addr,
1645 is_write,
1646 &uc->uc_sigmask, puc);
1647}
1648
1649#elif defined(__s390__)
1650
1651int cpu_signal_handler(int host_signum, void *pinfo,
1652 void *puc)
1653{
1654 siginfo_t *info = pinfo;
1655 struct ucontext *uc = puc;
1656 unsigned long pc;
1657 int is_write;
1658
1659 pc = uc->uc_mcontext.psw.addr;
1660 /* XXX: compute is_write */
1661 is_write = 0;
1662 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1663 is_write,
1664 &uc->uc_sigmask, puc);
1665}
1666
1667#else
1668
1669#error host CPU specific signal handler needed
1670
1671#endif
1672
1673#endif /* !defined(CONFIG_SOFTMMU) */