VirtualBox

source: vbox/trunk/src/recompiler/cpu-exec.c@ 36143

Last change on this file since 36143 was 36140, checked in by vboxsync, 14 years ago

rem: Re-synced to svn://svn.savannah.nongnu.org/qemu/trunk@5495 (repo UUID c046a42c-6fe2-441c-8c8c-71466251a162).

  • Property svn:eol-style set to native
File size: 64.6 KB
1/*
2 * i386 emulator main execution loop
3 *
4 * Copyright (c) 2003-2005 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29
30#include "config.h"
31#define CPU_NO_GLOBAL_REGS
32#include "exec.h"
33#include "disas.h"
34#include "tcg.h"
35
36#if !defined(CONFIG_SOFTMMU)
37#undef EAX
38#undef ECX
39#undef EDX
40#undef EBX
41#undef ESP
42#undef EBP
43#undef ESI
44#undef EDI
45#undef EIP
46#include <signal.h>
47#include <sys/ucontext.h>
48#endif
49
50#if defined(__sparc__) && !defined(HOST_SOLARIS)
51// Work around ugly bugs in glibc that mangle global register contents
52#undef env
53#define env cpu_single_env
54#endif
55
56int tb_invalidated_flag;
57
58//#define DEBUG_EXEC
59//#define DEBUG_SIGNAL
60
61void cpu_loop_exit(void)
62{
63 /* NOTE: the registers at this point must be saved by hand because
64 longjmp restores them */
65 regs_to_env();
66 longjmp(env->jmp_env, 1);
67}
68
69#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
70#define reg_T2
71#endif
72
73/* exit the current TB from a signal handler. The host registers are
74 restored in a state compatible with the CPU emulator
75 */
76void cpu_resume_from_signal(CPUState *env1, void *puc)
77{
78#if !defined(CONFIG_SOFTMMU)
79 struct ucontext *uc = puc;
80#endif
81
82 env = env1;
83
84 /* XXX: restore cpu registers saved in host registers */
85
86#if !defined(CONFIG_SOFTMMU)
87 if (puc) {
88 /* XXX: use siglongjmp ? */
89 sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
90 }
91#endif
92 longjmp(env->jmp_env, 1);
93}
94
95/* Execute the code without caching the generated code. An interpreter
96 could be used if available. */
97static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
98{
99 unsigned long next_tb;
100 TranslationBlock *tb;
101
102 /* Should never happen.
103 We only end up here when an existing TB is too long. */
104 if (max_cycles > CF_COUNT_MASK)
105 max_cycles = CF_COUNT_MASK;
106
107 tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
108 max_cycles);
109 env->current_tb = tb;
110 /* execute the generated code */
111#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
112 tcg_qemu_tb_exec(tb->tc_ptr, next_tb);
113#else
114 next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
115#endif
116
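    /* tcg_qemu_tb_exec() returns the address of the TB it exited from, with the
       index of the outgoing jump slot in its low two bits, or 0 when no chaining
       is possible; the low-bit value 2 marks a forced exit before the block ran
       to completion, handled below and in cpu_exec(). */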
117 if ((next_tb & 3) == 2) {
118 /* Restore PC. This may happen if an async event occurs before
119 the TB starts executing. */
120 CPU_PC_FROM_TB(env, tb);
121 }
122 tb_phys_invalidate(tb, -1);
123 tb_free(tb);
124}
125
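/* Look up a TB by the physical address of its code. TBs are hashed on the
   physical PC; a block that crosses a page boundary must also match on its
   second physical page. If no block is found, one is generated with
   tb_gen_code() and the result is cached in the virtual-PC jump cache. */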
126static TranslationBlock *tb_find_slow(target_ulong pc,
127 target_ulong cs_base,
128 uint64_t flags)
129{
130 TranslationBlock *tb, **ptb1;
131 unsigned int h;
132 target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
133
134 tb_invalidated_flag = 0;
135
136 regs_to_env(); /* XXX: do it just before cpu_gen_code() */
137
138 /* find translated block using physical mappings */
139 phys_pc = get_phys_addr_code(env, pc);
140 phys_page1 = phys_pc & TARGET_PAGE_MASK;
141 phys_page2 = -1;
142 h = tb_phys_hash_func(phys_pc);
143 ptb1 = &tb_phys_hash[h];
144 for(;;) {
145 tb = *ptb1;
146 if (!tb)
147 goto not_found;
148 if (tb->pc == pc &&
149 tb->page_addr[0] == phys_page1 &&
150 tb->cs_base == cs_base &&
151 tb->flags == flags) {
152 /* check next page if needed */
153 if (tb->page_addr[1] != -1) {
154 virt_page2 = (pc & TARGET_PAGE_MASK) +
155 TARGET_PAGE_SIZE;
156 phys_page2 = get_phys_addr_code(env, virt_page2);
157 if (tb->page_addr[1] == phys_page2)
158 goto found;
159 } else {
160 goto found;
161 }
162 }
163 ptb1 = &tb->phys_hash_next;
164 }
165 not_found:
166 /* if no translated code available, then translate it now */
167 tb = tb_gen_code(env, pc, cs_base, flags, 0);
168
169 found:
170 /* we add the TB in the virtual pc hash table */
171 env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
172 return tb;
173}
174
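/* Fast-path TB lookup: compute the (pc, cs_base, flags) tuple for the current
   target and probe env->tb_jmp_cache; fall back to tb_find_slow() on a miss. */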
175static inline TranslationBlock *tb_find_fast(void)
176{
177 TranslationBlock *tb;
178 target_ulong cs_base, pc;
179 uint64_t flags;
180
181 /* we record a subset of the CPU state. It will
182 always be the same before a given translated block
183 is executed. */
184#if defined(TARGET_I386)
185 flags = env->hflags;
186 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
187 cs_base = env->segs[R_CS].base;
188 pc = cs_base + env->eip;
189#elif defined(TARGET_ARM)
190 flags = env->thumb | (env->vfp.vec_len << 1)
191 | (env->vfp.vec_stride << 4);
192 if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
193 flags |= (1 << 6);
194 if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
195 flags |= (1 << 7);
196 flags |= (env->condexec_bits << 8);
197 cs_base = 0;
198 pc = env->regs[15];
199#elif defined(TARGET_SPARC)
200#ifdef TARGET_SPARC64
201 // AM . Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
202 flags = ((env->pstate & PS_AM) << 2)
203 | (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
204 | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
205#else
206 // FPU enable . Supervisor
207 flags = (env->psref << 4) | env->psrs;
208#endif
209 cs_base = env->npc;
210 pc = env->pc;
211#elif defined(TARGET_PPC)
212 flags = env->hflags;
213 cs_base = 0;
214 pc = env->nip;
215#elif defined(TARGET_MIPS)
216 flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
217 cs_base = 0;
218 pc = env->active_tc.PC;
219#elif defined(TARGET_M68K)
220 flags = (env->fpcr & M68K_FPCR_PREC) /* Bit 6 */
221 | (env->sr & SR_S) /* Bit 13 */
222 | ((env->macsr >> 4) & 0xf); /* Bits 0-3 */
223 cs_base = 0;
224 pc = env->pc;
225#elif defined(TARGET_SH4)
226 flags = (env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL
227 | DELAY_SLOT_TRUE | DELAY_SLOT_CLEARME)) /* Bits 0- 3 */
228 | (env->fpscr & (FPSCR_FR | FPSCR_SZ | FPSCR_PR)) /* Bits 19-21 */
229 | (env->sr & (SR_MD | SR_RB)); /* Bits 29-30 */
230 cs_base = 0;
231 pc = env->pc;
232#elif defined(TARGET_ALPHA)
233 flags = env->ps;
234 cs_base = 0;
235 pc = env->pc;
236#elif defined(TARGET_CRIS)
237 flags = env->pregs[PR_CCS] & (S_FLAG | P_FLAG | U_FLAG | X_FLAG);
238 flags |= env->dslot;
239 cs_base = 0;
240 pc = env->pc;
241#else
242#error unsupported CPU
243#endif
244 tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
245 if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
246 tb->flags != flags)) {
247 tb = tb_find_slow(pc, cs_base, flags);
248 }
249 return tb;
250}
251
252/* main execution loop */
253
254#ifdef VBOX
255
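/* VirtualBox variant of the main execution loop: repeatedly find and run
   translated blocks until an exception or an interrupt_request flag forces an
   exit, then return the exception index (EXCP_*) to the caller. */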
256int cpu_exec(CPUState *env1)
257{
258#define DECLARE_HOST_REGS 1
259#include "hostregs_helper.h"
260 int ret = 0, interrupt_request;
261 TranslationBlock *tb;
262 uint8_t *tc_ptr;
263 unsigned long next_tb;
264
265 cpu_single_env = env1;
266
267 /* first we save global registers */
268#define SAVE_HOST_REGS 1
269#include "hostregs_helper.h"
270 env = env1;
271
272 env_to_regs();
273#if defined(TARGET_I386)
274 /* put eflags in CPU temporary format */
275 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
276 DF = 1 - (2 * ((env->eflags >> 10) & 1));
277 CC_OP = CC_OP_EFLAGS;
278 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
279#elif defined(TARGET_SPARC)
280#elif defined(TARGET_M68K)
281 env->cc_op = CC_OP_FLAGS;
282 env->cc_dest = env->sr & 0xf;
283 env->cc_x = (env->sr >> 4) & 1;
284#elif defined(TARGET_ALPHA)
285#elif defined(TARGET_ARM)
286#elif defined(TARGET_PPC)
287#elif defined(TARGET_MIPS)
288#elif defined(TARGET_SH4)
289#elif defined(TARGET_CRIS)
290 /* XXXXX */
291#else
292#error unsupported target CPU
293#endif
294#ifndef VBOX /* VBOX: We need to raise traps and suchlike from the outside. */
295 env->exception_index = -1;
296#endif
297
298 /* prepare setjmp context for exception handling */
299 for(;;) {
300 if (setjmp(env->jmp_env) == 0)
301 {
302 env->current_tb = NULL;
303
304 /*
305 * Check for fatal errors first
306 */
307 if (env->interrupt_request & CPU_INTERRUPT_RC) {
308 env->exception_index = EXCP_RC;
309 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC);
310 ret = env->exception_index;
311 cpu_loop_exit();
312 }
313
314 /* if an exception is pending, we execute it here */
315 if (env->exception_index >= 0) {
316 Assert(!env->user_mode_only);
317 if (env->exception_index >= EXCP_INTERRUPT) {
318 /* exit request from the cpu execution loop */
319 ret = env->exception_index;
320 break;
321 } else {
322 /* simulate a real cpu exception. On i386, it can
323 trigger new exceptions, but we do not handle
324 double or triple faults yet. */
325 RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
326 Log(("do_interrupt %d %d %RGv\n", env->exception_index, env->exception_is_int, (RTGCPTR)env->exception_next_eip));
327 do_interrupt(env->exception_index,
328 env->exception_is_int,
329 env->error_code,
330 env->exception_next_eip, 0);
331 /* successfully delivered */
332 env->old_exception = -1;
333 RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
334 }
335 env->exception_index = -1;
336 }
337
338 next_tb = 0; /* force lookup of first TB */
339 for(;;)
340 {
341 interrupt_request = env->interrupt_request;
342#ifndef VBOX
343 if (__builtin_expect(interrupt_request, 0))
344#else
345 if (RT_UNLIKELY(interrupt_request != 0))
346#endif
347 {
348 /** @todo: reconcile with what QEMU really does */
349
350 /* Single instruction exec request: we execute it and return (one way or the other).
351 The caller will always reschedule after doing this operation! */
352 if (interrupt_request & CPU_INTERRUPT_SINGLE_INSTR)
353 {
354 /* not in flight are we? (if we are, we trapped) */
355 if (!(env->interrupt_request & CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
356 {
357 ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
358 env->exception_index = EXCP_SINGLE_INSTR;
359 if (emulate_single_instr(env) == -1)
360 AssertMsgFailed(("REM: emulate_single_instr failed for EIP=%RGv!!\n", (RTGCPTR)env->eip));
361
362 /* When we receive an external interrupt during execution of this single
363 instruction, we should stay here. We will leave when we're ready
364 for raw-mode or when interrupted by pending EMT requests. */
365 interrupt_request = env->interrupt_request; /* reload this! */
366 if ( !(interrupt_request & CPU_INTERRUPT_HARD)
367 || !(env->eflags & IF_MASK)
368 || (env->hflags & HF_INHIBIT_IRQ_MASK)
369 || (env->state & CPU_RAW_HWACC)
370 )
371 {
372 env->exception_index = ret = EXCP_SINGLE_INSTR;
373 cpu_loop_exit();
374 }
375 }
376 /* Clear CPU_INTERRUPT_SINGLE_INSTR and leave CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT set. */
377 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_SINGLE_INSTR);
378 }
379
380 RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
381 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
382 !(env->hflags & HF_SMM_MASK)) {
383 env->interrupt_request &= ~CPU_INTERRUPT_SMI;
384 do_smm_enter();
385 next_tb = 0;
386 }
387 else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
388 (env->eflags & IF_MASK) &&
389 !(env->hflags & HF_INHIBIT_IRQ_MASK))
390 {
391 /* if hardware interrupt pending, we execute it */
392 int intno;
393 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_HARD);
394 intno = cpu_get_pic_interrupt(env);
395 if (intno >= 0)
396 {
397 Log(("do_interrupt %d\n", intno));
398 do_interrupt(intno, 0, 0, 0, 1);
399 }
400 /* ensure that no TB jump will be modified as
401 the program flow was changed */
402 next_tb = 0;
403 }
404 if (env->interrupt_request & CPU_INTERRUPT_EXITTB)
405 {
406 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXITTB);
407 /* ensure that no TB jump will be modified as
408 the program flow was changed */
409 next_tb = 0;
410 }
411 RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
412 if (interrupt_request & CPU_INTERRUPT_EXIT)
413 {
414 env->exception_index = EXCP_INTERRUPT;
415 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXIT);
416 ret = env->exception_index;
417 cpu_loop_exit();
418 }
419 if (interrupt_request & CPU_INTERRUPT_RC)
420 {
421 env->exception_index = EXCP_RC;
422 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC);
423 ret = env->exception_index;
424 cpu_loop_exit();
425 }
426 }
427
428 /*
429 * Check whether the CPU state allows us to execute the code in raw-mode.
430 */
431 RAWEx_ProfileStart(env, STATS_RAW_CHECK);
432 if (remR3CanExecuteRaw(env,
433 env->eip + env->segs[R_CS].base,
434 env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK)),
435 &env->exception_index))
436 {
437 RAWEx_ProfileStop(env, STATS_RAW_CHECK);
438 ret = env->exception_index;
439 cpu_loop_exit();
440 }
441 RAWEx_ProfileStop(env, STATS_RAW_CHECK);
442
443 RAWEx_ProfileStart(env, STATS_TLB_LOOKUP);
444 spin_lock(&tb_lock);
445 tb = tb_find_fast();
446 /* Note: we do it here to avoid a gcc bug on Mac OS X when
447 doing it in tb_find_slow */
448 if (tb_invalidated_flag) {
449 /* as some TB could have been invalidated because
450 of memory exceptions while generating the code, we
451 must recompute the hash index here */
452 next_tb = 0;
453 tb_invalidated_flag = 0;
454 }
455
456 /* see if we can patch the calling TB. When the TB
457 spans two pages, we cannot safely do a direct
458 jump. */
459 if (next_tb != 0
460 && !(tb->cflags & CF_RAW_MODE)
461 && tb->page_addr[1] == -1)
462 {
463 tb_add_jump((TranslationBlock *)(long)(next_tb & ~3), next_tb & 3, tb);
464 }
465 spin_unlock(&tb_lock);
466 RAWEx_ProfileStop(env, STATS_TLB_LOOKUP);
467
468 env->current_tb = tb;
469 while (env->current_tb) {
470 tc_ptr = tb->tc_ptr;
471 /* execute the generated code */
472 RAWEx_ProfileStart(env, STATS_QEMU_RUN_EMULATED_CODE);
473#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
474 tcg_qemu_tb_exec(tc_ptr, next_tb);
475#else
476 next_tb = tcg_qemu_tb_exec(tc_ptr);
477#endif
478 RAWEx_ProfileStop(env, STATS_QEMU_RUN_EMULATED_CODE);
479 env->current_tb = NULL;
480 if ((next_tb & 3) == 2) {
481 /* Instruction counter expired. */
482 int insns_left;
483 tb = (TranslationBlock *)(long)(next_tb & ~3);
484 /* Restore PC. */
485 CPU_PC_FROM_TB(env, tb);
486 insns_left = env->icount_decr.u32;
487 if (env->icount_extra && insns_left >= 0) {
488 /* Refill decrementer and continue execution. */
489 env->icount_extra += insns_left;
490 if (env->icount_extra > 0xffff) {
491 insns_left = 0xffff;
492 } else {
493 insns_left = env->icount_extra;
494 }
495 env->icount_extra -= insns_left;
496 env->icount_decr.u16.low = insns_left;
497 } else {
498 if (insns_left > 0) {
499 /* Execute remaining instructions. */
500 cpu_exec_nocache(insns_left, tb);
501 }
502 env->exception_index = EXCP_INTERRUPT;
503 next_tb = 0;
504 cpu_loop_exit();
505 }
506 }
507 }
508
509 /* reset soft MMU for next block (it can currently
510 only be set by a memory fault) */
511#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
512 if (env->hflags & HF_SOFTMMU_MASK) {
513 env->hflags &= ~HF_SOFTMMU_MASK;
514 /* do not allow linking to another block */
515 next_tb = 0;
516 }
517#endif
518 } /* for(;;) */
519 } else {
520 env_to_regs();
521 }
522#ifdef VBOX_HIGH_RES_TIMERS_HACK
523 /* NULL the current_tb here so cpu_interrupt() doesn't do anything
524 unnecessary (like crashing during single-instruction emulation).
525 Note! Don't use env1->pVM here, the code wouldn't run with
526 gcc-4.4/amd64 anymore, see #3883. */
527 env->current_tb = NULL;
528 if ( !(env->interrupt_request & ( CPU_INTERRUPT_EXIT | CPU_INTERRUPT_DEBUG | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_RC
529 | CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
530 && ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
531 || TMTimerPollBool(env->pVM, env->pVCpu)) ) {
532 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_TIMER);
533 remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
534 TMR3TimerQueuesDo(env->pVM);
535 remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
536 }
537#endif
538 } /* for(;;) */
539
540#if defined(TARGET_I386)
541 /* restore flags in standard format */
542 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
543#else
544#error unsupported target CPU
545#endif
546#include "hostregs_helper.h"
547 return ret;
548}
549
550#else /* !VBOX */
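/* Stock QEMU execution loop, used when the recompiler is built without VBOX. */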
551int cpu_exec(CPUState *env1)
552{
553#define DECLARE_HOST_REGS 1
554#include "hostregs_helper.h"
555 int ret, interrupt_request;
556 TranslationBlock *tb;
557 uint8_t *tc_ptr;
558 unsigned long next_tb;
559
560 if (cpu_halted(env1) == EXCP_HALTED)
561 return EXCP_HALTED;
562
563 cpu_single_env = env1;
564
565 /* first we save global registers */
566#define SAVE_HOST_REGS 1
567#include "hostregs_helper.h"
568 env = env1;
569
570 env_to_regs();
571#if defined(TARGET_I386)
572 /* put eflags in CPU temporary format */
573 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
574 DF = 1 - (2 * ((env->eflags >> 10) & 1));
575 CC_OP = CC_OP_EFLAGS;
576 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
577#elif defined(TARGET_SPARC)
578#elif defined(TARGET_M68K)
579 env->cc_op = CC_OP_FLAGS;
580 env->cc_dest = env->sr & 0xf;
581 env->cc_x = (env->sr >> 4) & 1;
582#elif defined(TARGET_ALPHA)
583#elif defined(TARGET_ARM)
584#elif defined(TARGET_PPC)
585#elif defined(TARGET_MIPS)
586#elif defined(TARGET_SH4)
587#elif defined(TARGET_CRIS)
588 /* XXXXX */
589#else
590#error unsupported target CPU
591#endif
592 env->exception_index = -1;
593
594 /* prepare setjmp context for exception handling */
595 for(;;) {
596 if (setjmp(env->jmp_env) == 0) {
597 env->current_tb = NULL;
598 /* if an exception is pending, we execute it here */
599 if (env->exception_index >= 0) {
600 if (env->exception_index >= EXCP_INTERRUPT) {
601 /* exit request from the cpu execution loop */
602 ret = env->exception_index;
603 break;
604 } else if (env->user_mode_only) {
605 /* if user mode only, we simulate a fake exception
606 which will be handled outside the cpu execution
607 loop */
608#if defined(TARGET_I386)
609 do_interrupt_user(env->exception_index,
610 env->exception_is_int,
611 env->error_code,
612 env->exception_next_eip);
613 /* successfully delivered */
614 env->old_exception = -1;
615#endif
616 ret = env->exception_index;
617 break;
618 } else {
619#if defined(TARGET_I386)
620 /* simulate a real cpu exception. On i386, it can
621 trigger new exceptions, but we do not handle
622 double or triple faults yet. */
623 do_interrupt(env->exception_index,
624 env->exception_is_int,
625 env->error_code,
626 env->exception_next_eip, 0);
627 /* successfully delivered */
628 env->old_exception = -1;
629#elif defined(TARGET_PPC)
630 do_interrupt(env);
631#elif defined(TARGET_MIPS)
632 do_interrupt(env);
633#elif defined(TARGET_SPARC)
634 do_interrupt(env);
635#elif defined(TARGET_ARM)
636 do_interrupt(env);
637#elif defined(TARGET_SH4)
638 do_interrupt(env);
639#elif defined(TARGET_ALPHA)
640 do_interrupt(env);
641#elif defined(TARGET_CRIS)
642 do_interrupt(env);
643#elif defined(TARGET_M68K)
644 do_interrupt(0);
645#endif
646 }
647 env->exception_index = -1;
648 }
649#ifdef USE_KQEMU
650 if (kqemu_is_ok(env) && env->interrupt_request == 0) {
651 int ret;
652 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
653 ret = kqemu_cpu_exec(env);
654 /* put eflags in CPU temporary format */
655 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
656 DF = 1 - (2 * ((env->eflags >> 10) & 1));
657 CC_OP = CC_OP_EFLAGS;
658 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
659 if (ret == 1) {
660 /* exception */
661 longjmp(env->jmp_env, 1);
662 } else if (ret == 2) {
663 /* softmmu execution needed */
664 } else {
665 if (env->interrupt_request != 0) {
666 /* hardware interrupt will be executed just after */
667 } else {
668 /* otherwise, we restart */
669 longjmp(env->jmp_env, 1);
670 }
671 }
672 }
673#endif
674
675 next_tb = 0; /* force lookup of first TB */
676 for(;;) {
677 interrupt_request = env->interrupt_request;
678 if (unlikely(interrupt_request) &&
679 likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
680 if (interrupt_request & CPU_INTERRUPT_DEBUG) {
681 env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
682 env->exception_index = EXCP_DEBUG;
683 cpu_loop_exit();
684 }
685#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
686 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
687 if (interrupt_request & CPU_INTERRUPT_HALT) {
688 env->interrupt_request &= ~CPU_INTERRUPT_HALT;
689 env->halted = 1;
690 env->exception_index = EXCP_HLT;
691 cpu_loop_exit();
692 }
693#endif
694#if defined(TARGET_I386)
695 if (env->hflags2 & HF2_GIF_MASK) {
696 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
697 !(env->hflags & HF_SMM_MASK)) {
698 svm_check_intercept(SVM_EXIT_SMI);
699 env->interrupt_request &= ~CPU_INTERRUPT_SMI;
700 do_smm_enter();
701 next_tb = 0;
702 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
703 !(env->hflags2 & HF2_NMI_MASK)) {
704 env->interrupt_request &= ~CPU_INTERRUPT_NMI;
705 env->hflags2 |= HF2_NMI_MASK;
706 do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
707 next_tb = 0;
708 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
709 (((env->hflags2 & HF2_VINTR_MASK) &&
710 (env->hflags2 & HF2_HIF_MASK)) ||
711 (!(env->hflags2 & HF2_VINTR_MASK) &&
712 (env->eflags & IF_MASK &&
713 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
714 int intno;
715 svm_check_intercept(SVM_EXIT_INTR);
716 env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
717 intno = cpu_get_pic_interrupt(env);
718 if (loglevel & CPU_LOG_TB_IN_ASM) {
719 fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
720 }
721 do_interrupt(intno, 0, 0, 0, 1);
722 /* ensure that no TB jump will be modified as
723 the program flow was changed */
724 next_tb = 0;
725#if !defined(CONFIG_USER_ONLY)
726 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
727 (env->eflags & IF_MASK) &&
728 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
729 int intno;
730 /* FIXME: this should respect TPR */
731 svm_check_intercept(SVM_EXIT_VINTR);
732 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
733 intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
734 if (loglevel & CPU_LOG_TB_IN_ASM)
735 fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
736 do_interrupt(intno, 0, 0, 0, 1);
737 next_tb = 0;
738#endif
739 }
740 }
741#elif defined(TARGET_PPC)
742#if 0
743 if ((interrupt_request & CPU_INTERRUPT_RESET)) {
744 cpu_ppc_reset(env);
745 }
746#endif
747 if (interrupt_request & CPU_INTERRUPT_HARD) {
748 ppc_hw_interrupt(env);
749 if (env->pending_interrupts == 0)
750 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
751 next_tb = 0;
752 }
753#elif defined(TARGET_MIPS)
754 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
755 (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
756 (env->CP0_Status & (1 << CP0St_IE)) &&
757 !(env->CP0_Status & (1 << CP0St_EXL)) &&
758 !(env->CP0_Status & (1 << CP0St_ERL)) &&
759 !(env->hflags & MIPS_HFLAG_DM)) {
760 /* Raise it */
761 env->exception_index = EXCP_EXT_INTERRUPT;
762 env->error_code = 0;
763 do_interrupt(env);
764 next_tb = 0;
765 }
766#elif defined(TARGET_SPARC)
767 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
768 (env->psret != 0)) {
769 int pil = env->interrupt_index & 15;
770 int type = env->interrupt_index & 0xf0;
771
772 if (((type == TT_EXTINT) &&
773 (pil == 15 || pil > env->psrpil)) ||
774 type != TT_EXTINT) {
775 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
776 env->exception_index = env->interrupt_index;
777 do_interrupt(env);
778 env->interrupt_index = 0;
779#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
780 cpu_check_irqs(env);
781#endif
782 next_tb = 0;
783 }
784 } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
785 //do_interrupt(0, 0, 0, 0, 0);
786 env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
787 }
788#elif defined(TARGET_ARM)
789 if (interrupt_request & CPU_INTERRUPT_FIQ
790 && !(env->uncached_cpsr & CPSR_F)) {
791 env->exception_index = EXCP_FIQ;
792 do_interrupt(env);
793 next_tb = 0;
794 }
795 /* ARMv7-M interrupt return works by loading a magic value
796 into the PC. On real hardware the load causes the
797 return to occur. The qemu implementation performs the
798 jump normally, then does the exception return when the
799 CPU tries to execute code at the magic address.
800 This will cause the magic PC value to be pushed to
801 the stack if an interrupt occurred at the wrong time.
802 We avoid this by disabling interrupts when
803 pc contains a magic address. */
804 if (interrupt_request & CPU_INTERRUPT_HARD
805 && ((IS_M(env) && env->regs[15] < 0xfffffff0)
806 || !(env->uncached_cpsr & CPSR_I))) {
807 env->exception_index = EXCP_IRQ;
808 do_interrupt(env);
809 next_tb = 0;
810 }
811#elif defined(TARGET_SH4)
812 if (interrupt_request & CPU_INTERRUPT_HARD) {
813 do_interrupt(env);
814 next_tb = 0;
815 }
816#elif defined(TARGET_ALPHA)
817 if (interrupt_request & CPU_INTERRUPT_HARD) {
818 do_interrupt(env);
819 next_tb = 0;
820 }
821#elif defined(TARGET_CRIS)
822 if (interrupt_request & CPU_INTERRUPT_HARD
823 && (env->pregs[PR_CCS] & I_FLAG)) {
824 env->exception_index = EXCP_IRQ;
825 do_interrupt(env);
826 next_tb = 0;
827 }
828 if (interrupt_request & CPU_INTERRUPT_NMI
829 && (env->pregs[PR_CCS] & M_FLAG)) {
830 env->exception_index = EXCP_NMI;
831 do_interrupt(env);
832 next_tb = 0;
833 }
834#elif defined(TARGET_M68K)
835 if (interrupt_request & CPU_INTERRUPT_HARD
836 && ((env->sr & SR_I) >> SR_I_SHIFT)
837 < env->pending_level) {
838 /* Real hardware gets the interrupt vector via an
839 IACK cycle at this point. Current emulated
840 hardware doesn't rely on this, so we
841 provide/save the vector when the interrupt is
842 first signalled. */
843 env->exception_index = env->pending_vector;
844 do_interrupt(1);
845 next_tb = 0;
846 }
847#endif
848 /* Don't use the cached interrupt_request value,
849 do_interrupt may have updated the EXITTB flag. */
850 if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
851 env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
852 /* ensure that no TB jump will be modified as
853 the program flow was changed */
854 next_tb = 0;
855 }
856 if (interrupt_request & CPU_INTERRUPT_EXIT) {
857 env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
858 env->exception_index = EXCP_INTERRUPT;
859 cpu_loop_exit();
860 }
861 }
862#ifdef DEBUG_EXEC
863 if ((loglevel & CPU_LOG_TB_CPU)) {
864 /* restore flags in standard format */
865 regs_to_env();
866#if defined(TARGET_I386)
867 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
868 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
869 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
870#elif defined(TARGET_ARM)
871 cpu_dump_state(env, logfile, fprintf, 0);
872#elif defined(TARGET_SPARC)
873 cpu_dump_state(env, logfile, fprintf, 0);
874#elif defined(TARGET_PPC)
875 cpu_dump_state(env, logfile, fprintf, 0);
876#elif defined(TARGET_M68K)
877 cpu_m68k_flush_flags(env, env->cc_op);
878 env->cc_op = CC_OP_FLAGS;
879 env->sr = (env->sr & 0xffe0)
880 | env->cc_dest | (env->cc_x << 4);
881 cpu_dump_state(env, logfile, fprintf, 0);
882#elif defined(TARGET_MIPS)
883 cpu_dump_state(env, logfile, fprintf, 0);
884#elif defined(TARGET_SH4)
885 cpu_dump_state(env, logfile, fprintf, 0);
886#elif defined(TARGET_ALPHA)
887 cpu_dump_state(env, logfile, fprintf, 0);
888#elif defined(TARGET_CRIS)
889 cpu_dump_state(env, logfile, fprintf, 0);
890#else
891#error unsupported target CPU
892#endif
893 }
894#endif
895 spin_lock(&tb_lock);
896 tb = tb_find_fast();
897 /* Note: we do it here to avoid a gcc bug on Mac OS X when
898 doing it in tb_find_slow */
899 if (tb_invalidated_flag) {
900 /* as some TB could have been invalidated because
901 of memory exceptions while generating the code, we
902 must recompute the hash index here */
903 next_tb = 0;
904 tb_invalidated_flag = 0;
905 }
906#ifdef DEBUG_EXEC
907 if ((loglevel & CPU_LOG_EXEC)) {
908 fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
909 (long)tb->tc_ptr, tb->pc,
910 lookup_symbol(tb->pc));
911 }
912#endif
913 /* see if we can patch the calling TB. When the TB
914 spans two pages, we cannot safely do a direct
915 jump. */
916 {
917 if (next_tb != 0 &&
918#ifdef USE_KQEMU
919 (env->kqemu_enabled != 2) &&
920#endif
921 tb->page_addr[1] == -1) {
922 tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
923 }
924 }
925 spin_unlock(&tb_lock);
926 env->current_tb = tb;
927 while (env->current_tb) {
928 tc_ptr = tb->tc_ptr;
929 /* execute the generated code */
930#if defined(__sparc__) && !defined(HOST_SOLARIS)
931#undef env
932 env = cpu_single_env;
933#define env cpu_single_env
934#endif
935 next_tb = tcg_qemu_tb_exec(tc_ptr);
936 env->current_tb = NULL;
937 if ((next_tb & 3) == 2) {
938 /* Instruction counter expired. */
939 int insns_left;
940 tb = (TranslationBlock *)(long)(next_tb & ~3);
941 /* Restore PC. */
942 CPU_PC_FROM_TB(env, tb);
943 insns_left = env->icount_decr.u32;
944 if (env->icount_extra && insns_left >= 0) {
945 /* Refill decrementer and continue execution. */
946 env->icount_extra += insns_left;
947 if (env->icount_extra > 0xffff) {
948 insns_left = 0xffff;
949 } else {
950 insns_left = env->icount_extra;
951 }
952 env->icount_extra -= insns_left;
953 env->icount_decr.u16.low = insns_left;
954 } else {
955 if (insns_left > 0) {
956 /* Execute remaining instructions. */
957 cpu_exec_nocache(insns_left, tb);
958 }
959 env->exception_index = EXCP_INTERRUPT;
960 next_tb = 0;
961 cpu_loop_exit();
962 }
963 }
964 }
965 /* reset soft MMU for next block (it can currently
966 only be set by a memory fault) */
967#if defined(USE_KQEMU)
968#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
969 if (kqemu_is_ok(env) &&
970 (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
971 cpu_loop_exit();
972 }
973#endif
974 } /* for(;;) */
975 } else {
976 env_to_regs();
977 }
978 } /* for(;;) */
979
980
981#if defined(TARGET_I386)
982 /* restore flags in standard format */
983 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
984#elif defined(TARGET_ARM)
985 /* XXX: Save/restore host fpu exception state? */
986#elif defined(TARGET_SPARC)
987#elif defined(TARGET_PPC)
988#elif defined(TARGET_M68K)
989 cpu_m68k_flush_flags(env, env->cc_op);
990 env->cc_op = CC_OP_FLAGS;
991 env->sr = (env->sr & 0xffe0)
992 | env->cc_dest | (env->cc_x << 4);
993#elif defined(TARGET_MIPS)
994#elif defined(TARGET_SH4)
995#elif defined(TARGET_ALPHA)
996#elif defined(TARGET_CRIS)
997 /* XXXXX */
998#else
999#error unsupported target CPU
1000#endif
1001
1002 /* restore global registers */
1003#include "hostregs_helper.h"
1004
1005 /* fail safe : never use cpu_single_env outside cpu_exec() */
1006 cpu_single_env = NULL;
1007 return ret;
1008}
1009
1010#endif /* !VBOX */
1011
1012/* must only be called from the generated code as an exception can be
1013 generated */
1014void tb_invalidate_page_range(target_ulong start, target_ulong end)
1015{
1016 /* XXX: cannot enable it yet because it leads to an MMU exception
1017 where NIP != read address on PowerPC */
1018#if 0
1019 target_ulong phys_addr;
1020 phys_addr = get_phys_addr_code(env, start);
1021 tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
1022#endif
1023}
1024
1025#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
1026
1027void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
1028{
1029 CPUX86State *saved_env;
1030
1031 saved_env = env;
1032 env = s;
1033 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
1034 selector &= 0xffff;
1035 cpu_x86_load_seg_cache(env, seg_reg, selector,
1036 (selector << 4), 0xffff, 0);
1037 } else {
1038 helper_load_seg(seg_reg, selector);
1039 }
1040 env = saved_env;
1041}
1042
1043void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
1044{
1045 CPUX86State *saved_env;
1046
1047 saved_env = env;
1048 env = s;
1049
1050 helper_fsave(ptr, data32);
1051
1052 env = saved_env;
1053}
1054
1055void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
1056{
1057 CPUX86State *saved_env;
1058
1059 saved_env = env;
1060 env = s;
1061
1062 helper_frstor(ptr, data32);
1063
1064 env = saved_env;
1065}
1066
1067#endif /* TARGET_I386 */
1068
1069#if !defined(CONFIG_SOFTMMU)
1070
1071#if defined(TARGET_I386)
1072
1073/* 'pc' is the host PC at which the exception was raised. 'address' is
1074 the effective address of the memory exception. 'is_write' is 1 if a
1075 write caused the exception and 0 otherwise. 'old_set' is the
1076 signal set which should be restored */
1077static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1078 int is_write, sigset_t *old_set,
1079 void *puc)
1080{
1081 TranslationBlock *tb;
1082 int ret;
1083
1084 if (cpu_single_env)
1085 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1086#if defined(DEBUG_SIGNAL)
1087 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1088 pc, address, is_write, *(unsigned long *)old_set);
1089#endif
1090 /* XXX: locking issue */
1091 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1092 return 1;
1093 }
1094
1095 /* see if it is an MMU fault */
1096 ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1097 if (ret < 0)
1098 return 0; /* not an MMU fault */
1099 if (ret == 0)
1100 return 1; /* the MMU fault was handled without causing real CPU fault */
1101 /* now we have a real cpu fault */
1102 tb = tb_find_pc(pc);
1103 if (tb) {
1104 /* the PC is inside the translated code. It means that we have
1105 a virtual CPU fault */
1106 cpu_restore_state(tb, env, pc, puc);
1107 }
1108 if (ret == 1) {
1109#if 0
1110 printf("PF exception: EIP=0x%RGv CR2=0x%RGv error=0x%x\n",
1111 env->eip, env->cr[2], env->error_code);
1112#endif
1113 /* we restore the process signal mask as the sigreturn should
1114 do it (XXX: use sigsetjmp) */
1115 sigprocmask(SIG_SETMASK, old_set, NULL);
1116 raise_exception_err(env->exception_index, env->error_code);
1117 } else {
1118 /* activate soft MMU for this block */
1119 env->hflags |= HF_SOFTMMU_MASK;
1120 cpu_resume_from_signal(env, puc);
1121 }
1122 /* never comes here */
1123 return 1;
1124}
1125
1126#elif defined(TARGET_ARM)
1127static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1128 int is_write, sigset_t *old_set,
1129 void *puc)
1130{
1131 TranslationBlock *tb;
1132 int ret;
1133
1134 if (cpu_single_env)
1135 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1136#if defined(DEBUG_SIGNAL)
1137 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1138 pc, address, is_write, *(unsigned long *)old_set);
1139#endif
1140 /* XXX: locking issue */
1141 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1142 return 1;
1143 }
1144 /* see if it is an MMU fault */
1145 ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1146 if (ret < 0)
1147 return 0; /* not an MMU fault */
1148 if (ret == 0)
1149 return 1; /* the MMU fault was handled without causing real CPU fault */
1150 /* now we have a real cpu fault */
1151 tb = tb_find_pc(pc);
1152 if (tb) {
1153 /* the PC is inside the translated code. It means that we have
1154 a virtual CPU fault */
1155 cpu_restore_state(tb, env, pc, puc);
1156 }
1157 /* we restore the process signal mask as the sigreturn should
1158 do it (XXX: use sigsetjmp) */
1159 sigprocmask(SIG_SETMASK, old_set, NULL);
1160 cpu_loop_exit();
1161 /* never comes here */
1162 return 1;
1163}
1164#elif defined(TARGET_SPARC)
1165static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1166 int is_write, sigset_t *old_set,
1167 void *puc)
1168{
1169 TranslationBlock *tb;
1170 int ret;
1171
1172 if (cpu_single_env)
1173 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1174#if defined(DEBUG_SIGNAL)
1175 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1176 pc, address, is_write, *(unsigned long *)old_set);
1177#endif
1178 /* XXX: locking issue */
1179 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1180 return 1;
1181 }
1182 /* see if it is an MMU fault */
1183 ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1184 if (ret < 0)
1185 return 0; /* not an MMU fault */
1186 if (ret == 0)
1187 return 1; /* the MMU fault was handled without causing real CPU fault */
1188 /* now we have a real cpu fault */
1189 tb = tb_find_pc(pc);
1190 if (tb) {
1191 /* the PC is inside the translated code. It means that we have
1192 a virtual CPU fault */
1193 cpu_restore_state(tb, env, pc, puc);
1194 }
1195 /* we restore the process signal mask as the sigreturn should
1196 do it (XXX: use sigsetjmp) */
1197 sigprocmask(SIG_SETMASK, old_set, NULL);
1198 cpu_loop_exit();
1199 /* never comes here */
1200 return 1;
1201}
1202#elif defined (TARGET_PPC)
1203static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1204 int is_write, sigset_t *old_set,
1205 void *puc)
1206{
1207 TranslationBlock *tb;
1208 int ret;
1209
1210 if (cpu_single_env)
1211 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1212#if defined(DEBUG_SIGNAL)
1213 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1214 pc, address, is_write, *(unsigned long *)old_set);
1215#endif
1216 /* XXX: locking issue */
1217 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1218 return 1;
1219 }
1220
1221 /* see if it is an MMU fault */
1222 ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1223 if (ret < 0)
1224 return 0; /* not an MMU fault */
1225 if (ret == 0)
1226 return 1; /* the MMU fault was handled without causing real CPU fault */
1227
1228 /* now we have a real cpu fault */
1229 tb = tb_find_pc(pc);
1230 if (tb) {
1231 /* the PC is inside the translated code. It means that we have
1232 a virtual CPU fault */
1233 cpu_restore_state(tb, env, pc, puc);
1234 }
1235 if (ret == 1) {
1236#if 0
1237 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1238 env->nip, env->error_code, tb);
1239#endif
1240 /* we restore the process signal mask as the sigreturn should
1241 do it (XXX: use sigsetjmp) */
1242 sigprocmask(SIG_SETMASK, old_set, NULL);
1243 do_raise_exception_err(env->exception_index, env->error_code);
1244 } else {
1245 /* activate soft MMU for this block */
1246 cpu_resume_from_signal(env, puc);
1247 }
1248 /* never comes here */
1249 return 1;
1250}
1251
1252#elif defined(TARGET_M68K)
1253static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1254 int is_write, sigset_t *old_set,
1255 void *puc)
1256{
1257 TranslationBlock *tb;
1258 int ret;
1259
1260 if (cpu_single_env)
1261 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1262#if defined(DEBUG_SIGNAL)
1263 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1264 pc, address, is_write, *(unsigned long *)old_set);
1265#endif
1266 /* XXX: locking issue */
1267 if (is_write && page_unprotect(address, pc, puc)) {
1268 return 1;
1269 }
1270 /* see if it is an MMU fault */
1271 ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1272 if (ret < 0)
1273 return 0; /* not an MMU fault */
1274 if (ret == 0)
1275 return 1; /* the MMU fault was handled without causing real CPU fault */
1276 /* now we have a real cpu fault */
1277 tb = tb_find_pc(pc);
1278 if (tb) {
1279 /* the PC is inside the translated code. It means that we have
1280 a virtual CPU fault */
1281 cpu_restore_state(tb, env, pc, puc);
1282 }
1283 /* we restore the process signal mask as the sigreturn should
1284 do it (XXX: use sigsetjmp) */
1285 sigprocmask(SIG_SETMASK, old_set, NULL);
1286 cpu_loop_exit();
1287 /* never comes here */
1288 return 1;
1289}
1290
1291#elif defined (TARGET_MIPS)
1292static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1293 int is_write, sigset_t *old_set,
1294 void *puc)
1295{
1296 TranslationBlock *tb;
1297 int ret;
1298
1299 if (cpu_single_env)
1300 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1301#if defined(DEBUG_SIGNAL)
1302 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1303 pc, address, is_write, *(unsigned long *)old_set);
1304#endif
1305 /* XXX: locking issue */
1306 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1307 return 1;
1308 }
1309
1310 /* see if it is an MMU fault */
1311 ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1312 if (ret < 0)
1313 return 0; /* not an MMU fault */
1314 if (ret == 0)
1315 return 1; /* the MMU fault was handled without causing real CPU fault */
1316
1317 /* now we have a real cpu fault */
1318 tb = tb_find_pc(pc);
1319 if (tb) {
1320 /* the PC is inside the translated code. It means that we have
1321 a virtual CPU fault */
1322 cpu_restore_state(tb, env, pc, puc);
1323 }
1324 if (ret == 1) {
1325#if 0
1326 printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
1327 env->PC, env->error_code, tb);
1328#endif
1329 /* we restore the process signal mask as the sigreturn should
1330 do it (XXX: use sigsetjmp) */
1331 sigprocmask(SIG_SETMASK, old_set, NULL);
1332 do_raise_exception_err(env->exception_index, env->error_code);
1333 } else {
1334 /* activate soft MMU for this block */
1335 cpu_resume_from_signal(env, puc);
1336 }
1337 /* never comes here */
1338 return 1;
1339}
1340
1341#elif defined (TARGET_SH4)
1342static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1343 int is_write, sigset_t *old_set,
1344 void *puc)
1345{
1346 TranslationBlock *tb;
1347 int ret;
1348
1349 if (cpu_single_env)
1350 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1351#if defined(DEBUG_SIGNAL)
1352 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1353 pc, address, is_write, *(unsigned long *)old_set);
1354#endif
1355 /* XXX: locking issue */
1356 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1357 return 1;
1358 }
1359
1360 /* see if it is an MMU fault */
1361 ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1362 if (ret < 0)
1363 return 0; /* not an MMU fault */
1364 if (ret == 0)
1365 return 1; /* the MMU fault was handled without causing real CPU fault */
1366
1367 /* now we have a real cpu fault */
1368 tb = tb_find_pc(pc);
1369 if (tb) {
1370 /* the PC is inside the translated code. It means that we have
1371 a virtual CPU fault */
1372 cpu_restore_state(tb, env, pc, puc);
1373 }
1374#if 0
1375 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1376 env->nip, env->error_code, tb);
1377#endif
1378 /* we restore the process signal mask as the sigreturn should
1379 do it (XXX: use sigsetjmp) */
1380 sigprocmask(SIG_SETMASK, old_set, NULL);
1381 cpu_loop_exit();
1382 /* never comes here */
1383 return 1;
1384}
1385
1386#elif defined (TARGET_ALPHA)
1387static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1388 int is_write, sigset_t *old_set,
1389 void *puc)
1390{
1391 TranslationBlock *tb;
1392 int ret;
1393
1394 if (cpu_single_env)
1395 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1396#if defined(DEBUG_SIGNAL)
1397 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1398 pc, address, is_write, *(unsigned long *)old_set);
1399#endif
1400 /* XXX: locking issue */
1401 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1402 return 1;
1403 }
1404
1405 /* see if it is an MMU fault */
1406 ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1407 if (ret < 0)
1408 return 0; /* not an MMU fault */
1409 if (ret == 0)
1410 return 1; /* the MMU fault was handled without causing real CPU fault */
1411
1412 /* now we have a real cpu fault */
1413 tb = tb_find_pc(pc);
1414 if (tb) {
1415 /* the PC is inside the translated code. It means that we have
1416 a virtual CPU fault */
1417 cpu_restore_state(tb, env, pc, puc);
1418 }
1419#if 0
1420 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1421 env->nip, env->error_code, tb);
1422#endif
1423 /* we restore the process signal mask as the sigreturn should
1424 do it (XXX: use sigsetjmp) */
1425 sigprocmask(SIG_SETMASK, old_set, NULL);
1426 cpu_loop_exit();
1427 /* never comes here */
1428 return 1;
1429}
1430#elif defined (TARGET_CRIS)
1431static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1432 int is_write, sigset_t *old_set,
1433 void *puc)
1434{
1435 TranslationBlock *tb;
1436 int ret;
1437
1438 if (cpu_single_env)
1439 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1440#if defined(DEBUG_SIGNAL)
1441 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1442 pc, address, is_write, *(unsigned long *)old_set);
1443#endif
1444 /* XXX: locking issue */
1445 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1446 return 1;
1447 }
1448
1449 /* see if it is an MMU fault */
1450 ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1451 if (ret < 0)
1452 return 0; /* not an MMU fault */
1453 if (ret == 0)
1454 return 1; /* the MMU fault was handled without causing real CPU fault */
1455
1456 /* now we have a real cpu fault */
1457 tb = tb_find_pc(pc);
1458 if (tb) {
1459 /* the PC is inside the translated code. It means that we have
1460 a virtual CPU fault */
1461 cpu_restore_state(tb, env, pc, puc);
1462 }
1463 /* we restore the process signal mask as the sigreturn should
1464 do it (XXX: use sigsetjmp) */
1465 sigprocmask(SIG_SETMASK, old_set, NULL);
1466 cpu_loop_exit();
1467 /* never comes here */
1468 return 1;
1469}
1470
1471#else
1472#error unsupported target CPU
1473#endif
1474
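/* Host-specific signal handlers: each variant extracts the faulting PC and a
   write-access flag from the host signal context and forwards them to the
   target's handle_cpu_signal() above. */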
1475#if defined(__i386__)
1476
1477#if defined(__APPLE__)
1478# include <sys/ucontext.h>
1479
1480# define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
1481# define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
1482# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
1483#else
1484# define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
1485# define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
1486# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
1487#endif
1488
1489int cpu_signal_handler(int host_signum, void *pinfo,
1490 void *puc)
1491{
1492 siginfo_t *info = pinfo;
1493 struct ucontext *uc = puc;
1494 unsigned long pc;
1495 int trapno;
1496
1497#ifndef REG_EIP
1498/* for glibc 2.1 */
1499#define REG_EIP EIP
1500#define REG_ERR ERR
1501#define REG_TRAPNO TRAPNO
1502#endif
1503 pc = EIP_sig(uc);
1504 trapno = TRAP_sig(uc);
1505 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1506 trapno == 0xe ?
1507 (ERROR_sig(uc) >> 1) & 1 : 0,
1508 &uc->uc_sigmask, puc);
1509}
1510
1511#elif defined(__x86_64__)
1512
1513int cpu_signal_handler(int host_signum, void *pinfo,
1514 void *puc)
1515{
1516 siginfo_t *info = pinfo;
1517 struct ucontext *uc = puc;
1518 unsigned long pc;
1519
1520 pc = uc->uc_mcontext.gregs[REG_RIP];
1521 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1522 uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
1523 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
1524 &uc->uc_sigmask, puc);
1525}
1526
1527#elif defined(__powerpc__)
1528
1529/***********************************************************************
1530 * signal context platform-specific definitions
1531 * From Wine
1532 */
1533#ifdef linux
1534/* All Registers access - only for local access */
1535# define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
1536/* Gpr Registers access */
1537# define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
1538# define IAR_sig(context) REG_sig(nip, context) /* Program counter */
1539# define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
1540# define CTR_sig(context) REG_sig(ctr, context) /* Count register */
1541# define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1542# define LR_sig(context) REG_sig(link, context) /* Link register */
1543# define CR_sig(context) REG_sig(ccr, context) /* Condition register */
1544/* Float Registers access */
1545# define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
1546# define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
1547/* Exception Registers access */
1548# define DAR_sig(context) REG_sig(dar, context)
1549# define DSISR_sig(context) REG_sig(dsisr, context)
1550# define TRAP_sig(context) REG_sig(trap, context)
1551#endif /* linux */
1552
1553#ifdef __APPLE__
1554# include <sys/ucontext.h>
1555typedef struct ucontext SIGCONTEXT;
1556/* All Registers access - only for local access */
1557# define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
1558# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
1559# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
1560# define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
1561/* Gpr Registers access */
1562# define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
1563# define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
1564# define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
1565# define CTR_sig(context) REG_sig(ctr, context)
1566 # define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1567 # define LR_sig(context) REG_sig(lr, context) /* Link register */
1568# define CR_sig(context) REG_sig(cr, context) /* Condition register */
1569/* Float Registers access */
1570# define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
1571# define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
1572/* Exception Registers access */
1573# define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
1574# define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
1575# define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
1576#endif /* __APPLE__ */
1577
1578int cpu_signal_handler(int host_signum, void *pinfo,
1579 void *puc)
1580{
1581 siginfo_t *info = pinfo;
1582 struct ucontext *uc = puc;
1583 unsigned long pc;
1584 int is_write;
1585
1586 pc = IAR_sig(uc);
1587 is_write = 0;
1588#if 0
1589 /* ppc 4xx case */
1590 if (DSISR_sig(uc) & 0x00800000)
1591 is_write = 1;
1592#else
1593 if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
1594 is_write = 1;
1595#endif
1596 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1597 is_write, &uc->uc_sigmask, puc);
1598}
1599
1600#elif defined(__alpha__)
1601
1602int cpu_signal_handler(int host_signum, void *pinfo,
1603 void *puc)
1604{
1605 siginfo_t *info = pinfo;
1606 struct ucontext *uc = puc;
1607 uint32_t *pc = uc->uc_mcontext.sc_pc;
1608 uint32_t insn = *pc;
1609 int is_write = 0;
1610
1611 /* XXX: need kernel patch to get write flag faster */
1612 switch (insn >> 26) {
1613 case 0x0d: // stw
1614 case 0x0e: // stb
1615 case 0x0f: // stq_u
1616 case 0x24: // stf
1617 case 0x25: // stg
1618 case 0x26: // sts
1619 case 0x27: // stt
1620 case 0x2c: // stl
1621 case 0x2d: // stq
1622 case 0x2e: // stl_c
1623 case 0x2f: // stq_c
1624 is_write = 1;
1625 }
1626
1627 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1628 is_write, &uc->uc_sigmask, puc);
1629}
1630#elif defined(__sparc__)
1631
1632int cpu_signal_handler(int host_signum, void *pinfo,
1633 void *puc)
1634{
1635 siginfo_t *info = pinfo;
1636 int is_write;
1637 uint32_t insn;
1638#if !defined(__arch64__) || defined(HOST_SOLARIS)
1639 uint32_t *regs = (uint32_t *)(info + 1);
1640 void *sigmask = (regs + 20);
1641 /* XXX: is there a standard glibc define ? */
1642 unsigned long pc = regs[1];
1643#else
1644 struct sigcontext *sc = puc;
1645 unsigned long pc = sc->sigc_regs.tpc;
1646 void *sigmask = (void *)sc->sigc_mask;
1647#endif
1648
1649 /* XXX: need kernel patch to get write flag faster */
1650 is_write = 0;
1651 insn = *(uint32_t *)pc;
1652 if ((insn >> 30) == 3) {
1653 switch((insn >> 19) & 0x3f) {
1654 case 0x05: // stb
1655 case 0x06: // sth
1656 case 0x04: // st
1657 case 0x07: // std
1658 case 0x24: // stf
1659 case 0x27: // stdf
1660 case 0x25: // stfsr
1661 is_write = 1;
1662 break;
1663 }
1664 }
1665 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1666 is_write, sigmask, NULL);
1667}
1668
1669#elif defined(__arm__)
1670
1671int cpu_signal_handler(int host_signum, void *pinfo,
1672 void *puc)
1673{
1674 siginfo_t *info = pinfo;
1675 struct ucontext *uc = puc;
1676 unsigned long pc;
1677 int is_write;
1678
1679#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
1680 pc = uc->uc_mcontext.gregs[R15];
1681#else
1682 pc = uc->uc_mcontext.arm_pc;
1683#endif
1684 /* XXX: compute is_write */
1685 is_write = 0;
1686 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1687 is_write,
1688 &uc->uc_sigmask, puc);
1689}
1690
1691#elif defined(__mc68000)
1692
1693int cpu_signal_handler(int host_signum, void *pinfo,
1694 void *puc)
1695{
1696 siginfo_t *info = pinfo;
1697 struct ucontext *uc = puc;
1698 unsigned long pc;
1699 int is_write;
1700
1701 pc = uc->uc_mcontext.gregs[16];
1702 /* XXX: compute is_write */
1703 is_write = 0;
1704 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1705 is_write,
1706 &uc->uc_sigmask, puc);
1707}
1708
1709#elif defined(__ia64)
1710
1711#ifndef __ISR_VALID
1712 /* This ought to be in <bits/siginfo.h>... */
1713# define __ISR_VALID 1
1714#endif
1715
1716int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
1717{
1718 siginfo_t *info = pinfo;
1719 struct ucontext *uc = puc;
1720 unsigned long ip;
1721 int is_write = 0;
1722
1723 ip = uc->uc_mcontext.sc_ip;
1724 switch (host_signum) {
1725 case SIGILL:
1726 case SIGFPE:
1727 case SIGSEGV:
1728 case SIGBUS:
1729 case SIGTRAP:
1730 if (info->si_code && (info->si_segvflags & __ISR_VALID))
1731 /* ISR.W (write-access) is bit 33: */
1732 is_write = (info->si_isr >> 33) & 1;
1733 break;
1734
1735 default:
1736 break;
1737 }
1738 return handle_cpu_signal(ip, (unsigned long)info->si_addr,
1739 is_write,
1740 &uc->uc_sigmask, puc);
1741}
1742
1743#elif defined(__s390__)
1744
1745int cpu_signal_handler(int host_signum, void *pinfo,
1746 void *puc)
1747{
1748 siginfo_t *info = pinfo;
1749 struct ucontext *uc = puc;
1750 unsigned long pc;
1751 int is_write;
1752
1753 pc = uc->uc_mcontext.psw.addr;
1754 /* XXX: compute is_write */
1755 is_write = 0;
1756 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1757 is_write, &uc->uc_sigmask, puc);
1758}
1759
1760#elif defined(__mips__)
1761
1762int cpu_signal_handler(int host_signum, void *pinfo,
1763 void *puc)
1764{
1765 siginfo_t *info = pinfo;
1766 struct ucontext *uc = puc;
1767 greg_t pc = uc->uc_mcontext.pc;
1768 int is_write;
1769
1770 /* XXX: compute is_write */
1771 is_write = 0;
1772 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1773 is_write, &uc->uc_sigmask, puc);
1774}
1775
1776#elif defined(__hppa__)
1777
1778int cpu_signal_handler(int host_signum, void *pinfo,
1779 void *puc)
1780{
1781 struct siginfo *info = pinfo;
1782 struct ucontext *uc = puc;
1783 unsigned long pc;
1784 int is_write;
1785
1786 pc = uc->uc_mcontext.sc_iaoq[0];
1787 /* FIXME: compute is_write */
1788 is_write = 0;
1789 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1790 is_write,
1791 &uc->uc_sigmask, puc);
1792}
1793
1794#else
1795
1796#error host CPU specific signal handler needed
1797
1798#endif
1799
1800#endif /* !defined(CONFIG_SOFTMMU) */