VirtualBox

source: vbox/trunk/src/recompiler/cpu-exec.c@37270

Last change on this file since 37270 was 36768, checked in by vboxsync, 14 years ago:
IEM: Initial commit, work in progress.

/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
 * a choice of LGPL license versions is made available with the language indicating
 * that LGPLv2 or any later version may be used, or where a choice of which version
 * of the LGPL is applied is otherwise unspecified.
 */

#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
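
/* Note on control flow: cpu_exec() below arms env->jmp_env with setjmp() at
   the top of its outer loop; cpu_loop_exit() and cpu_resume_from_signal()
   unwind back to that point with longjmp(..., 1).  This is also why the host
   registers holding global CPU state have to be saved and restored by hand
   around the translated code. */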

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
    tcg_qemu_tb_exec(tb->tc_ptr, next_tb);
#else
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
#endif

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
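
/* Note: tcg_qemu_tb_exec() returns the address of the last executed TB with
   an exit tag in its low two bits; tag value 2 (tested above and again in
   cpu_exec()) means the block stopped early (instruction counter expiry /
   pending async event), so the guest PC has to be re-derived from the TB
   via cpu_pc_from_tb(). */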

static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
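
/* Note: TB lookup is two-level.  tb_find_fast() below first probes the
   per-CPU tb_jmp_cache keyed on the virtual PC; only on a miss does it fall
   back to tb_find_slow() above, which walks a hash chain keyed on the
   *physical* PC (so translations stay valid across virtual remappings) and
   also matches the second physical page for TBs that span a page boundary. */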

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        TAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}

/* main execution loop */

#ifdef VBOX

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret = 0, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
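
/* Note: while translated code runs, the i386 flags are kept in "lazy" form:
   CC_OP/CC_SRC record the last flag-setting operation so the EFLAGS bits are
   only materialized on demand (helper_cc_compute_all() at function exit),
   and DF is held as +1/-1 so string instructions can add it directly. */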
#ifndef VBOX /* VBOX: We need to raise traps and suchlike from the outside. */
    env->exception_index = -1;
#endif

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0)
        {
            env->current_tb = NULL;

            /*
             * Check for fatal errors first
             */
            if (env->interrupt_request & CPU_INTERRUPT_RC) {
                env->exception_index = EXCP_RC;
                ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC);
                ret = env->exception_index;
                cpu_loop_exit();
            }

            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
                    Log(("do_interrupt %d %d %RGv\n", env->exception_index, env->exception_is_int, (RTGCPTR)env->exception_next_eip));
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
                    RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
                }
                env->exception_index = -1;
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;)
            {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
                    /** @todo: reconcile with what QEMU really does */

                    /* Single instruction exec request, we execute it and return (one way or the other).
                       The caller will always reschedule after doing this operation! */
                    if (interrupt_request & CPU_INTERRUPT_SINGLE_INSTR)
                    {
                        /* not in flight are we? (if we are, we trapped) */
                        if (!(env->interrupt_request & CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
                        {
                            ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
                            env->exception_index = EXCP_SINGLE_INSTR;
                            if (emulate_single_instr(env) == -1)
                                AssertMsgFailed(("REM: emulate_single_instr failed for EIP=%RGv!!\n", (RTGCPTR)env->eip));
                            /* When we receive an external interrupt during execution of this single
                               instruction, we should stay here.  We will leave when we're ready
                               for raw-mode or when interrupted by pending EMT requests. */
                            interrupt_request = env->interrupt_request; /* reload this! */
                            if (   !(interrupt_request & CPU_INTERRUPT_HARD)
                                || !(env->eflags & IF_MASK)
                                ||  (env->hflags & HF_INHIBIT_IRQ_MASK)
                                ||  (env->state & CPU_RAW_HWACC)
                               )
                            {
                                env->exception_index = ret = EXCP_SINGLE_INSTR;
                                cpu_loop_exit();
                            }
                        }
                        /* Clear CPU_INTERRUPT_SINGLE_INSTR and leave CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT set. */
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_SINGLE_INSTR);
#ifdef IEM_VERIFICATION_MODE
                        env->exception_index = ret = EXCP_SINGLE_INSTR;
                        cpu_loop_exit();
#endif
                    }

                    RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        next_tb = 0;
                    }
                    else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                             (env->eflags & IF_MASK) &&
                             !(env->hflags & HF_INHIBIT_IRQ_MASK))
                    {
                        /* if hardware interrupt pending, we execute it */
                        int intno;
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_HARD);
                        intno = cpu_get_pic_interrupt(env);
                        if (intno >= 0)
                        {
                            Log(("do_interrupt %d\n", intno));
                            do_interrupt(intno, 0, 0, 0, 1);
                        }
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB)
                    {
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXITTB);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
                    if (interrupt_request & CPU_INTERRUPT_RC)
                    {
                        env->exception_index = EXCP_RC;
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC);
                        ret = env->exception_index;
                        cpu_loop_exit();
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }

                /*
                 * Check if the CPU state allows us to execute the code in raw-mode.
                 */
                RAWEx_ProfileStart(env, STATS_RAW_CHECK);
                if (remR3CanExecuteRaw(env,
                                       env->eip + env->segs[R_CS].base,
                                       env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK)),
                                       &env->exception_index))
                {
                    RAWEx_ProfileStop(env, STATS_RAW_CHECK);
                    ret = env->exception_index;
                    cpu_loop_exit();
                }
                RAWEx_ProfileStop(env, STATS_RAW_CHECK);

                RAWEx_ProfileStart(env, STATS_TLB_LOOKUP);
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }

                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0
                    && !(tb->cflags & CF_RAW_MODE)
                    && tb->page_addr[1] == -1)
                {
                    tb_add_jump((TranslationBlock *)(long)(next_tb & ~3), next_tb & 3, tb);
                }
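                /* Note: this is TB chaining.  next_tb still carries the
                   previously executed TB plus a 2-bit jump-slot tag, and
                   tb_add_jump() patches that slot to branch straight into
                   the new TB so future executions skip this lookup; VBox
                   additionally never chains raw-mode TBs (CF_RAW_MODE). */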
                spin_unlock(&tb_lock);
                RAWEx_ProfileStop(env, STATS_TLB_LOOKUP);

                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->exit_request))
                    env->current_tb = NULL;

                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    RAWEx_ProfileStart(env, STATS_QEMU_RUN_EMULATED_CODE);
#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
                    tcg_qemu_tb_exec(tc_ptr, next_tb);
#else
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
#endif
                    RAWEx_ProfileStop(env, STATS_QEMU_RUN_EMULATED_CODE);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
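
                /* Note on the icount path above: icount_decr.u16.low is the
                   16-bit budget actually decremented by the translated code
                   and icount_extra holds the remainder beyond that range;
                   when both are exhausted the loop raises EXCP_INTERRUPT so
                   pending timers and other async work can be serviced. */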

                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    next_tb = 0;
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
#ifdef VBOX_HIGH_RES_TIMERS_HACK
        /* NULL the current_tb here so cpu_interrupt() doesn't do anything
           unnecessary (like crashing while emulating a single instruction).
           Note! Don't use env1->pVM here, the code wouldn't run with
           gcc-4.4/amd64 anymore, see #3883. */
        env->current_tb = NULL;
        if (    !(env->interrupt_request & (  CPU_INTERRUPT_DEBUG | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_RC
                                            | CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
            &&  (   (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
                 || TMTimerPollBool(env->pVM, env->pVCpu)) ) {
            ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_TIMER);
            remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
            TMR3TimerQueuesDo(env->pVM);
            remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
        }
#endif
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#else
#error unsupported target CPU
#endif
#include "hostregs_helper.h"
    return ret;
}

#else /* !VBOX */
int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
#endif
                }
                env->exception_index = -1;
            }
#ifdef CONFIG_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0 && env->exit_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0 || env->exit_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
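
            /* Note: with KVM enabled execution is delegated to the kernel
               via kvm_cpu_exec(); the longjmp() restarts the outer setjmp
               loop so exit/exception conditions are re-examined there
               instead of ever entering the TCG loop below. */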

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_interrupts_enabled(env)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#ifdef DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MICROBLAZE)
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef CONFIG_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    }
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->exit_request))
                    env->current_tb = NULL;

                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(CONFIG_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?.  */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

#endif /* !VBOX */

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}
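
/* Note: these user-mode wrappers follow a common pattern: the emulator keeps
   the current CPU state in the global 'env', so each wrapper saves that
   global, installs the caller-provided state, invokes the target helper, and
   then restores the old pointer (see also cpu_x86_fsave()/cpu_x86_frstor()
   below). */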

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
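
/* Note: every handle_cpu_signal() variant shares the same return contract:
   0 means the fault did not come from guest memory access (let the host deal
   with it), 1 means it was handled here; when a guest-visible fault must be
   raised the function never returns normally but longjmps out via
   raise_exception_err(), cpu_loop_exit() or cpu_resume_from_signal(). */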

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MICROBLAZE)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mb_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)     (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
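
/* Note: trap number 0xe is the x86 page fault (#PF); bit 1 of its error code
   distinguishes writes from reads, hence the (ERROR_sig(uc) >> 1) & 1 above.
   For any other trap, is_write is conservatively passed as 0. */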

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)    ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)     REG_sig(gpr[reg_num], context)
# define IAR_sig(context)              REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)              REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)              REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)              REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)               REG_sig(link, context) /* Link register */
# define CR_sig(context)               REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)   (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)            (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)              REG_sig(dar, context)
# define DSISR_sig(context)            REG_sig(dsisr, context)
# define TRAP_sig(context)             REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)
# define XER_sig(context)                REG_sig(xer, context) /* Link register */
# define LR_sig(context)                 REG_sig(lr, context)  /* User's integer exception register */
# define CR_sig(context)                 REG_sig(cr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context)       /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
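
/* Note: as the XXX above says, the kernel provides no write-fault flag, so
   this Alpha handler (and the SPARC one below) disassembles the faulting
   instruction and matches its opcode against the store encodings to derive
   is_write. */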
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */