VirtualBox

source: vbox/trunk/src/recompiler/cpu-exec.c@ 36171

Last change on this file since 36171 was 36171, checked in by vboxsync, 14 years ago

rem: Merged in changes from the branches/stable_0_10 (r7249).

/*
 * i386 emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */

/*
 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
 * a choice of LGPL license versions is made available with the language indicating
 * that LGPLv2 or any later version may be used, or where a choice of which version
 * of the LGPL is applied is otherwise unspecified.
 */

#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
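
/* Note: cpu_loop_exit() is the unwind mechanism of the main loop: cpu_exec()
   arms env->jmp_env with setjmp(), and anything that must abort the current
   translation block (a raised exception, an exit request) longjmp()s back so
   that the setjmp() returns non-zero and the outer for(;;) restarts from a
   clean state. */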

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long. */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
    tcg_qemu_tb_exec(tb->tc_ptr, next_tb);
#else
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
#endif

    if ((next_tb & 3) == 2) {
        /* Restore PC. This may happen if async event occurs before
           the TB starts executing. */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
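
/* Note: the value produced by tcg_qemu_tb_exec() is the address of the last
   executed TB with a status tag packed into its two low bits (TB pointers
   are at least 4-byte aligned, so those bits are free). Throughout this file
   the pointer is recovered as (next_tb & ~3) and the tag as (next_tb & 3);
   tag 2 means the instruction counter expired inside the block. */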

static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
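
/* Note: lookup is keyed on the *physical* PC so that cached TBs survive
   remapping of guest virtual memory; candidate TBs chain off tb_phys_hash[]
   at index tb_phys_hash_func(phys_pc). A TB spanning a page boundary also
   records page_addr[1], and the loop re-resolves the second page's physical
   address before declaring a hit. */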

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        TAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}
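
/* Illustrative sketch (hypothetical names, not part of this file): because
   cpu_set_debug_excp_handler() returns the previous handler, callers can
   chain handlers around the default watchpoint handling above:

       static CPUDebugExcpHandler *prev_handler;

       static void my_debug_handler(CPUState *e)
       {
           // ... custom inspection of e->watchpoint_hit etc. ...
           if (prev_handler)
               prev_handler(e);   // preserve the previous behaviour
       }

       // during setup:
       prev_handler = cpu_set_debug_excp_handler(my_debug_handler);
*/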

/* main execution loop */

#ifdef VBOX

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret = 0, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
#ifndef VBOX /* VBOX: We need to raise traps and suchlike from the outside. */
    env->exception_index = -1;
#endif

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0)
        {
            env->current_tb = NULL;

            /*
             * Check for fatal errors first
             */
            if (env->interrupt_request & CPU_INTERRUPT_RC) {
                env->exception_index = EXCP_RC;
                ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC);
                ret = env->exception_index;
                cpu_loop_exit();
            }

            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
                    Log(("do_interrupt %d %d %RGv\n", env->exception_index, env->exception_is_int, (RTGCPTR)env->exception_next_eip));
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
                    RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
                }
                env->exception_index = -1;
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;)
            {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
                    /** @todo: reconcile with what QEMU really does */

                    /* Single instruction exec request, we execute it and return (one way or the other).
                       The caller will always reschedule after doing this operation! */
                    if (interrupt_request & CPU_INTERRUPT_SINGLE_INSTR)
                    {
                        /* not in flight are we? (if we are, we trapped) */
                        if (!(env->interrupt_request & CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
                        {
                            ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
                            env->exception_index = EXCP_SINGLE_INSTR;
                            if (emulate_single_instr(env) == -1)
                                AssertMsgFailed(("REM: emulate_single_instr failed for EIP=%RGv!!\n", (RTGCPTR)env->eip));

                            /* When we receive an external interrupt during execution of this single
                               instruction, then we should stay here. We will leave when we're ready
                               for raw-mode or when interrupted by pending EMT requests. */
                            interrupt_request = env->interrupt_request; /* reload this! */
                            if (   !(interrupt_request & CPU_INTERRUPT_HARD)
                                || !(env->eflags & IF_MASK)
                                ||  (env->hflags & HF_INHIBIT_IRQ_MASK)
                                ||  (env->state & CPU_RAW_HWACC)
                               )
                            {
                                env->exception_index = ret = EXCP_SINGLE_INSTR;
                                cpu_loop_exit();
                            }
                        }
                        /* Clear CPU_INTERRUPT_SINGLE_INSTR and leave CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT set. */
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_SINGLE_INSTR);
                    }

                    RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        next_tb = 0;
                    }
                    else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                             (env->eflags & IF_MASK) &&
                             !(env->hflags & HF_INHIBIT_IRQ_MASK))
                    {
                        /* if hardware interrupt pending, we execute it */
                        int intno;
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_HARD);
                        intno = cpu_get_pic_interrupt(env);
                        if (intno >= 0)
                        {
                            Log(("do_interrupt %d\n", intno));
                            do_interrupt(intno, 0, 0, 0, 1);
                        }
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB)
                    {
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXITTB);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
                    if (interrupt_request & CPU_INTERRUPT_EXIT)
                    {
                        env->exception_index = EXCP_INTERRUPT;
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXIT);
                        ret = env->exception_index;
                        cpu_loop_exit();
                    }
                    if (interrupt_request & CPU_INTERRUPT_RC)
                    {
                        env->exception_index = EXCP_RC;
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC);
                        ret = env->exception_index;
                        cpu_loop_exit();
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }

                /*
                 * Check if the CPU state allows us to execute the code in raw mode.
                 */
                RAWEx_ProfileStart(env, STATS_RAW_CHECK);
                if (remR3CanExecuteRaw(env,
                                       env->eip + env->segs[R_CS].base,
                                       env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK)),
                                       &env->exception_index))
                {
                    RAWEx_ProfileStop(env, STATS_RAW_CHECK);
                    ret = env->exception_index;
                    cpu_loop_exit();
                }
                RAWEx_ProfileStop(env, STATS_RAW_CHECK);

                RAWEx_ProfileStart(env, STATS_TLB_LOOKUP);
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }

                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0
                    && !(tb->cflags & CF_RAW_MODE)
                    && tb->page_addr[1] == -1)
                {
                    tb_add_jump((TranslationBlock *)(long)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);
                RAWEx_ProfileStop(env, STATS_TLB_LOOKUP);

                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->exit_request))
                    env->current_tb = NULL;

                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    RAWEx_ProfileStart(env, STATS_QEMU_RUN_EMULATED_CODE);
#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
                    tcg_qemu_tb_exec(tc_ptr, next_tb);
#else
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
#endif
                    RAWEx_ProfileStop(env, STATS_QEMU_RUN_EMULATED_CODE);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired. */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC. */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution. */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions. */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
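
                /* Note: the icount decrementer is only 16 bits wide
                   (icount_decr.u16.low), so at most 0xffff instructions are
                   credited per refill and the remainder stays in
                   env->icount_extra. E.g. with 0x12345 instructions left in
                   total after adding back insns_left, one refill runs 0xffff
                   of them and keeps 0x2346 for later refills. */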

                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    next_tb = 0;
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
#ifdef VBOX_HIGH_RES_TIMERS_HACK
        /* NULL the current_tb here so cpu_interrupt() doesn't do anything
           unnecessary (like crashing during emulate single instruction).
           Note! Don't use env1->pVM here, the code wouldn't run with
           gcc-4.4/amd64 anymore, see #3883. */
        env->current_tb = NULL;
        if (   !(env->interrupt_request & (  CPU_INTERRUPT_EXIT | CPU_INTERRUPT_DEBUG | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_RC
                                           | CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
            && (   (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
                || TMTimerPollBool(env->pVM, env->pVCpu)) ) {
            ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_TIMER);
            remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
            TMR3TimerQueuesDo(env->pVM);
            remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
        }
#endif
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#else
#error unsupported target CPU
#endif
#include "hostregs_helper.h"
    return ret;
}
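
/* Note: while inside cpu_exec() the i386 arithmetic flags are kept in a lazy
   format: CC_SRC/CC_OP record enough state for helper_cc_compute_all() to
   recompute O/S/Z/A/P/C on demand, and DF is held as +1/-1 (the stride used
   by string instructions), which is what DF = 1 - (2 * ((eflags >> 10) & 1))
   encodes on entry. The epilogue above folds everything back into the
   architectural EFLAGS layout before returning to the caller. */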

#else /* !VBOX */
int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0 && env->exit_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0 || env->exit_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC. On real hardware the load causes the
                       return to occur. The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address. */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point. Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled. */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#ifdef DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    }
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->exit_request))
                    env->current_tb = NULL;

                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired. */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC. */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution. */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions. */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

#endif /* !VBOX */

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}
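
/* Note: in real mode and VM86 mode a segment register has no descriptor; its
   base is simply selector << 4 with a 64K limit and no attributes, which is
   exactly what the cpu_x86_load_seg_cache() call above encodes. In protected
   mode the full descriptor load, with its permission checks, is delegated to
   helper_load_seg(). */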

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception, 0 otherwise. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%RGv CR2=0x%RGv error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
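
/* Note on the contract of handle_cpu_signal(): it returns 0 when the fault is
   not ours (the caller should fall back to the default signal behaviour) and
   1 when it was handled; when the fault maps to a guest exception the
   function does not actually return -- raise_exception_err() and
   cpu_resume_from_signal() longjmp back into cpu_exec(). The per-target
   variants below follow the same contract. */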

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP EIP
#define REG_ERR ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
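
/* Note: for trap 0xe (the x86 page fault) the hardware error code has bit 1
   set when the faulting access was a write, hence (ERROR_sig(uc) >> 1) & 1
   as the is_write argument above; every other trap is reported as a read. */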

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define REG_ERR _REG_ERR
#define REG_TRAPNO _REG_TRAPNO

#define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.__gregs[(reg)]
#define QEMU_UC_MACHINE_PC(uc) _UC_MACHINE_PC(uc)
#else
#define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.gregs[(reg)]
#define QEMU_UC_MACHINE_PC(uc) QEMU_UC_MCONTEXT_GREGS(uc, REG_RIP)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = QEMU_UC_MACHINE_PC(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             QEMU_UC_MCONTEXT_GREGS(uc, REG_TRAPNO) == 0xe ?
                             (QEMU_UC_MCONTEXT_GREGS(uc, REG_ERR) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
# define IAR_sig(context) REG_sig(nip, context) /* Program counter */
# define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context) REG_sig(ctr, context) /* Count register */
# define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
# define LR_sig(context) REG_sig(link, context) /* Link register */
# define CR_sig(context) REG_sig(ccr, context) /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context) REG_sig(dar, context)
# define DSISR_sig(context) REG_sig(dsisr, context)
# define TRAP_sig(context) REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
# define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context) REG_sig(ctr, context)
# define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
# define LR_sig(context) REG_sig(lr, context) /* Link register */
# define CR_sig(context) REG_sig(cr, context) /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
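
/* Note: lacking a write flag from the kernel, the Alpha handler above decodes
   the faulting instruction by its top 6 opcode bits (insn >> 26) and matches
   them against the store opcodes; the SPARC handler below applies the same
   idea to format-3 (insn >> 30 == 3) memory instructions. */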
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */