VirtualBox

source: vbox/trunk/src/recompiler/cpu-exec.c@36170

Last change on this file since 36170 was 36170, checked in by vboxsync, 14 years ago

rem: synced up to svn://svn.savannah.nongnu.org/qemu/trunk@6686 (repo UUID c046a42c-6fe2-441c-8c8c-71466251a162).

  • Property svn:eol-style set to native
File size: 65.4 KB
/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */

/*
 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
 * a choice of LGPL license versions is made available with the language indicating
 * that LGPLv2 or any later version may be used, or where a choice of which version
 * of the LGPL is applied is otherwise unspecified.
 */

#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
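
/* Illustrative sketch (not part of the original file): the setjmp/longjmp
   pairing that cpu_loop_exit() relies on. cpu_exec() arms env->jmp_env with
   setjmp(); any later longjmp(env->jmp_env, 1) -- from helpers or from code
   called out of a translated block -- lands back in that setjmp() with a
   non-zero return value, unwinding the whole TB call chain in one hop.
   A minimal standalone model of the control flow: */
#if 0 /* hypothetical names, shown for the pattern only */
#include <setjmp.h>
static jmp_buf demo_jmp_env;
static int demo_exception_index = -1;

static void demo_raise(int excp)    /* analogue of cpu_loop_exit() */
{
    demo_exception_index = excp;
    longjmp(demo_jmp_env, 1);       /* never returns */
}

static int demo_exec(void)          /* analogue of cpu_exec()'s outer loop */
{
    for (;;) {
        if (setjmp(demo_jmp_env) == 0) {
            demo_raise(42);         /* "execute TBs"; may long-jump out */
        } else if (demo_exception_index >= 0) {
            return demo_exception_index;    /* exception surfaced here */
        }
    }
}
#endif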

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
    tcg_qemu_tb_exec(tb->tc_ptr, next_tb);
#else
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
#endif

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
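
/* Illustrative sketch (not part of the original file): tcg_qemu_tb_exec()
   returns the address of the TB it exited from with a tag packed into the
   low two bits -- 0 means a normal exit that may be chained, while 2 (as
   tested above) means the instruction counter expired before the TB body
   ran. Decoding the pair is plain bit masking: */
#if 0 /* hypothetical helper names, shown for the bit layout only */
static inline TranslationBlock *demo_tb_from_ret(unsigned long next_tb)
{
    return (TranslationBlock *)(next_tb & ~3UL);  /* pointer part */
}
static inline int demo_tag_from_ret(unsigned long next_tb)
{
    return (int)(next_tb & 3);                    /* tag part, 0..3 */
}
#endif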

static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
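
/* Illustrative sketch (not part of the original file): tb_phys_hash is a
   chained hash table keyed on the physical PC, and tb_find_slow() walks a
   bucket through the intrusive phys_hash_next pointers exactly as above.
   A TB that straddles a page boundary also records its second physical
   page in page_addr[1], so a hit must re-translate the following virtual
   page and compare, or a TB compiled against a different second-page
   mapping could be run by mistake. The bucket walk over a hypothetical
   node type: */
#if 0
struct demo_tb {
    unsigned long key;                 /* stands in for pc/cs_base/flags */
    struct demo_tb *phys_hash_next;    /* intrusive chain link */
};
static struct demo_tb *demo_bucket_find(struct demo_tb *head,
                                        unsigned long key)
{
    struct demo_tb *tb;
    for (tb = head; tb != NULL; tb = tb->phys_hash_next) {
        if (tb->key == key)
            return tb;                 /* hit */
    }
    return NULL;                       /* miss: caller generates new code */
}
#endif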

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
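
/* Illustrative sketch (not part of the original file): tb_jmp_cache is a
   direct-mapped cache indexed by the virtual PC that sits in front of the
   physical hash. A slot is only trusted when pc, cs_base and flags all
   match; anything else falls through to tb_find_slow(), which refills the
   slot. The shape of such a lookup, with hypothetical sizes and hash: */
#if 0
#define DEMO_JMP_CACHE_BITS 12
#define DEMO_JMP_CACHE_SIZE (1 << DEMO_JMP_CACHE_BITS)
static void *demo_jmp_cache[DEMO_JMP_CACHE_SIZE];
static inline unsigned demo_jmp_cache_hash(unsigned long pc)
{
    return (unsigned)(pc ^ (pc >> DEMO_JMP_CACHE_BITS))
           & (DEMO_JMP_CACHE_SIZE - 1);
}
#endif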

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        TAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}
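
/* Illustrative sketch (not part of the original file): since
   cpu_set_debug_excp_handler() hands back the previous handler, a caller
   can interpose and chain instead of replacing outright. A hedged usage
   pattern with hypothetical names: */
#if 0
static CPUDebugExcpHandler *demo_prev_handler;

static void demo_debug_hook(CPUState *e)
{
    /* ... inspect breakpoint/watchpoint state here ... */
    if (demo_prev_handler)
        demo_prev_handler(e);   /* keep the previous behaviour */
}

static void demo_install(void)
{
    demo_prev_handler = cpu_set_debug_excp_handler(demo_debug_hook);
}
#endif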

/* main execution loop */

#ifdef VBOX

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret = 0, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
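
    /* Worked example (not part of the original file): DF is bit 10 of
       EFLAGS, and the translated code keeps it as a signed stride rather
       than a flag bit:
           eflags.DF = 0  ->  DF = 1 - 2*0 = +1  (string ops walk up)
           eflags.DF = 1  ->  DF = 1 - 2*1 = -1  (string ops walk down)
       The arithmetic flags are kept lazily as well: CC_SRC holds the
       operand material and CC_OP says how to recompute the flag bits on
       demand, which is why both are masked out of env->eflags here and
       folded back in with helper_cc_compute_all() before returning. */
#if 0 /* round-trip sketch, hypothetical local names */
    {
        uint32_t fl = env->eflags;
        int df = 1 - (2 * ((fl >> 10) & 1));            /* +1 or -1 */
        fl = (fl & ~DF_MASK) | (df < 0 ? DF_MASK : 0);  /* and back */
    }
#endif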
#ifndef VBOX /* VBOX: We need to raise traps and suchlike from the outside. */
    env->exception_index = -1;
#endif

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0)
        {
            env->current_tb = NULL;

            /*
             * Check for fatal errors first
             */
            if (env->interrupt_request & CPU_INTERRUPT_RC) {
                env->exception_index = EXCP_RC;
                ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC);
                ret = env->exception_index;
                cpu_loop_exit();
            }

            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
                    Log(("do_interrupt %d %d %RGv\n", env->exception_index, env->exception_is_int, (RTGCPTR)env->exception_next_eip));
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
                    RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
                }
                env->exception_index = -1;
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;)
            {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
                    /** @todo: reconcile with what QEMU really does */

                    /* Single-instruction exec request: we execute it and return (one way or the other).
                       The caller will always reschedule after this operation! */
                    if (interrupt_request & CPU_INTERRUPT_SINGLE_INSTR)
                    {
                        /* not in flight are we? (if we are, we trapped) */
                        if (!(env->interrupt_request & CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
                        {
                            ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
                            env->exception_index = EXCP_SINGLE_INSTR;
                            if (emulate_single_instr(env) == -1)
                                AssertMsgFailed(("REM: emulate_single_instr failed for EIP=%RGv!!\n", (RTGCPTR)env->eip));

                            /* When we receive an external interrupt during execution of this single
                               instruction, we should stay here. We will leave when we're ready
                               for raw-mode or when interrupted by pending EMT requests. */
                            interrupt_request = env->interrupt_request; /* reload this! */
                            if (   !(interrupt_request & CPU_INTERRUPT_HARD)
                                || !(env->eflags & IF_MASK)
                                ||  (env->hflags & HF_INHIBIT_IRQ_MASK)
                                ||  (env->state & CPU_RAW_HWACC)
                               )
                            {
                                env->exception_index = ret = EXCP_SINGLE_INSTR;
                                cpu_loop_exit();
                            }
                        }
                        /* Clear CPU_INTERRUPT_SINGLE_INSTR and leave CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT set. */
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_SINGLE_INSTR);
                    }

                    RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        next_tb = 0;
                    }
                    else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                             (env->eflags & IF_MASK) &&
                             !(env->hflags & HF_INHIBIT_IRQ_MASK))
                    {
                        /* if hardware interrupt pending, we execute it */
                        int intno;
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_HARD);
                        intno = cpu_get_pic_interrupt(env);
                        if (intno >= 0)
                        {
                            Log(("do_interrupt %d\n", intno));
                            do_interrupt(intno, 0, 0, 0, 1);
                        }
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB)
                    {
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXITTB);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
                    if (interrupt_request & CPU_INTERRUPT_EXIT)
                    {
                        env->exception_index = EXCP_INTERRUPT;
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXIT);
                        ret = env->exception_index;
                        cpu_loop_exit();
                    }
                    if (interrupt_request & CPU_INTERRUPT_RC)
                    {
                        env->exception_index = EXCP_RC;
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC);
                        ret = env->exception_index;
                        cpu_loop_exit();
                    }
                }
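
                /* Illustrative note (not part of the original file): other
                   VBox threads (e.g. the EMT issuing requests) can set bits
                   in env->interrupt_request concurrently, which is why the
                   block above clears bits with IPRT's ASMAtomicAndS32() /
                   ASMAtomicOrS32() rather than a plain '&=' that could lose
                   a bit set between the load and the store. The same
                   clear-one-bit step with a compiler builtin as a stand-in
                   (assumption: GCC __sync intrinsics are available): */
#if 0
                __sync_and_and_fetch(&env->interrupt_request, ~CPU_INTERRUPT_RC);
#endif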

                /*
                 * Check if the CPU state allows us to execute the code in raw-mode.
                 */
                RAWEx_ProfileStart(env, STATS_RAW_CHECK);
                if (remR3CanExecuteRaw(env,
                                       env->eip + env->segs[R_CS].base,
                                       env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK)),
                                       &env->exception_index))
                {
                    RAWEx_ProfileStop(env, STATS_RAW_CHECK);
                    ret = env->exception_index;
                    cpu_loop_exit();
                }
                RAWEx_ProfileStop(env, STATS_RAW_CHECK);

                RAWEx_ProfileStart(env, STATS_TLB_LOOKUP);
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }

                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0
                    && !(tb->cflags & CF_RAW_MODE)
                    && tb->page_addr[1] == -1)
                {
                    tb_add_jump((TranslationBlock *)(long)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);
                RAWEx_ProfileStop(env, STATS_TLB_LOOKUP);
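
                /* Illustrative note (not part of the original file): direct
                   block chaining. next_tb still carries the TB we just left
                   and, in its low two bits, which of that TB's two jump
                   slots we exited through; tb_add_jump() patches that slot
                   so the generated code branches straight into the new TB
                   next time, skipping this whole lookup. Conceptually:
                       prev = (TranslationBlock *)(next_tb & ~3);
                       slot = next_tb & 3;        // exit slot 0 or 1
                       patch prev's exit 'slot' to jump to tb->tc_ptr
                   Chaining is refused for page-spanning TBs because a remap
                   of the second page could leave a stale patched jump. */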

                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->interrupt_request & CPU_INTERRUPT_EXIT))
                    env->current_tb = NULL;

                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    RAWEx_ProfileStart(env, STATS_QEMU_RUN_EMULATED_CODE);
#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
                    tcg_qemu_tb_exec(tc_ptr, next_tb);
#else
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
#endif
                    RAWEx_ProfileStop(env, STATS_QEMU_RUN_EMULATED_CODE);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
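
                /* Worked example (not part of the original file): the icount
                   refill just above. The 16-bit decrementer
                   icount_decr.u16.low is counted down by the generated code;
                   icount_extra banks whatever budget does not fit in 16
                   bits. With a remaining budget of 70000:
                       1st expiry: icount_extra = 70000 -> load 0xffff
                                   (65535) into the decrementer, 4465 left;
                       2nd expiry: icount_extra = 4465  -> load 4465, 0 left;
                       3rd expiry: icount_extra == 0    -> run any leftover
                                   instructions via cpu_exec_nocache() and
                                   raise EXCP_INTERRUPT via cpu_loop_exit(). */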

                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    next_tb = 0;
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
#ifdef VBOX_HIGH_RES_TIMERS_HACK
        /* NULL the current_tb here so cpu_interrupt() doesn't do anything
           unnecessary (like crashing during emulate single instruction).
           Note! Don't use env1->pVM here, the code wouldn't run with
           gcc-4.4/amd64 anymore, see #3883. */
        env->current_tb = NULL;
        if (   !(env->interrupt_request & (  CPU_INTERRUPT_EXIT | CPU_INTERRUPT_DEBUG | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_RC
                                           | CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
            && (   (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
                || TMTimerPollBool(env->pVM, env->pVCpu)) ) {
            ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_TIMER);
            remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
            TMR3TimerQueuesDo(env->pVM);
            remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
        }
#endif
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#else
#error unsupported target CPU
#endif
#include "hostregs_helper.h"
    return ret;
}

#else /* !VBOX */
int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    }
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->interrupt_request & CPU_INTERRUPT_EXIT))
                    env->current_tb = NULL;

                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

#endif /* !VBOX */

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it can yield an MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}
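
/* Worked example (not part of the original file): in real mode and vm86
   mode there are no descriptor tables, so the segment base is just the
   16-bit selector shifted left by four and the limit is a fixed 0xffff:
       selector 0x1234 -> base 0x12340; 0x1234:0x0010 -> linear 0x12350.
   That is the (selector << 4) cache load above; protected mode goes
   through helper_load_seg() and the GDT/LDT instead. */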

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and 0 otherwise. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%RGv CR2=0x%RGv error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
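
/* Illustrative sketch (not part of the original file): the contract the
   host signal handlers rely on. handle_cpu_signal() returns 0 for "not
   ours, re-deliver to the application" and 1 for "absorbed" (the page was
   unprotected, the TLB was filled, or a guest exception was raised and
   control long-jumped away). cpu_x86_handle_mmu_fault() uses the mirrored
   convention modelled below: */
#if 0
static int demo_mmu_fault_dispatch(int mmu_ret)
{
    if (mmu_ret < 0)
        return 0;   /* not an MMU fault: let the host deal with it */
    if (mmu_ret == 0)
        return 1;   /* fixed up silently: retry the faulting access */
    return 1;       /* real guest fault: raise it, never returns */
}
#endif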

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
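
/* Worked example (not part of the original file): for trap 0xe (#PF) the
   x86 pushes an error code whose bit 1 is the W/R bit, set when the fault
   was caused by a write -- hence (ERROR_sig(uc) >> 1) & 1 above:
       error code 0x6 (0b110): user-mode write fault -> is_write = 1
       error code 0x4 (0b100): user-mode read fault  -> is_write = 0
   For any trap other than 0xe there is no such error code, so is_write
   conservatively defaults to 0. */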

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define REG_ERR _REG_ERR
#define REG_TRAPNO _REG_TRAPNO

#define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.__gregs[(reg)]
#define QEMU_UC_MACHINE_PC(uc)          _UC_MACHINE_PC(uc)
#else
#define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.gregs[(reg)]
#define QEMU_UC_MACHINE_PC(uc)          QEMU_UC_MCONTEXT_GREGS(uc, REG_RIP)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = QEMU_UC_MACHINE_PC(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             QEMU_UC_MCONTEXT_GREGS(uc, REG_TRAPNO) == 0xe ?
                             (QEMU_UC_MCONTEXT_GREGS(uc, REG_ERR) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)               REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context) /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(r##reg_num, context)
# define IAR_sig(context)               REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)
# define XER_sig(context)               REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)             ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)               EXCEPREG_sig(dar, context)     /* Fault registers for coredump */
# define DSISR_sig(context)             EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)              EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */