VirtualBox

source: vbox/trunk/src/recompiler/cpu-exec.c@34045

Last change on this file since 34045 was 33656, checked in by vboxsync, 14 years ago

*: rebrand Sun (L)GPL disclaimers

  • Property svn:eol-style set to native
File size: 61.0 KB
/*
 * i386 emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
 * a choice of LGPL license versions is made available with the language indicating
 * that LGPLv2 or any later version may be used, or where a choice of which version
 * of the LGPL is applied is otherwise unspecified.
 */

#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#include "tcg.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

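/* Editor's note, inferred from this file: exceptional exits never
   propagate by return value here. cpu_exec() arms a setjmp() context
   in env->jmp_env, and anything that must abort the current
   translation block -- cpu_loop_exit(), cpu_resume_from_signal(), the
   fault handlers at the bottom of the file -- longjmp()s back to it. */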
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp() restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long. */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
    tcg_qemu_tb_exec(tb->tc_ptr, next_tb);
#else
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
#endif

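    /* The two low bits of next_tb form a tag (see the tb_add_jump()
       calls further down): 0 and 1 name the jump slot of the TB that
       chained here, while 2 means the TB was interrupted -- the
       instruction budget ran out or an asynchronous event arrived
       before it started executing. */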
    if ((next_tb & 3) == 2) {
        /* Restore PC. This may happen if an async event occurs before
           the TB starts executing. */
        CPU_PC_FROM_TB(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

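/* Editor's note: TB lookup is two-level. tb_find_fast() probes
   env->tb_jmp_cache, a direct-mapped cache indexed by a hash of the
   virtual PC; only on a miss does it fall back to tb_find_slow()
   above, which walks the physically indexed tb_phys_hash chain and
   translates the block if nothing matches. */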
#ifndef VBOX
static inline TranslationBlock *tb_find_fast(void)
#else
DECLINLINE(TranslationBlock *) tb_find_fast(void)
#endif
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
        | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // AM . Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = ((env->pstate & PS_AM) << 2)
        | (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->active_tc.PC;
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6   */
            | (env->sr & SR_S)            /* Bit  13  */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = (env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL
                    | DELAY_SLOT_TRUE | DELAY_SLOT_CLEARME))   /* Bits  0- 3 */
            | (env->fpscr & (FPSCR_FR | FPSCR_SZ | FPSCR_PR))  /* Bits 19-21 */
            | (env->sr & (SR_MD | SR_RB));                     /* Bits 29-30 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & (S_FLAG | P_FLAG | U_FLAG | X_FLAG);
    flags |= env->dslot;
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

/* main execution loop */

#ifdef VBOX

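/* Editor's note: the VBOX build carries its own copy of cpu_exec()
   rather than patching QEMU's (the original follows after the #else).
   The visible differences: REM statistics hooks (RAWEx_ProfileStart/
   Stop), the EXCP_RC / CPU_INTERRUPT_RC exit path back to VirtualBox,
   single-instruction execution requests, and the remR3CanExecuteRaw()
   check that hands control back when the guest state permits it. */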
int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret = 0, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
#ifndef VBOX /* VBOX: We need to raise traps and suchlike from the outside. */
    env->exception_index = -1;
#endif

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0)
        {
            env->current_tb = NULL;

            /*
             * Check for fatal errors first
             */
            if (env->interrupt_request & CPU_INTERRUPT_RC) {
                env->exception_index = EXCP_RC;
                ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC);
                ret = env->exception_index;
                cpu_loop_exit();
            }

            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                Assert(!env->user_mode_only);
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else {
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
                    Log(("do_interrupt %d %d %RGv\n", env->exception_index, env->exception_is_int, (RTGCPTR)env->exception_next_eip));
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
                    RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
                }
                env->exception_index = -1;
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;)
            {
                interrupt_request = env->interrupt_request;
#ifndef VBOX
                if (__builtin_expect(interrupt_request, 0))
#else
                if (RT_UNLIKELY(interrupt_request != 0))
#endif
                {
                    /** @todo: reconcile with what QEMU really does */

                    /* Single instruction exec request, we execute it and return (one way or the other).
                       The caller will always reschedule after doing this operation! */
                    if (interrupt_request & CPU_INTERRUPT_SINGLE_INSTR)
                    {
                        /* not in flight are we? (if we are, we trapped) */
                        if (!(env->interrupt_request & CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
                        {
                            ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
                            env->exception_index = EXCP_SINGLE_INSTR;
                            if (emulate_single_instr(env) == -1)
                                AssertMsgFailed(("REM: emulate_single_instr failed for EIP=%RGv!!\n", (RTGCPTR)env->eip));

                            /* When we receive an external interrupt during execution of this single
                               instruction, then we should stay here. We will leave when we're ready
                               for raw-mode or when interrupted by pending EMT requests. */
                            interrupt_request = env->interrupt_request; /* reload this! */
                            if (   !(interrupt_request & CPU_INTERRUPT_HARD)
                                || !(env->eflags & IF_MASK)
                                ||  (env->hflags & HF_INHIBIT_IRQ_MASK)
                                ||  (env->state & CPU_RAW_HWACC)
                               )
                            {
                                env->exception_index = ret = EXCP_SINGLE_INSTR;
                                cpu_loop_exit();
                            }
                        }
                        /* Clear CPU_INTERRUPT_SINGLE_INSTR and leave CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT set. */
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_SINGLE_INSTR);
                    }

                    RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        next_tb = 0;
                    }
                    else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                             (env->eflags & IF_MASK) &&
                             !(env->hflags & HF_INHIBIT_IRQ_MASK))
                    {
                        /* if hardware interrupt pending, we execute it */
                        int intno;
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_HARD);
                        intno = cpu_get_pic_interrupt(env);
                        if (intno >= 0)
                        {
                            Log(("do_interrupt %d\n", intno));
                            do_interrupt(intno, 0, 0, 0, 1);
                        }
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB)
                    {
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXITTB);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
                    if (interrupt_request & CPU_INTERRUPT_EXIT)
                    {
                        env->exception_index = EXCP_INTERRUPT;
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXIT);
                        ret = env->exception_index;
                        cpu_loop_exit();
                    }
                    if (interrupt_request & CPU_INTERRUPT_RC)
                    {
                        env->exception_index = EXCP_RC;
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC);
                        ret = env->exception_index;
                        cpu_loop_exit();
                    }
                }

                /*
                 * Check whether the CPU state allows us to execute the code in raw mode.
                 */
                RAWEx_ProfileStart(env, STATS_RAW_CHECK);
                if (remR3CanExecuteRaw(env,
                                       env->eip + env->segs[R_CS].base,
                                       env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK)),
                                       &env->exception_index))
                {
                    RAWEx_ProfileStop(env, STATS_RAW_CHECK);
                    ret = env->exception_index;
                    cpu_loop_exit();
                }
                RAWEx_ProfileStop(env, STATS_RAW_CHECK);

                RAWEx_ProfileStart(env, STATS_TLB_LOOKUP);
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }

                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0
                    && !(tb->cflags & CF_RAW_MODE)
                    && tb->page_addr[1] == -1)
                {
                    tb_add_jump((TranslationBlock *)(long)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);
                RAWEx_ProfileStop(env, STATS_TLB_LOOKUP);

                env->current_tb = tb;
                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    RAWEx_ProfileStart(env, STATS_QEMU_RUN_EMULATED_CODE);
#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
                    tcg_qemu_tb_exec(tc_ptr, next_tb);
#else
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
#endif
                    RAWEx_ProfileStop(env, STATS_QEMU_RUN_EMULATED_CODE);
                    env->current_tb = NULL;
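                    /* Instruction-count bookkeeping, summarized: the
                       generated code decrements icount_decr.u16.low,
                       which holds at most 0xffff, so any surplus budget
                       is parked in icount_extra and moved into the
                       decrementer one slice at a time; a final leftover
                       of a few instructions is run via a throw-away TB
                       in cpu_exec_nocache(). */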
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired. */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC. */
                        CPU_PC_FROM_TB(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution. */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions. */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }

                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    next_tb = 0;
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
#ifdef VBOX_HIGH_RES_TIMERS_HACK
        /* NULL the current_tb here so cpu_interrupt() doesn't do anything
           unnecessary (like crashing during single instruction emulation).
           Note! Don't use env1->pVM here, the code wouldn't run with
           gcc-4.4/amd64 anymore, see #3883. */
        env->current_tb = NULL;
        if (    !(env->interrupt_request & ( CPU_INTERRUPT_EXIT | CPU_INTERRUPT_DEBUG | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_RC
                                           | CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
            &&  (   (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
                 || TMTimerPollBool(env->pVM, env->pVCpu)) ) {
            ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_TIMER);
            remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
            TMR3TimerQueuesDo(env->pVM);
            remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
        }
#endif
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#else
#error unsupported target CPU
#endif
#include "hostregs_helper.h"
    return ret;
}

#else /* !VBOX */
int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request) &&
                    likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            if (loglevel & CPU_LOG_TB_IN_ASM) {
                                fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                            }
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            if (loglevel & CPU_LOG_TB_IN_ASM)
                                fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    }
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;
                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired. */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC. */
                        CPU_PC_FROM_TB(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution. */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions. */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
#endif /* !VBOX */

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

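/* Editor's note on the first branch below: outside protected mode
   (CR0.PE clear) or in VM86 mode (EFLAGS.VM set) a segment has no
   descriptor behind it -- its base is simply selector * 16, which is
   what the (selector << 4) cache load expresses. In protected mode the
   selector indexes a descriptor table, so the full load_seg() runs. */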
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave((target_ulong)ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor((target_ulong)ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

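/* Editor's summary of this section: without the soft MMU, guest memory
   accesses can fault as host signals. cpu_signal_handler() (defined per
   host architecture further down) extracts the faulting PC and the
   write flag from the host signal context and calls the per-target
   handle_cpu_signal() below, which either unprotects a page that was
   write-protected to catch self-modifying code, resolves the guest MMU
   fault, or turns it into a guest exception via longjmp(). */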
/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write,
                                   ((env->hflags & HF_CPL_MASK) == 3), 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%RGv CR2=0x%RGv error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, msr_pr, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#else
#error unsupported target CPU
#endif

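/* The host-side handlers below all follow the same recipe: pull the
   faulting PC and, where the host exposes it, the access direction out
   of the signal context, then forward both to handle_cpu_signal()
   together with the saved signal mask. */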
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = uc->uc_mcontext.gregs[REG_EIP];
    trapno = uc->uc_mcontext.gregs[REG_TRAPNO];
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
    if (trapno == 0x00 || trapno == 0x05) {
        /* send division by zero or bound exception */
        cpu_send_trap(pc, trapno, uc);
        return 1;
    } else
#endif
        return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                                 trapno == 0xe ?
                                 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                                 &uc->uc_sigmask, puc);
}

#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)   ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)    REG_sig(gpr[reg_num], context)
# define IAR_sig(context)             REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)             REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)             REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)             REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)              REG_sig(link, context) /* Link register */
# define CR_sig(context)              REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)  (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)           (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)             REG_sig(dar, context)
# define DSISR_sig(context)           REG_sig(dsisr, context)
# define TRAP_sig(context)            REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context)       /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

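/* Editor's note: the sparc signal frame exposes no write flag, so the
   handler decodes the faulting instruction itself: format-3 memory ops
   have the top two bits (op) equal to 3, and the op3 field in bits
   19-24 selects the store variants listed in the switch. */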
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[R15];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */