VirtualBox

source: vbox/trunk/src/recompiler/cpu-exec.c@37675

Last change on this file since 37675 was 37675, checked in by vboxsync, 14 years ago

rem: Synced with v0.12.5.

  • Property svn:eol-style set to native
File size: 56.9 KB
/*
 * i386 emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
 * a choice of LGPL license versions is made available with the language indicating
 * that LGPLv2 or any later version may be used, or where a choice of which version
 * of the LGPL is applied is otherwise unspecified.
 */

#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif
int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

void cpu_loop_exit(void)
{
    /* NOTE: the registers must be saved by hand at this point because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long. */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
    tcg_qemu_tb_exec(tb->tc_ptr, next_tb);
#else
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
#endif
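    /* tcg_qemu_tb_exec() returns the last executed TB pointer with status
       bits in its two low bits; a low-bit value of 2 means execution was
       interrupted before the block ran to completion. */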

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurred
           before the TB started executing. */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
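    /* Walk the collision chain: the hash covers only the physical PC, so a
       match must also compare pc, cs_base and flags, and check the second
       physical page for blocks spanning a page boundary. */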
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

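/* tb_jmp_cache is a small direct-mapped cache indexed by the virtual PC; a
   hit avoids the physical-address lookup done in tb_find_slow(). */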
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        QTAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}

/* main execution loop */

#ifdef VBOX

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret = 0, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
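    /* Note that DF is kept as a +1/-1 stride for string operations rather
       than as the raw EFLAGS.DF bit (bit 10). */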
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
#ifndef VBOX /* VBOX: We need to raise traps and suchlike from the outside. */
    env->exception_index = -1;
#endif

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0)
        {
            env->current_tb = NULL;

            /*
             * Check for fatal errors first
             */
            if (env->interrupt_request & CPU_INTERRUPT_RC) {
                env->exception_index = EXCP_RC;
                ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC);
                ret = env->exception_index;
                cpu_loop_exit();
            }

            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
                    Log(("do_interrupt %d %d %RGv\n", env->exception_index, env->exception_is_int, (RTGCPTR)env->exception_next_eip));
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
                    RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
                }
                env->exception_index = -1;
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;)
            {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
                    /** @todo: reconcile with what QEMU really does */

                    /* Single instruction exec request: we execute it and return (one way or the other).
                       The caller will always reschedule after doing this operation! */
                    if (interrupt_request & CPU_INTERRUPT_SINGLE_INSTR)
                    {
                        /* not in flight are we? (if we are, we trapped) */
                        if (!(env->interrupt_request & CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
                        {
                            ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
                            env->exception_index = EXCP_SINGLE_INSTR;
                            if (emulate_single_instr(env) == -1)
                                AssertMsgFailed(("REM: emulate_single_instr failed for EIP=%RGv!!\n", (RTGCPTR)env->eip));

                            /* If we receive an external interrupt during execution of this single
                               instruction, we should stay here. We will leave when we're ready
                               for raw-mode or when interrupted by pending EMT requests. */
                            interrupt_request = env->interrupt_request; /* reload this! */
                            if (   !(interrupt_request & CPU_INTERRUPT_HARD)
                                || !(env->eflags & IF_MASK)
                                ||  (env->hflags & HF_INHIBIT_IRQ_MASK)
                                ||  (env->state & CPU_RAW_HWACC)
                               )
                            {
                                env->exception_index = ret = EXCP_SINGLE_INSTR;
                                cpu_loop_exit();
                            }
                        }
                        /* Clear CPU_INTERRUPT_SINGLE_INSTR and leave CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT set. */
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_SINGLE_INSTR);
#ifdef IEM_VERIFICATION_MODE
                        env->exception_index = ret = EXCP_SINGLE_INSTR;
                        cpu_loop_exit();
#endif
                    }

                    RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        next_tb = 0;
                    }
                    else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                             (env->eflags & IF_MASK) &&
                             !(env->hflags & HF_INHIBIT_IRQ_MASK))
                    {
                        /* if hardware interrupt pending, we execute it */
                        int intno;
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_HARD);
                        intno = cpu_get_pic_interrupt(env);
                        if (intno >= 0)
                        {
                            Log(("do_interrupt %d\n", intno));
                            do_interrupt(intno, 0, 0, 0, 1);
                        }
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB)
                    {
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXITTB);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
                    if (interrupt_request & CPU_INTERRUPT_RC)
                    {
                        env->exception_index = EXCP_RC;
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC);
                        ret = env->exception_index;
                        cpu_loop_exit();
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }

                /*
                 * Check whether the CPU state allows us to execute the code in raw-mode.
                 */
                RAWEx_ProfileStart(env, STATS_RAW_CHECK);
                if (remR3CanExecuteRaw(env,
                                       env->eip + env->segs[R_CS].base,
                                       env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK)),
                                       &env->exception_index))
                {
                    RAWEx_ProfileStop(env, STATS_RAW_CHECK);
                    ret = env->exception_index;
                    cpu_loop_exit();
                }
                RAWEx_ProfileStop(env, STATS_RAW_CHECK);

                {
                    RTGCPTR mypc = env->eip + env->segs[R_CS].base;
                    if (mypc == 0x00fe0d2 || mypc == 0x00f19e9 || mypc == 0x000f0827 || mypc == 0x000fe090) {
                        RTLogFlags(NULL, "enabled");
                        loglevel = ~0;
                        Log(("BANG CRASH!\n"));
                    }
                }
#ifdef CONFIG_DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                }
#endif
                RAWEx_ProfileStart(env, STATS_TLB_LOOKUP);
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s [sp=%RGv, bp=%RGv\n",
                              (long)tb->tc_ptr, tb->pc, lookup_symbol(tb->pc), (RTGCPTR)env->regs[R_ESP], (RTGCPTR)env->regs[R_EBP]);
#endif

                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0
                    && !(tb->cflags & CF_RAW_MODE)
                    && tb->page_addr[1] == -1)
                {
                    tb_add_jump((TranslationBlock *)(long)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);
                RAWEx_ProfileStop(env, STATS_TLB_LOOKUP);

                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->exit_request))
                    env->current_tb = NULL;

                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    RAWEx_ProfileStart(env, STATS_QEMU_RUN_EMULATED_CODE);
#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
                    tcg_qemu_tb_exec(tc_ptr, next_tb);
#else
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
#endif
                    RAWEx_ProfileStop(env, STATS_QEMU_RUN_EMULATED_CODE);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired. */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC. */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution. */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions. */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }

                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    next_tb = 0;
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
#ifdef VBOX_HIGH_RES_TIMERS_HACK
        /* NULL the current_tb here so cpu_interrupt() doesn't do anything
           unnecessary (like crashing during emulate single instruction).
           Note! Don't use env1->pVM here, the code wouldn't run with
           gcc-4.4/amd64 anymore, see #3883. */
        env->current_tb = NULL;
        if (   !(env->interrupt_request & (  CPU_INTERRUPT_DEBUG | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_RC
                                           | CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
            && (   (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
                || TMTimerPollBool(env->pVM, env->pVCpu)) ) {
            ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_TIMER);
            remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
            TMR3TimerQueuesDo(env->pVM);
            remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
        }
#endif
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#else
#error unsupported target CPU
#endif
#include "hostregs_helper.h"
    return ret;
}

#else /* !VBOX */
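/* The upstream QEMU main loop follows (this file is synced with QEMU
   v0.12.5); the VBox variant above wraps the same structure with raw-mode
   checks, single-instruction execution and profiling counters. */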
int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    if (!kvm_enabled()) {
        /* put eflags in CPU temporary format */
        CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
        DF = 1 - (2 * ((env->eflags >> 10) & 1));
        CC_OP = CC_OP_EFLAGS;
        env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    }
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
#endif
                }
                env->exception_index = -1;
            }

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_interrupts_enabled(env)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address. */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled. */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value;
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#ifdef CONFIG_DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MICROBLAZE)
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 && tb->page_addr[1] == -1) {
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    }
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->exit_request))
                    env->current_tb = NULL;

                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired. */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC. */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution. */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions. */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

#endif /* !VBOX */

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

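/* Without a soft MMU (user-mode emulation), guest memory faults arrive as
   host signals; the per-host handlers below extract the faulting PC and
   access type from the signal context and pass them to handle_cpu_signal(). */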
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)
#define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
#else
#define EXCEPTION_ACTION cpu_loop_exit()
#endif

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and 0 otherwise. 'old_set' is the
   signal set which should be restored. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
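    /* A write fault on a page that was write-protected to catch
       self-modifying code: unprotect it and retry the access. */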
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }

    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    EXCEPTION_ACTION;

    /* never comes here */
    return 1;
}

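/* Per-host-OS accessors for the fault PC, trap number, error code and
   signal mask in the signal context; the layout differs between libcs
   and the BSDs, hence the #ifdef maze below. */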
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__NetBSD__)
# include <ucontext.h>

# define EIP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.__gregs[_REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__FreeBSD__) || defined(__DragonFly__)
# include <ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext.mc_eip))
# define TRAP_sig(context)    ((context)->uc_mcontext.mc_trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext.mc_err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__NetBSD__) || defined(__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#elif defined(__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>

#define PC_sig(context)  (*((unsigned long*)&(context)->uc_mcontext.mc_rip))
#define TRAP_sig(context)     ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context)    ((context)->uc_mcontext.mc_err)
#define MASK_sig(context)     ((context)->uc_sigmask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#if defined(__NetBSD__) || defined(__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)    ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)     REG_sig(gpr[reg_num], context)
# define IAR_sig(context)              REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)              REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)              REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)              REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)               REG_sig(link, context)  /* Link register */
# define CR_sig(context)               REG_sig(ccr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)   (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)            (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)              REG_sig(dar, context)
# define DSISR_sig(context)            REG_sig(dsisr, context)
# define TRAP_sig(context)             REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)       ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)  ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)  ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)    ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)        REG_sig(r##reg_num, context)
# define IAR_sig(context)                 REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)                 REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                 REG_sig(ctr, context)
# define XER_sig(context)                 REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                  REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                  REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)      FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)               ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                 EXCEPREG_sig(dar, context)     /* Fault registers for coredump */
# define DSISR_sig(context)               EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
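    /* insn >> 26 extracts the 6-bit Alpha major opcode; every case below is
       a store, so a match means the fault was caused by a write. */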
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
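    /* op (insn bits 31:30) == 3 selects the SPARC format-3 load/store group;
       op3 (bits 24:19) identifies the store opcodes listed below. */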
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */