VirtualBox

source: vbox/trunk/src/recompiler/cpu-exec.c @ 46310

Last change on this file since 46310 was 43394, checked in by vboxsync, 12 years ago

VMM: HM cleanup.

  • Property svn:eol-style set to native
File size: 52.6 KB
/*
 * i386 emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
 * a choice of LGPL license versions is made available with the language indicating
 * that LGPLv2 or any later version may be used, or where a choice of which version
 * of the LGPL is applied is otherwise unspecified.
 */

#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"
#include "qemu-barrier.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

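/* Note: cpu_loop_exit() never returns. It clears env->current_tb so that
   cpu_interrupt() won't try to unlink a TB we are no longer executing, and
   then longjmp()s back to the setjmp(env->jmp_env) at the top of cpu_exec(),
   where the pending env->exception_index is dispatched. */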
void cpu_loop_exit(void)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
#ifdef __ia64
        sigprocmask(SIG_SETMASK, (sigset_t *)&uc->uc_sigmask, NULL);
#else
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#endif
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    uintptr_t next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long. */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
    tcg_qemu_tb_exec(tb->tc_ptr, next_tb);
#else
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
#endif
    env->current_tb = NULL;

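    /* Note: tcg_qemu_tb_exec() returns the last executed TB pointer with
       status bits OR'ed into the two low bits (TBs are aligned, so those
       bits are free). In this code base, (next_tb & 3) == 2 marks a TB
       that was exited before its first instruction, e.g. because the
       instruction counter expired; (next_tb & ~3) recovers the pointer.
       A minimal decode, for illustration only:

           TranslationBlock *last = (TranslationBlock *)(next_tb & ~3);
           int exit_code = (int)(next_tb & 3);
    */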
    if ((next_tb & 3) == 2) {
        /* Restore PC. This may happen if an async event occurred before
           the TB started executing. */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1, phys_page2;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

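/* Note: TB lookup is two-level. tb_find_fast() probes env->tb_jmp_cache, a
   direct-mapped cache hashed on the virtual PC, and falls back to
   tb_find_slow(), which walks the tb_phys_hash chain keyed on the physical
   PC (so a TB survives guest remappings of the same physical code).
   A miss at both levels ends in tb_gen_code() translating a fresh TB. */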
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

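/* Note: when the debug exception was not raised by a watchpoint
   (env->watchpoint_hit is unset), any BP_WATCHPOINT_HIT flags are stale
   from an earlier stop, so they are cleared below to let the watchpoints
   fire again before the registered handler is invoked. */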
static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        QTAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}

/* main execution loop */

volatile sig_atomic_t exit_request;

int cpu_exec(CPUState *env1)
{
    volatile host_reg_t saved_env_reg;
    int ret VBOX_ONLY(= 0), interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;

# ifndef VBOX
    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;
# endif /* !VBOX */

    cpu_single_env = env1;

    /* the access to env below is actually saving the global register's
       value, so that files not including target-xyz/exec.h are free to
       use it. */
    QEMU_BUILD_BUG_ON (sizeof (saved_env_reg) != sizeof (env));
    saved_env_reg = (host_reg_t) env;
    barrier();
    env = env1;

    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }

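    /* Note: while TCG code runs, the i386 condition codes are kept lazily:
       the arithmetic flags live in CC_SRC/CC_OP (recomputed on demand by
       helper_cc_compute_all()) and the direction flag in DF, so they are
       stripped out of env->eflags here and folded back in before cpu_exec()
       returns. */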
#if defined(TARGET_I386)
    if (!kvm_enabled()) {
        /* put eflags in CPU temporary format */
        CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
        DF = 1 - (2 * ((env->eflags >> 10) & 1));
        CC_OP = CC_OP_EFLAGS;
        env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    }
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
#ifndef VBOX /* VBOX: We need to raise traps and suchlike from the outside. */
    env->exception_index = -1;
#endif /* !VBOX */

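    /* Note: the loop below is really two nested loops. The outer for(;;)
       re-arms setjmp() and dispatches whatever env->exception_index the
       last cpu_loop_exit() reported; the inner for(;;) services interrupt
       requests, finds (or translates) the next TB and runs it, only
       leaving via longjmp() back to the setjmp() point. */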
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
#ifdef VBOX
            env->current_tb = NULL; /* probably not needed, but whatever... */

            /*
             * Check for fatal errors first
             */
            if (env->interrupt_request & CPU_INTERRUPT_RC) {
                env->exception_index = EXCP_RC;
                ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC);
                ret = env->exception_index;
                cpu_loop_exit();
            }
#endif

            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
#ifdef VBOX /* because of the above stuff */
                    env->exception_index = -1;
#endif
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
# ifdef VBOX
                    RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
                    Log(("do_interrupt: vec=%#x int=%d pc=%04x:%RGv\n", env->exception_index, env->exception_is_int,
                         env->segs[R_CS].selector, (RTGCPTR)env->exception_next_eip));
# endif /* VBOX */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
# ifdef VBOX
                    RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
# endif /* VBOX */
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                    env->exception_index = -1;
#endif
                }
            }

# ifndef VBOX
            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
# endif /* !VBOX */

            next_tb = 0; /* force lookup of first TB */
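            /* Note: next_tb doubles as the chaining handle: it remembers the
               TB (plus jump-slot index in the low bits) that just branched
               here, so the lookup code below can patch that jump to go
               straight to the next TB. Zeroing it disables patching whenever
               control flow was changed behind TCG's back (interrupt
               delivery, TB invalidation, or loop entry as here). */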
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
# ifdef VBOX
                    /* Memory registration may post a TLB flush request; process it ASAP. */
                    if (interrupt_request & (CPU_INTERRUPT_EXTERNAL_FLUSH_TLB)) {
                        tlb_flush(env, true); /* (clears the flush flag) */
                    }

                    /* Single instruction exec request: we execute it and return (one way or the other).
                       The caller will always reschedule after doing this operation! */
                    if (interrupt_request & CPU_INTERRUPT_SINGLE_INSTR)
                    {
                        /* not in flight are we? (if we are, we trapped) */
                        if (!(env->interrupt_request & CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
                        {
                            ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
                            env->exception_index = EXCP_SINGLE_INSTR;
                            if (emulate_single_instr(env) == -1)
                                AssertMsgFailed(("REM: emulate_single_instr failed for EIP=%RGv!!\n", (RTGCPTR)env->eip));

                            /* When we receive an external interrupt during execution of this single
                               instruction, we should stay here. We will leave when we're ready
                               for raw-mode or when interrupted by pending EMT requests. */
                            interrupt_request = env->interrupt_request; /* reload this! */
                            if (   !(interrupt_request & CPU_INTERRUPT_HARD)
                                || !(env->eflags & IF_MASK)
                                ||  (env->hflags & HF_INHIBIT_IRQ_MASK)
                                ||  (env->state & CPU_RAW_HM)
                               )
                            {
                                env->exception_index = ret = EXCP_SINGLE_INSTR;
                                cpu_loop_exit();
                            }
                        }
                        /* Clear CPU_INTERRUPT_SINGLE_INSTR and leave CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT set. */
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_SINGLE_INSTR);
# ifdef IEM_VERIFICATION_MODE
                        env->exception_index = ret = EXCP_SINGLE_INSTR;
                        cpu_loop_exit();
# endif
                    }
# endif /* VBOX */

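                    /* Note on the VBox single-step handshake above: IN_FLIGHT
                       is set before emulate_single_instr() so that a trap
                       taken inside that one instruction re-enters this loop
                       without being treated as a fresh single-step request;
                       only CPU_INTERRUPT_SINGLE_INSTR is cleared afterwards,
                       while IN_FLIGHT stays set until the caller has finished
                       processing the request. */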
# ifndef VBOX /** @todo reconcile our code with the following... */
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
# else /* VBOX */
                    RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        next_tb = 0;
                    }
                    else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                             (env->eflags & IF_MASK) &&
                             !(env->hflags & HF_INHIBIT_IRQ_MASK))
                    {
                        /* if hardware interrupt pending, we execute it */
                        int intno;
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_HARD);
                        intno = cpu_get_pic_interrupt(env);
                        if (intno >= 0)
                        {
                            Log(("do_interrupt %d\n", intno));
                            do_interrupt(intno, 0, 0, 0, 1);
                        }
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
# endif /* VBOX */
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC. On real hardware the load causes the
                       return to occur. The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address. */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point. Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled. */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
#ifndef VBOX
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
#else /* VBOX */
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXITTB);
#endif /* VBOX */
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
#ifdef VBOX
                    RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
                    if (interrupt_request & CPU_INTERRUPT_RC) {
                        env->exception_index = EXCP_RC;
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC);
                        ret = env->exception_index;
                        cpu_loop_exit();
                    }
                    if (interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT)) {
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~(CPU_INTERRUPT_EXTERNAL_EXIT));
                        env->exit_request = 1;
                    }
#endif
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }

#ifdef VBOX
                /*
                 * Check if the CPU state allows us to execute the code in raw-mode.
                 */
                RAWEx_ProfileStart(env, STATS_RAW_CHECK);
                if (remR3CanExecuteRaw(env,
                                       env->eip + env->segs[R_CS].base,
                                       env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK)),
                                       &env->exception_index))
                {
                    RAWEx_ProfileStop(env, STATS_RAW_CHECK);
                    ret = env->exception_index;
                    cpu_loop_exit();
                }
                RAWEx_ProfileStop(env, STATS_RAW_CHECK);
#endif /* VBOX */

#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
#ifdef VBOX
                RAWEx_ProfileStart(env, STATS_TLB_LOOKUP);
#endif /*VBOX*/
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace %p [" TARGET_FMT_lx "] %s\n",
                             (void *)tb->tc_ptr, tb->pc,
                             lookup_symbol(tb->pc));
#endif
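                /* Note: TB chaining. If the previous TB (recorded in next_tb)
                   ended in a direct jump, tb_add_jump() patches that jump to
                   branch straight to this TB's code, so later executions
                   bypass this lookup entirely. TBs spanning two pages are
                   never chained, since invalidating the second page could
                   not safely unlink them; the VBox build additionally
                   refuses to chain raw-mode TBs (CF_RAW_MODE). */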
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
#ifndef VBOX
                if (next_tb != 0 && tb->page_addr[1] == -1) {
#else /* VBOX */
                if (next_tb != 0 && !(tb->cflags & CF_RAW_MODE) && tb->page_addr[1] == -1) {
#endif /* VBOX */
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);
#ifdef VBOX
                RAWEx_ProfileStop(env, STATS_TLB_LOOKUP);
#endif

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#ifdef VBOX
                    RAWEx_ProfileStart(env, STATS_QEMU_RUN_EMULATED_CODE);
#endif
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    Log5(("REM: tb=%p tc_ptr=%p %04x:%08RGv\n", tb, tc_ptr, env->segs[R_CS].selector, (RTGCPTR)env->eip));
#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
                    tcg_qemu_tb_exec(tc_ptr, next_tb);
#else
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
#endif
                    if (next_tb)
                        Log5(("REM: next_tb=%p %04x:%08RGv\n", next_tb, env->segs[R_CS].selector, (RTGCPTR)env->eip));
#ifdef VBOX
                    RAWEx_ProfileStop(env, STATS_QEMU_RUN_EMULATED_CODE);
#endif
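                    /* Note: with icount, the per-TB instruction budget lives
                       in env->icount_decr.u16.low (counted down to trigger an
                       exit) while env->icount_extra holds the instructions
                       beyond what fits in 16 bits. Example refill, assuming
                       icount_extra == 0x12345 on expiry: low becomes 0xffff
                       and icount_extra drops to 0x2346; once both are
                       exhausted, any remainder is run uncached via
                       cpu_exec_nocache(). */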
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired. */
                        int insns_left;
                        tb = (TranslationBlock *)(uintptr_t)(next_tb & ~3);
                        /* Restore PC. */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution. */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions. */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        }
#ifdef VBOX_HIGH_RES_TIMERS_HACK
        /* NULL the current_tb here so cpu_interrupt() doesn't do anything
           unnecessary (like crashing during single instruction emulation).
           Note! Don't use env1->pVM here, the code wouldn't run with
           gcc-4.4/amd64 anymore, see #3883. */
        env->current_tb = NULL;
        if (   !(env->interrupt_request & (  CPU_INTERRUPT_DEBUG | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_RC
                                           | CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
            && (   (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
                || TMTimerPollBool(env->pVM, env->pVCpu)) ) {
            ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_TIMER);
            remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
            TMR3TimerQueuesDo(env->pVM);
            remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
        }
#endif
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    barrier();
    env = (void *) saved_env_reg;

# ifndef VBOX /* we might be using it elsewhere; we only have one. */
    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
# endif
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it leads to an MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)
#define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
#else
#define EXCEPTION_ACTION cpu_loop_exit()
#endif

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(uintptr_t pc, uintptr_t address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }

    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    EXCEPTION_ACTION;

    /* never comes here */
    return 1;
}
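/* Note: the contract with the per-host cpu_signal_handler() wrappers below:
   handle_cpu_signal() returns 1 when the fault was consumed by the emulator
   (page unprotected, or soft-MMU fault handled) and 0 when it was not an
   MMU fault at all, in which case the caller should fall back to the host's
   default signal handling. A real guest fault never returns here:
   EXCEPTION_ACTION longjmp()s back into cpu_exec(). */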

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__NetBSD__)
# include <ucontext.h>

# define EIP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.__gregs[_REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
# include <ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext.mc_eip))
# define TRAP_sig(context)    ((context)->uc_mcontext.mc_trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext.mc_err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    uintptr_t pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
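    /* Note: on x86 a page fault (trap 0xe) pushes an error code whose bit 1
       indicates a write access, which is what "(ERROR_sig(uc) >> 1) & 1"
       below extracts; for any other trap number, is_write is simply 0. */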
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>

#define PC_sig(context)  (*((unsigned long*)&(context)->uc_mcontext.mc_rip))
#define TRAP_sig(context)     ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context)    ((context)->uc_mcontext.mc_err)
#define MASK_sig(context)     ((context)->uc_sigmask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    uintptr_t pc;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)    ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)     REG_sig(gpr[reg_num], context)
# define IAR_sig(context)              REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)              REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)              REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)              REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)               REG_sig(link, context) /* Link register */
# define CR_sig(context)               REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)   (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)            (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)              REG_sig(dar, context)
# define DSISR_sig(context)            REG_sig(dsisr, context)
# define TRAP_sig(context)             REG_sig(trap, context)
#endif /* linux */

#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <ucontext.h>
# define IAR_sig(context)              ((context)->uc_mcontext.mc_srr0)
# define MSR_sig(context)              ((context)->uc_mcontext.mc_srr1)
# define CTR_sig(context)              ((context)->uc_mcontext.mc_ctr)
# define XER_sig(context)              ((context)->uc_mcontext.mc_xer)
# define LR_sig(context)               ((context)->uc_mcontext.mc_lr)
# define CR_sig(context)               ((context)->uc_mcontext.mc_cr)
/* Exception Registers access */
# define DAR_sig(context)              ((context)->uc_mcontext.mc_dar)
# define DSISR_sig(context)            ((context)->uc_mcontext.mc_dsisr)
# define TRAP_sig(context)             ((context)->uc_mcontext.mc_exc)
#endif /* __FreeBSD__|| __FreeBSD_kernel__ */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)        ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)   ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)   ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)     ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)         REG_sig(r##reg_num, context)
# define IAR_sig(context)                  REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                  REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                  REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                  REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                   REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                   REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)       FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)                ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                  EXCEPREG_sig(dar, context)       /* Fault registers for coredump */
# define DSISR_sig(context)                EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                 EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
    ucontext_t *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    uintptr_t pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = (uint32_t *)uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal((uintptr_t)pc, (uintptr_t)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    uintptr_t pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    uintptr_t pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    uintptr_t pc = uc->sc_pc;
    void *sigmask = (void *)(uintptr_t)uc->sc_mask;
#endif
#endif
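    /* Note: the kernel does not report the access direction here, so the
       handler decodes the faulting instruction itself: on SPARC, op (insn
       bits 31:30) == 3 marks the load/store format, and the op3 field
       (bits 24:19) tested below selects the store variants. */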

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uintptr_t pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uintptr_t pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uintptr_t ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (uintptr_t)info->si_addr,
                             is_write,
                             (sigset_t *)&uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uintptr_t pc;
    uint16_t *pinsn;
    int is_write = 0;

    pc = uc->uc_mcontext.psw.addr;

    /* ??? On linux, the non-rt signal handler has 4 (!) arguments instead
       of the normal 2 arguments. The 3rd argument contains the "int_code"
       from the hardware which does in fact contain the is_write value.
       The rt signal handler, as far as I can tell, does not give this value
       at all. Not that we could get to it from here even if it were. */
    /* ??? This is not even close to complete, since it ignores all
       of the read-modify-write instructions. */
    pinsn = (uint16_t *)pc;
    switch (pinsn[0] >> 8) {
    case 0x50: /* ST */
    case 0x42: /* STC */
    case 0x40: /* STH */
        is_write = 1;
        break;
    case 0xc4: /* RIL format insns */
        switch (pinsn[0] & 0xf) {
        case 0xf: /* STRL */
        case 0xb: /* STGRL */
        case 0x7: /* STHRL */
            is_write = 1;
        }
        break;
    case 0xe3: /* RXY format insns */
        switch (pinsn[2] & 0xff) {
        case 0x50: /* STY */
        case 0x24: /* STG */
        case 0x72: /* STCY */
        case 0x70: /* STHY */
        case 0x8e: /* STPQ */
        case 0x3f: /* STRVH */
        case 0x3e: /* STRV */
        case 0x2f: /* STRVG */
            is_write = 1;
        }
        break;
    }
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    uintptr_t pc = uc->uc_mcontext.sc_iaoq[0];
    uint32_t insn = *(uint32_t *)pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster. */
    switch (insn >> 26) {
    case 0x1a: /* STW */
    case 0x19: /* STH */
    case 0x18: /* STB */
    case 0x1b: /* STWM */
        is_write = 1;
        break;

    case 0x09: /* CSTWX, FSTWX, FSTWS */
    case 0x0b: /* CSTDX, FSTDX, FSTDS */
        /* Distinguish from coprocessor load ... */
        is_write = (insn >> 9) & 1;
        break;

    case 0x03:
        switch ((insn >> 6) & 15) {
        case 0xa: /* STWS */
        case 0x9: /* STHS */
        case 0x8: /* STBS */
        case 0xe: /* STWAS */
        case 0xc: /* STBYS */
            is_write = 1;
        }
        break;
    }

    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */