VirtualBox

source: vbox/trunk/src/recompiler/cpu-exec.c@9663

Last change on this file since 9663 was 9541, checked in by vboxsync, 17 years ago

Logging changes

  • Property svn:eol-style set to native
File size: 69.1 KB
1/*
2 * i386 emulator main execution loop
3 *
4 * Copyright (c) 2003-2005 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20#include "config.h"
21#include "exec.h"
22#include "disas.h"
23
24#if !defined(CONFIG_SOFTMMU)
25#undef EAX
26#undef ECX
27#undef EDX
28#undef EBX
29#undef ESP
30#undef EBP
31#undef ESI
32#undef EDI
33#undef EIP
34#include <signal.h>
35#include <sys/ucontext.h>
36#endif
37
38int tb_invalidated_flag;
39
40//#define DEBUG_EXEC
41//#define DEBUG_SIGNAL
42
43#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_M68K)
44/* XXX: unify with i386 target */
45void cpu_loop_exit(void)
46{
47 longjmp(env->jmp_env, 1);
48}
49#endif
50#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
51#define reg_T2
52#endif
53
54/* exit the current TB from a signal handler. The host registers are
55 restored in a state compatible with the CPU emulator
56 */
57void cpu_resume_from_signal(CPUState *env1, void *puc)
58{
59#if !defined(CONFIG_SOFTMMU)
60 struct ucontext *uc = puc;
61#endif
62
63 env = env1;
64
65 /* XXX: restore cpu registers saved in host registers */
66
67#if !defined(CONFIG_SOFTMMU)
68 if (puc) {
69 /* XXX: use siglongjmp ? */
70 sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
71 }
72#endif
73 longjmp(env->jmp_env, 1);
74}
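
/*
 * Both cpu_loop_exit() and cpu_resume_from_signal() depend on the setjmp()
 * context that cpu_exec() arms around its dispatch loop: a longjmp() from
 * anywhere inside generated code or a signal handler unwinds straight back
 * to that setjmp(). The pattern in isolation, as a minimal standalone
 * sketch (all names are hypothetical; the block is not part of the build):
 */
#if 0
#include <setjmp.h>
#include <stdio.h>

static jmp_buf jmp_env;                 /* plays the role of env->jmp_env */

static void toy_loop_exit(void)
{
    longjmp(jmp_env, 1);                /* unwind back to the setjmp() below */
}

static void run_block(int n)
{
    if (n == 3)                         /* pretend block 3 raises an exception */
        toy_loop_exit();
}

int main(void)
{
    if (setjmp(jmp_env) == 0) {         /* same pattern as cpu_exec() */
        int n;
        for (n = 0; ; n++)              /* the "run TBs forever" loop */
            run_block(n);
    }
    printf("left the dispatch loop\n"); /* reached only via longjmp() */
    return 0;
}
#endif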
75
76
77static TranslationBlock *tb_find_slow(target_ulong pc,
78 target_ulong cs_base,
79 unsigned int flags)
80{
81 TranslationBlock *tb, **ptb1;
82 int code_gen_size;
83 unsigned int h;
84 target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
85 uint8_t *tc_ptr;
86
87 spin_lock(&tb_lock);
88
89 tb_invalidated_flag = 0;
90
91 regs_to_env(); /* XXX: do it just before cpu_gen_code() */
92
93 /* find translated block using physical mappings */
94 phys_pc = get_phys_addr_code(env, pc);
95 phys_page1 = phys_pc & TARGET_PAGE_MASK;
96 phys_page2 = -1;
97 h = tb_phys_hash_func(phys_pc);
98 ptb1 = &tb_phys_hash[h];
99 for(;;) {
100 tb = *ptb1;
101 if (!tb)
102 goto not_found;
103 if (tb->pc == pc &&
104 tb->page_addr[0] == phys_page1 &&
105 tb->cs_base == cs_base &&
106 tb->flags == flags) {
107 /* check next page if needed */
108 if (tb->page_addr[1] != -1) {
109 virt_page2 = (pc & TARGET_PAGE_MASK) +
110 TARGET_PAGE_SIZE;
111 phys_page2 = get_phys_addr_code(env, virt_page2);
112 if (tb->page_addr[1] == phys_page2)
113 goto found;
114 } else {
115 goto found;
116 }
117 }
118 ptb1 = &tb->phys_hash_next;
119 }
120 not_found:
121 /* if no translated code available, then translate it now */
122 tb = tb_alloc(pc);
123 if (!tb) {
124 /* flush must be done */
125 tb_flush(env);
126 /* cannot fail at this point */
127 tb = tb_alloc(pc);
128 /* don't forget to invalidate previous TB info */
129 tb_invalidated_flag = 1;
130 }
131 tc_ptr = code_gen_ptr;
132 tb->tc_ptr = tc_ptr;
133 tb->cs_base = cs_base;
134 tb->flags = flags;
135 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
136 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
137
138 /* check next page if needed */
139 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
140 phys_page2 = -1;
141 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
142 phys_page2 = get_phys_addr_code(env, virt_page2);
143 }
144 tb_link_phys(tb, phys_pc, phys_page2);
145
146 found:
147 /* we add the TB in the virtual pc hash table */
148 env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
149 spin_unlock(&tb_lock);
150 return tb;
151}
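
/*
 * tb_find_slow() keys its hash on the *physical* PC, so a cached TB stays
 * valid even when the guest changes its virtual mappings, and a TB whose
 * code straddles a page boundary must also match on the second physical
 * page. The page arithmetic in isolation (the page size and the identity
 * "MMU" below are invented for the example; not part of the build):
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define PAGE_BITS 12
#define PAGE_SIZE (1u << PAGE_BITS)
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* stand-in for get_phys_addr_code(): identity map plus a fixed offset */
static uint32_t get_phys(uint32_t va) { return va + 0x40000000u; }

int main(void)
{
    uint32_t pc      = 0x00001ff0;      /* TB starts 16 bytes before a page end */
    uint32_t tb_size = 0x40;            /* ...and spills onto the next page */

    uint32_t phys_pc    = get_phys(pc);
    uint32_t phys_page1 = phys_pc & PAGE_MASK;
    uint32_t virt_page2 = (pc + tb_size - 1) & PAGE_MASK;
    uint32_t phys_page2 = (uint32_t)-1; /* -1 means "fits in one page" */

    if ((pc & PAGE_MASK) != virt_page2) /* crosses a page boundary */
        phys_page2 = get_phys(virt_page2);

    printf("page1=%08x page2=%08x\n", (unsigned)phys_page1, (unsigned)phys_page2);
    return 0;
}
#endif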
152
153static inline TranslationBlock *tb_find_fast(void)
154{
155 TranslationBlock *tb;
156 target_ulong cs_base, pc;
157 unsigned int flags;
158
159 /* we record a subset of the CPU state. It will
160 always be the same before a given translated block
161 is executed. */
162#if defined(TARGET_I386)
163 flags = env->hflags;
164 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
165 cs_base = env->segs[R_CS].base;
166 pc = cs_base + env->eip;
167#elif defined(TARGET_ARM)
168 flags = env->thumb | (env->vfp.vec_len << 1)
169 | (env->vfp.vec_stride << 4);
170 if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
171 flags |= (1 << 6);
172 if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
173 flags |= (1 << 7);
174 cs_base = 0;
175 pc = env->regs[15];
176#elif defined(TARGET_SPARC)
177#ifdef TARGET_SPARC64
178 // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
179 flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
180 | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
181#else
182 // FPU enable . MMU enabled . MMU no-fault . Supervisor
183 flags = (env->psref << 3) | ((env->mmuregs[0] & (MMU_E | MMU_NF)) << 1)
184 | env->psrs;
185#endif
186 cs_base = env->npc;
187 pc = env->pc;
188#elif defined(TARGET_PPC)
189 flags = (msr_pr << MSR_PR) | (msr_fp << MSR_FP) |
190 (msr_se << MSR_SE) | (msr_le << MSR_LE);
191 cs_base = 0;
192 pc = env->nip;
193#elif defined(TARGET_MIPS)
194 flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
195 cs_base = 0;
196 pc = env->PC;
197#elif defined(TARGET_M68K)
198 flags = env->fpcr & M68K_FPCR_PREC;
199 cs_base = 0;
200 pc = env->pc;
201#elif defined(TARGET_SH4)
202 flags = env->sr & (SR_MD | SR_RB);
203 cs_base = 0; /* XXXXX */
204 pc = env->pc;
205#else
206#error unsupported CPU
207#endif
208 tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
209 if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
210 tb->flags != flags, 0)) {
211 tb = tb_find_slow(pc, cs_base, flags);
212 /* Note: we do it here to avoid a gcc bug on Mac OS X when
213 doing it in tb_find_slow */
214 if (tb_invalidated_flag) {
215 /* as some TB could have been invalidated because
216 of memory exceptions while generating the code, we
217 must recompute the hash index here */
218 T0 = 0;
219 }
220 }
221 return tb;
222}
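
/*
 * tb_find_fast() above is a direct-mapped, virtually-indexed cache in front
 * of the physically-indexed hash that tb_find_slow() walks: one probe, and
 * the tag check is the full (pc, cs_base, flags) triple. The same structure
 * as a standalone sketch (sizes and the hash are made up; not part of the
 * build):
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define JC_BITS 12
#define JC_SIZE (1u << JC_BITS)

struct toy_tb { uint32_t pc, cs_base, flags; };

static struct toy_tb *jmp_cache[JC_SIZE];
static struct toy_tb storage[4];        /* stands in for tb_alloc() */
static int n_tbs;

static unsigned jc_hash(uint32_t pc)
{
    return (pc ^ (pc >> JC_BITS)) & (JC_SIZE - 1);   /* toy hash */
}

/* stands in for tb_find_slow(): "translate" and refill the cache */
static struct toy_tb *toy_find_slow(uint32_t pc, uint32_t cs_base, uint32_t flags)
{
    struct toy_tb *tb = &storage[n_tbs++ & 3];
    tb->pc = pc; tb->cs_base = cs_base; tb->flags = flags;
    jmp_cache[jc_hash(pc)] = tb;
    return tb;
}

static struct toy_tb *toy_find_fast(uint32_t pc, uint32_t cs_base, uint32_t flags)
{
    struct toy_tb *tb = jmp_cache[jc_hash(pc)];
    if (!tb || tb->pc != pc || tb->cs_base != cs_base || tb->flags != flags)
        tb = toy_find_slow(pc, cs_base, flags);      /* miss: slow path */
    return tb;
}

int main(void)
{
    struct toy_tb *a = toy_find_fast(0x1000, 0, 0);  /* miss: translated */
    struct toy_tb *b = toy_find_fast(0x1000, 0, 0);  /* hit */
    printf("%s\n", a == b ? "hit" : "miss");
    return 0;
}
#endif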
223
224
225/* main execution loop */
226
227#ifdef VBOX
228
229int cpu_exec(CPUState *env1)
230{
231#define DECLARE_HOST_REGS 1
232#include "hostregs_helper.h"
233 int ret, interrupt_request;
234 void (*gen_func)(void);
235 TranslationBlock *tb;
236 uint8_t *tc_ptr;
237
238#if defined(TARGET_I386)
239 /* handle exit of HALTED state */
240 if (env1->hflags & HF_HALTED_MASK) {
241 /* disable halt condition */
242 if ((env1->interrupt_request & CPU_INTERRUPT_HARD) &&
243 (env1->eflags & IF_MASK)) {
244 env1->hflags &= ~HF_HALTED_MASK;
245 } else {
246 return EXCP_HALTED;
247 }
248 }
249#elif defined(TARGET_PPC)
250 if (env1->halted) {
251 if (env1->msr[MSR_EE] &&
252 (env1->interrupt_request &
253 (CPU_INTERRUPT_HARD | CPU_INTERRUPT_TIMER))) {
254 env1->halted = 0;
255 } else {
256 return EXCP_HALTED;
257 }
258 }
259#elif defined(TARGET_SPARC)
260 if (env1->halted) {
261 if ((env1->interrupt_request & CPU_INTERRUPT_HARD) &&
262 (env1->psret != 0)) {
263 env1->halted = 0;
264 } else {
265 return EXCP_HALTED;
266 }
267 }
268#elif defined(TARGET_ARM)
269 if (env1->halted) {
270 /* An interrupt wakes the CPU even if the I and F CPSR bits are
271 set. */
272 if (env1->interrupt_request
273 & (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD)) {
274 env1->halted = 0;
275 } else {
276 return EXCP_HALTED;
277 }
278 }
279#elif defined(TARGET_MIPS)
280 if (env1->halted) {
281 if (env1->interrupt_request &
282 (CPU_INTERRUPT_HARD | CPU_INTERRUPT_TIMER)) {
283 env1->halted = 0;
284 } else {
285 return EXCP_HALTED;
286 }
287 }
288#endif
289
290 cpu_single_env = env1;
291
292 /* first we save global registers */
293#define SAVE_HOST_REGS 1
294#include "hostregs_helper.h"
295 env = env1;
296#if defined(__sparc__) && !defined(HOST_SOLARIS)
297 /* we also save i7 because longjmp may not restore it */
298 asm volatile ("mov %%i7, %0" : "=r" (saved_i7));
299#endif
300
301#if defined(TARGET_I386)
302
303 env_to_regs();
304 /* put eflags in CPU temporary format */
305 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
306 DF = 1 - (2 * ((env->eflags >> 10) & 1));
307 CC_OP = CC_OP_EFLAGS;
308 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
309#elif defined(TARGET_ARM)
310#elif defined(TARGET_SPARC)
311#if defined(reg_REGWPTR)
312 saved_regwptr = REGWPTR;
313#endif
314#elif defined(TARGET_PPC)
315#elif defined(TARGET_MIPS)
316#elif defined(TARGET_SH4)
317 /* XXXXX */
318#else
319#error unsupported target CPU
320#endif
321#ifndef VBOX /* VBOX: We need to raise traps and suchlike from the outside. */
322 env->exception_index = -1;
323#endif
324
325 /* prepare setjmp context for exception handling */
326 for(;;) {
327 if (setjmp(env->jmp_env) == 0)
328 {
329 env->current_tb = NULL;
330 VMMR3Unlock(env->pVM);
331 VMMR3Lock(env->pVM);
332
333 /*
334 * Check for fatal errors first
335 */
336 if (env->interrupt_request & CPU_INTERRUPT_RC) {
337 env->exception_index = EXCP_RC;
338 ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_RC);
339 ret = env->exception_index;
340 cpu_loop_exit();
341 }
342
343 /* if an exception is pending, we execute it here */
344 if (env->exception_index >= 0) {
345 Assert(!env->user_mode_only);
346 if (env->exception_index >= EXCP_INTERRUPT) {
347 /* exit request from the cpu execution loop */
348 ret = env->exception_index;
349 break;
350 } else {
351 /* simulate a real cpu exception. On i386, it can
352 trigger new exceptions, but we do not handle
353 double or triple faults yet. */
354 RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
355 Log(("do_interrupt %d %d %08x\n", env->exception_index, env->exception_is_int, env->exception_next_eip));
356 do_interrupt(env->exception_index,
357 env->exception_is_int,
358 env->error_code,
359 env->exception_next_eip, 0);
360 RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
361 }
362 env->exception_index = -1;
363 }
364
365 T0 = 0; /* force lookup of first TB */
366 for(;;)
367 {
368 interrupt_request = env->interrupt_request;
369 if (__builtin_expect(interrupt_request, 0))
370 {
371 /* Single instruction exec request: we execute it and return (one way or the other).
372 The caller will always reschedule after doing this operation! */
373 if (interrupt_request & CPU_INTERRUPT_SINGLE_INSTR)
374 {
375 /* not in flight are we? (if we are, we trapped) */
376 if (!(env->interrupt_request & CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
377 {
378 ASMAtomicOrS32(&env->interrupt_request, CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
379 env->exception_index = EXCP_SINGLE_INSTR;
380 if (emulate_single_instr(env) == -1)
381 AssertMsgFailed(("REM: emulate_single_instr failed for EIP=%08x!!\n", env->eip));
382
383 /* If we receive an external interrupt during execution of this single
384 instruction, we should stay here. We will leave when we're ready
385 for raw-mode or when interrupted by pending EMT requests. */
386 interrupt_request = env->interrupt_request; /* reload this! */
387 if ( !(interrupt_request & CPU_INTERRUPT_HARD)
388 || !(env->eflags & IF_MASK)
389 || (env->hflags & HF_INHIBIT_IRQ_MASK)
390 || (env->state & CPU_RAW_HWACC)
391 )
392 {
393 env->exception_index = ret = EXCP_SINGLE_INSTR;
394 cpu_loop_exit();
395 }
396 }
397 /* Clear CPU_INTERRUPT_SINGLE_INSTR and leave CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT set. */
398 ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_SINGLE_INSTR);
399 }
400
401 RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
402 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
403 !(env->hflags & HF_SMM_MASK)) {
404 env->interrupt_request &= ~CPU_INTERRUPT_SMI;
405 do_smm_enter();
406 T0 = 0;
407 }
408 else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
409 (env->eflags & IF_MASK) &&
410 !(env->hflags & HF_INHIBIT_IRQ_MASK))
411 {
412 /* if hardware interrupt pending, we execute it */
413 int intno;
414 ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_HARD);
415 intno = cpu_get_pic_interrupt(env);
416 if (intno >= 0)
417 {
418 Log(("do_interrupt %d\n", intno));
419 do_interrupt(intno, 0, 0, 0, 1);
420 }
421 /* ensure that no TB jump will be modified as
422 the program flow was changed */
423 T0 = 0;
424 }
425 if (env->interrupt_request & CPU_INTERRUPT_EXITTB)
426 {
427 ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXITTB);
428 /* ensure that no TB jump will be modified as
429 the program flow was changed */
430 T0 = 0;
431 }
432 RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
433 if (interrupt_request & CPU_INTERRUPT_EXIT)
434 {
435 env->exception_index = EXCP_INTERRUPT;
436 ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXIT);
437 ret = env->exception_index;
438 cpu_loop_exit();
439 }
440 if (interrupt_request & CPU_INTERRUPT_RC)
441 {
442 env->exception_index = EXCP_RC;
443 ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_RC);
444 ret = env->exception_index;
445 cpu_loop_exit();
446 }
447 }
448
449 /*
450 * Check if the CPU state allows us to execute the code in raw-mode.
451 */
452 RAWEx_ProfileStart(env, STATS_RAW_CHECK);
453 if (remR3CanExecuteRaw(env,
454 env->eip + env->segs[R_CS].base,
455 env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK)),
456 &env->exception_index))
457 {
458 RAWEx_ProfileStop(env, STATS_RAW_CHECK);
459 ret = env->exception_index;
460 cpu_loop_exit();
461 }
462 RAWEx_ProfileStop(env, STATS_RAW_CHECK);
463
464 RAWEx_ProfileStart(env, STATS_TLB_LOOKUP);
465 tb = tb_find_fast();
466
467 /* see if we can patch the calling TB. When the TB
468 spans two pages, we cannot safely do a direct
469 jump. */
470 if (T0 != 0
471 && !(tb->cflags & CF_RAW_MODE)
472 && tb->page_addr[1] == -1)
473 {
474 spin_lock(&tb_lock);
475 tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
476 spin_unlock(&tb_lock);
477 }
478 tc_ptr = tb->tc_ptr;
479 env->current_tb = tb;
480 /* execute the generated code */
481 gen_func = (void *)tc_ptr;
482 RAWEx_ProfileStop(env, STATS_TLB_LOOKUP);
483
484#if defined(DEBUG) && defined(VBOX) && !defined(DEBUG_dmik)
485#if !defined(DEBUG_bird)
486 if (((env->hflags >> HF_CPL_SHIFT) & 3) == 0 && (env->hflags & HF_PE_MASK) && (env->cr[0] & CR0_PG_MASK))
487 {
488 if(!(env->state & CPU_EMULATE_SINGLE_STEP))
489 {
490 Log(("EMR0: %VGv ESP=%VGv IF=%d TF=%d CPL=%d\n", env->eip, ESP, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3));
491 }
492 }
493 else
494 if (((env->hflags >> HF_CPL_SHIFT) & 3) == 3 && (env->hflags & HF_PE_MASK) && (env->cr[0] & CR0_PG_MASK))
495 {
496 if(!(env->state & CPU_EMULATE_SINGLE_STEP))
497 {
498 if(env->eflags & VM_MASK)
499 {
500 Log(("EMV86: %04X:%04X IF=%d TF=%d CPL=%d CR0=%08X\n", env->segs[R_CS].selector, env->eip, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3, env->cr[0]));
501 }
502 else
503 {
504 Log(("EMR3: %VGv ESP=%VGv IF=%d TF=%d CPL=%d IOPL=%d CR0=%08X\n", env->eip, ESP, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3, ((env->eflags >> IOPL_SHIFT) & 3), env->cr[0]));
505 }
506 }
507 }
508 else
509 {
510 /* Seriously slows down realmode booting. */
511 LogFlow(("EMRM: %04X:%08X SS:ESP=%04X:%08X IF=%d TF=%d CPL=%d PE=%d PG=%d\n", env->segs[R_CS].selector, env->eip, env->segs[R_SS].selector, ESP, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3, env->cr[0] & X86_CR0_PE, env->cr[0] & X86_CR0_PG));
512 }
513#endif /* !DEBUG_bird */
514 if(env->state & CPU_EMULATE_SINGLE_STEP)
515 {
516#ifdef DEBUG_bird
517 static int s_cTimes = 0;
518 if (s_cTimes++ > 1000000)
519 {
520 RTLogPrintf("Enough stepping!\n");
521 #if 0
522 env->exception_index = EXCP_DEBUG;
523 ret = env->exception_index;
524 cpu_loop_exit();
525 #else
526 env->state &= ~CPU_EMULATE_SINGLE_STEP;
527 #endif
528 }
529#endif
530 TMCpuTickPause(env->pVM);
531 remR3DisasInstr(env, -1, NULL);
532 TMCpuTickResume(env->pVM);
533 if(emulate_single_instr(env) == -1)
534 {
535 Log(("emulate_single_instr failed for EIP=%08X!!\n", env->eip));
536 }
537 }
538 else
539 {
540 RAWEx_ProfileStart(env, STATS_QEMU_RUN_EMULATED_CODE);
541 gen_func();
542 RAWEx_ProfileStop(env, STATS_QEMU_RUN_EMULATED_CODE);
543 }
544#else /* !DEBUG || !VBOX || DEBUG_dmik */
545
546 RAWEx_ProfileStart(env, STATS_QEMU_RUN_EMULATED_CODE);
547 gen_func();
548 RAWEx_ProfileStop(env, STATS_QEMU_RUN_EMULATED_CODE);
549
550#endif /* !DEBUG || !VBOX || DEBUG_dmik */
551 env->current_tb = NULL;
552 /* reset soft MMU for next block (it can currently
553 only be set by a memory fault) */
554#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
555 if (env->hflags & HF_SOFTMMU_MASK) {
556 env->hflags &= ~HF_SOFTMMU_MASK;
557 /* do not allow linking to another block */
558 T0 = 0;
559 }
560#endif
561 }
562 } else {
563 env_to_regs();
564 }
565#ifdef VBOX_HIGH_RES_TIMERS_HACK
566 /* NULL the current_tb here so cpu_interrupt() doesn't do
567 anything unnecessary (like crashing during single-instruction emulation). */
568 env->current_tb = NULL;
569 TMTimerPoll(env1->pVM);
570#endif
571 } /* for(;;) */
572
573#if defined(TARGET_I386)
574 /* restore flags in standard format */
575 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
576#else
577#error unsupported target CPU
578#endif
579#include "hostregs_helper.h"
580 return ret;
581}
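
/*
 * The "T0 & ~3" / "T0 & 3" trick used in cpu_exec() above: on exit from a
 * translated block, T0 carries the address of that block with the index of
 * the jump slot taken (0 or 1) packed into the two low bits, which are free
 * because a TranslationBlock is at least 4-byte aligned. tb_add_jump() then
 * patches that slot to branch directly to the next block. The tagging
 * scheme in isolation (hypothetical names; not part of the build):
 */
#if 0
#include <assert.h>
#include <stdio.h>

struct toy_tb { struct toy_tb *jmp_next[2]; };   /* two possible exits */

int main(void)
{
    static struct toy_tb prev, next;    /* aligned well beyond 4 bytes */
    int slot = 1;                       /* which exit of 'prev' we took */
    unsigned long t0 = (unsigned long)&prev | slot;

    struct toy_tb *from = (struct toy_tb *)(t0 & ~3ul);
    assert(from == &prev);
    from->jmp_next[t0 & 3] = &next;     /* the patch tb_add_jump() makes */
    printf("chained exit %lu of prev to next\n", t0 & 3);
    return 0;
}
#endif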
582
583
584#else /* !VBOX */
585
586
587int cpu_exec(CPUState *env1)
588{
589#define DECLARE_HOST_REGS 1
590#include "hostregs_helper.h"
591#if defined(__sparc__) && !defined(HOST_SOLARIS)
592 int saved_i7;
593 target_ulong tmp_T0;
594#endif
595 int ret, interrupt_request;
596 void (*gen_func)(void);
597 TranslationBlock *tb;
598 uint8_t *tc_ptr;
599
600#if defined(TARGET_I386)
601 /* handle exit of HALTED state */
602 if (env1->hflags & HF_HALTED_MASK) {
603 /* disable halt condition */
604 if ((env1->interrupt_request & CPU_INTERRUPT_HARD) &&
605 (env1->eflags & IF_MASK)) {
606 env1->hflags &= ~HF_HALTED_MASK;
607 } else {
608 return EXCP_HALTED;
609 }
610 }
611#elif defined(TARGET_PPC)
612 if (env1->halted) {
613 if (env1->msr[MSR_EE] &&
614 (env1->interrupt_request &
615 (CPU_INTERRUPT_HARD | CPU_INTERRUPT_TIMER))) {
616 env1->halted = 0;
617 } else {
618 return EXCP_HALTED;
619 }
620 }
621#elif defined(TARGET_SPARC)
622 if (env1->halted) {
623 if ((env1->interrupt_request & CPU_INTERRUPT_HARD) &&
624 (env1->psret != 0)) {
625 env1->halted = 0;
626 } else {
627 return EXCP_HALTED;
628 }
629 }
630#elif defined(TARGET_ARM)
631 if (env1->halted) {
632 /* An interrupt wakes the CPU even if the I and F CPSR bits are
633 set. */
634 if (env1->interrupt_request
635 & (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD)) {
636 env1->halted = 0;
637 } else {
638 return EXCP_HALTED;
639 }
640 }
641#elif defined(TARGET_MIPS)
642 if (env1->halted) {
643 if (env1->interrupt_request &
644 (CPU_INTERRUPT_HARD | CPU_INTERRUPT_TIMER)) {
645 env1->halted = 0;
646 } else {
647 return EXCP_HALTED;
648 }
649 }
650#endif
651
652 cpu_single_env = env1;
653
654 /* first we save global registers */
655#define SAVE_HOST_REGS 1
656#include "hostregs_helper.h"
657 env = env1;
658#if defined(__sparc__) && !defined(HOST_SOLARIS)
659 /* we also save i7 because longjmp may not restore it */
660 asm volatile ("mov %%i7, %0" : "=r" (saved_i7));
661#endif
662
663#if defined(TARGET_I386)
664 env_to_regs();
665 /* put eflags in CPU temporary format */
666 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
667 DF = 1 - (2 * ((env->eflags >> 10) & 1));
668 CC_OP = CC_OP_EFLAGS;
669 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
670#elif defined(TARGET_ARM)
671#elif defined(TARGET_SPARC)
672#if defined(reg_REGWPTR)
673 saved_regwptr = REGWPTR;
674#endif
675#elif defined(TARGET_PPC)
676#elif defined(TARGET_M68K)
677 env->cc_op = CC_OP_FLAGS;
678 env->cc_dest = env->sr & 0xf;
679 env->cc_x = (env->sr >> 4) & 1;
680#elif defined(TARGET_MIPS)
681#elif defined(TARGET_SH4)
682 /* XXXXX */
683#else
684#error unsupported target CPU
685#endif
686#ifndef VBOX /* VBOX: We need to raise traps and suchlike from the outside. */
687 env->exception_index = -1;
688#endif
689
690 /* prepare setjmp context for exception handling */
691 for(;;) {
692 if (setjmp(env->jmp_env) == 0) {
693 env->current_tb = NULL;
694#ifdef VBOX
695 VMMR3Unlock(env->pVM);
696 VMMR3Lock(env->pVM);
697
698 /* Check for high priority requests first (like fatal
699 errors). */
700 if (env->interrupt_request & CPU_INTERRUPT_RC) {
701 env->exception_index = EXCP_RC;
702 ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_RC);
703 ret = env->exception_index;
704 cpu_loop_exit();
705 }
706#endif /* VBOX */
707
708
709 /* if an exception is pending, we execute it here */
710 if (env->exception_index >= 0) {
711 if (env->exception_index >= EXCP_INTERRUPT) {
712 /* exit request from the cpu execution loop */
713 ret = env->exception_index;
714 break;
715 } else if (env->user_mode_only) {
716 /* if user mode only, we simulate a fake exception
717 which will be handled outside the cpu execution
718 loop */
719#if defined(TARGET_I386)
720 do_interrupt_user(env->exception_index,
721 env->exception_is_int,
722 env->error_code,
723 env->exception_next_eip);
724#endif
725 ret = env->exception_index;
726 break;
727 } else {
728#if defined(TARGET_I386)
729 /* simulate a real cpu exception. On i386, it can
730 trigger new exceptions, but we do not handle
731 double or triple faults yet. */
732 do_interrupt(env->exception_index,
733 env->exception_is_int,
734 env->error_code,
735 env->exception_next_eip, 0);
736#elif defined(TARGET_PPC)
737 do_interrupt(env);
738#elif defined(TARGET_MIPS)
739 do_interrupt(env);
740#elif defined(TARGET_SPARC)
741 do_interrupt(env->exception_index);
742#elif defined(TARGET_ARM)
743 do_interrupt(env);
744#elif defined(TARGET_SH4)
745 do_interrupt(env);
746#endif
747 }
748 env->exception_index = -1;
749 }
750#ifdef USE_KQEMU
751 if (kqemu_is_ok(env) && env->interrupt_request == 0) {
752 int ret;
753 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
754 ret = kqemu_cpu_exec(env);
755 /* put eflags in CPU temporary format */
756 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
757 DF = 1 - (2 * ((env->eflags >> 10) & 1));
758 CC_OP = CC_OP_EFLAGS;
759 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
760 if (ret == 1) {
761 /* exception */
762 longjmp(env->jmp_env, 1);
763 } else if (ret == 2) {
764 /* softmmu execution needed */
765 } else {
766 if (env->interrupt_request != 0) {
767 /* hardware interrupt will be executed just after */
768 } else {
769 /* otherwise, we restart */
770 longjmp(env->jmp_env, 1);
771 }
772 }
773 }
774#endif
775
776 T0 = 0; /* force lookup of first TB */
777 for(;;) {
778#if defined(__sparc__) && !defined(HOST_SOLARIS)
779 /* g1 can be modified by some libc? functions */
780 tmp_T0 = T0;
781#endif
782 interrupt_request = env->interrupt_request;
783 if (__builtin_expect(interrupt_request, 0)) {
784#ifdef VBOX
785 /* Single instruction exec request: we execute it and return (one way or the other).
786 The caller will always reschedule after doing this operation! */
787 if (interrupt_request & CPU_INTERRUPT_SINGLE_INSTR)
788 {
789 /* not in flight are we? */
790 if (!(env->interrupt_request & CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
791 {
792 ASMAtomicOrS32(&env->interrupt_request, CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
793 env->exception_index = EXCP_SINGLE_INSTR;
794 if (emulate_single_instr(env) == -1)
795 AssertMsgFailed(("REM: emulate_single_instr failed for EIP=%08x!!\n", env->eip));
796
797 /* If we receive an external interrupt during execution of this single
798 instruction, we should stay here. We will leave when we're ready
799 for raw-mode or when interrupted by pending EMT requests. */
800 interrupt_request = env->interrupt_request; /* reload this! */
801 if ( !(interrupt_request & CPU_INTERRUPT_HARD)
802 || !(env->eflags & IF_MASK)
803 || (env->hflags & HF_INHIBIT_IRQ_MASK)
804 )
805 {
806 env->exception_index = ret = EXCP_SINGLE_INSTR;
807 cpu_loop_exit();
808 }
809 }
810 env->exception_index = EXCP_SINGLE_INSTR;
811 cpu_loop_exit();
812 }
813
814 RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
815#endif /* VBOX */
816#if defined(TARGET_I386)
817 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
818 !(env->hflags & HF_SMM_MASK)) {
819 env->interrupt_request &= ~CPU_INTERRUPT_SMI;
820 do_smm_enter();
821#if defined(__sparc__) && !defined(HOST_SOLARIS)
822 tmp_T0 = 0;
823#else
824 T0 = 0;
825#endif
826 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
827 (env->eflags & IF_MASK) &&
828 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
829 int intno;
830#if defined(VBOX)
831 ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_HARD);
832#else
833 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
834#endif
835 intno = cpu_get_pic_interrupt(env);
836 if (loglevel & CPU_LOG_TB_IN_ASM) {
837 fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
838 }
839#if defined(VBOX)
840 if (intno >= 0)
841#endif
842 do_interrupt(intno, 0, 0, 0, 1);
843 /* ensure that no TB jump will be modified as
844 the program flow was changed */
845#if defined(__sparc__) && !defined(HOST_SOLARIS)
846 tmp_T0 = 0;
847#else
848 T0 = 0;
849#endif
850 }
851#elif defined(TARGET_PPC)
852#if 0
853 if ((interrupt_request & CPU_INTERRUPT_RESET)) {
854 cpu_ppc_reset(env);
855 }
856#endif
857 if (msr_ee != 0) {
858 if ((interrupt_request & CPU_INTERRUPT_HARD)) {
859 /* Raise it */
860 env->exception_index = EXCP_EXTERNAL;
861 env->error_code = 0;
862 do_interrupt(env);
863 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
864#if defined(__sparc__) && !defined(HOST_SOLARIS)
865 tmp_T0 = 0;
866#else
867 T0 = 0;
868#endif
869 } else if ((interrupt_request & CPU_INTERRUPT_TIMER)) {
870 /* Raise it */
871 env->exception_index = EXCP_DECR;
872 env->error_code = 0;
873 do_interrupt(env);
874 env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
875#if defined(__sparc__) && !defined(HOST_SOLARIS)
876 tmp_T0 = 0;
877#else
878 T0 = 0;
879#endif
880 }
881 }
882#elif defined(TARGET_MIPS)
883 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
884 (env->CP0_Status & (1 << CP0St_IE)) &&
885 (env->CP0_Status & env->CP0_Cause & 0x0000FF00) &&
886 !(env->hflags & MIPS_HFLAG_EXL) &&
887 !(env->hflags & MIPS_HFLAG_ERL) &&
888 !(env->hflags & MIPS_HFLAG_DM)) {
889 /* Raise it */
890 env->exception_index = EXCP_EXT_INTERRUPT;
891 env->error_code = 0;
892 do_interrupt(env);
893#if defined(__sparc__) && !defined(HOST_SOLARIS)
894 tmp_T0 = 0;
895#else
896 T0 = 0;
897#endif
898 }
899#elif defined(TARGET_SPARC)
900 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
901 (env->psret != 0)) {
902 int pil = env->interrupt_index & 15;
903 int type = env->interrupt_index & 0xf0;
904
905 if (((type == TT_EXTINT) &&
906 (pil == 15 || pil > env->psrpil)) ||
907 type != TT_EXTINT) {
908 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
909 do_interrupt(env->interrupt_index);
910 env->interrupt_index = 0;
911#if defined(__sparc__) && !defined(HOST_SOLARIS)
912 tmp_T0 = 0;
913#else
914 T0 = 0;
915#endif
916 }
917 } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
918 //do_interrupt(0, 0, 0, 0, 0);
919 env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
920 } else if (interrupt_request & CPU_INTERRUPT_HALT) {
921 env->interrupt_request &= ~CPU_INTERRUPT_HALT;
922 env->halted = 1;
923 env->exception_index = EXCP_HLT;
924 cpu_loop_exit();
925 }
926#elif defined(TARGET_ARM)
927 if (interrupt_request & CPU_INTERRUPT_FIQ
928 && !(env->uncached_cpsr & CPSR_F)) {
929 env->exception_index = EXCP_FIQ;
930 do_interrupt(env);
931 }
932 if (interrupt_request & CPU_INTERRUPT_HARD
933 && !(env->uncached_cpsr & CPSR_I)) {
934 env->exception_index = EXCP_IRQ;
935 do_interrupt(env);
936 }
937#elif defined(TARGET_SH4)
938 /* XXXXX */
939#endif
940 /* Don't use the cached interrupt_request value,
941 do_interrupt may have updated the EXITTB flag. */
942 if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
943#if defined(VBOX)
944 ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXITTB);
945#else
946 env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
947#endif
948 /* ensure that no TB jump will be modified as
949 the program flow was changed */
950#if defined(__sparc__) && !defined(HOST_SOLARIS)
951 tmp_T0 = 0;
952#else
953 T0 = 0;
954#endif
955 }
956#ifdef VBOX
957 RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
958#endif
959 if (interrupt_request & CPU_INTERRUPT_EXIT) {
960#if defined(VBOX)
961 env->exception_index = EXCP_INTERRUPT;
962 ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXIT);
963#else
964 env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
965 env->exception_index = EXCP_INTERRUPT;
966#endif
967 cpu_loop_exit();
968 }
969#if defined(VBOX)
970 if (interrupt_request & CPU_INTERRUPT_RC) {
971 env->exception_index = EXCP_RC;
972 ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_RC);
973 cpu_loop_exit();
974 }
975#endif
976 }
977#ifdef DEBUG_EXEC
978 if ((loglevel & CPU_LOG_TB_CPU)) {
979#if defined(TARGET_I386)
980 /* restore flags in standard format */
981#ifdef reg_EAX
982 env->regs[R_EAX] = EAX;
983#endif
984#ifdef reg_EBX
985 env->regs[R_EBX] = EBX;
986#endif
987#ifdef reg_ECX
988 env->regs[R_ECX] = ECX;
989#endif
990#ifdef reg_EDX
991 env->regs[R_EDX] = EDX;
992#endif
993#ifdef reg_ESI
994 env->regs[R_ESI] = ESI;
995#endif
996#ifdef reg_EDI
997 env->regs[R_EDI] = EDI;
998#endif
999#ifdef reg_EBP
1000 env->regs[R_EBP] = EBP;
1001#endif
1002#ifdef reg_ESP
1003 env->regs[R_ESP] = ESP;
1004#endif
1005 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
1006 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1007 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
1008#elif defined(TARGET_ARM)
1009 cpu_dump_state(env, logfile, fprintf, 0);
1010#elif defined(TARGET_SPARC)
1011 REGWPTR = env->regbase + (env->cwp * 16);
1012 env->regwptr = REGWPTR;
1013 cpu_dump_state(env, logfile, fprintf, 0);
1014#elif defined(TARGET_PPC)
1015 cpu_dump_state(env, logfile, fprintf, 0);
1016#elif defined(TARGET_M68K)
1017 cpu_m68k_flush_flags(env, env->cc_op);
1018 env->cc_op = CC_OP_FLAGS;
1019 env->sr = (env->sr & 0xffe0)
1020 | env->cc_dest | (env->cc_x << 4);
1021 cpu_dump_state(env, logfile, fprintf, 0);
1022#elif defined(TARGET_MIPS)
1023 cpu_dump_state(env, logfile, fprintf, 0);
1024#elif defined(TARGET_SH4)
1025 cpu_dump_state(env, logfile, fprintf, 0);
1026#else
1027#error unsupported target CPU
1028#endif
1029 }
1030#endif
1031#ifdef VBOX
1032 /*
1033 * Check if the CPU state allows us to execute the code in raw-mode.
1034 */
1035 RAWEx_ProfileStart(env, STATS_RAW_CHECK);
1036 if (remR3CanExecuteRaw(env,
1037 env->eip + env->segs[R_CS].base,
1038 env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK)),
1039 &env->exception_index))
1040 {
1041 RAWEx_ProfileStop(env, STATS_RAW_CHECK);
1042 ret = env->exception_index;
1043 cpu_loop_exit();
1044 }
1045 RAWEx_ProfileStop(env, STATS_RAW_CHECK);
1046#endif /* VBOX */
1047 tb = tb_find_fast();
1048#ifdef DEBUG_EXEC
1049 if ((loglevel & CPU_LOG_EXEC)) {
1050 fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
1051 (long)tb->tc_ptr, tb->pc,
1052 lookup_symbol(tb->pc));
1053 }
1054#endif
1055#if defined(__sparc__) && !defined(HOST_SOLARIS)
1056 T0 = tmp_T0;
1057#endif
1058 /* see if we can patch the calling TB. When the TB
1059 spans two pages, we cannot safely do a direct
1060 jump. */
1061 {
1062 if (T0 != 0 &&
1063#if USE_KQEMU
1064 (env->kqemu_enabled != 2) &&
1065#endif
1066#ifdef VBOX
1067 !(tb->cflags & CF_RAW_MODE) &&
1068#endif
1069 tb->page_addr[1] == -1
1070#if defined(TARGET_I386) && defined(USE_CODE_COPY)
1071 && (tb->cflags & CF_CODE_COPY) ==
1072 (((TranslationBlock *)(T0 & ~3))->cflags & CF_CODE_COPY)
1073#endif
1074 ) {
1075 spin_lock(&tb_lock);
1076 tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
1077#if defined(USE_CODE_COPY)
1078 /* propagates the FP use info */
1079 ((TranslationBlock *)(T0 & ~3))->cflags |=
1080 (tb->cflags & CF_FP_USED);
1081#endif
1082 spin_unlock(&tb_lock);
1083 }
1084 }
1085 tc_ptr = tb->tc_ptr;
1086 env->current_tb = tb;
1087 /* execute the generated code */
1088 gen_func = (void *)tc_ptr;
1089#if defined(__sparc__)
1090 __asm__ __volatile__("call %0\n\t"
1091 "mov %%o7,%%i0"
1092 : /* no outputs */
1093 : "r" (gen_func)
1094 : "i0", "i1", "i2", "i3", "i4", "i5",
1095 "l0", "l1", "l2", "l3", "l4", "l5",
1096 "l6", "l7");
1097#elif defined(__arm__)
1098 asm volatile ("mov pc, %0\n\t"
1099 ".global exec_loop\n\t"
1100 "exec_loop:\n\t"
1101 : /* no outputs */
1102 : "r" (gen_func)
1103 : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
1104#elif defined(TARGET_I386) && defined(USE_CODE_COPY)
1105{
1106 if (!(tb->cflags & CF_CODE_COPY)) {
1107 if ((tb->cflags & CF_FP_USED) && env->native_fp_regs) {
1108 save_native_fp_state(env);
1109 }
1110 gen_func();
1111 } else {
1112 if ((tb->cflags & CF_FP_USED) && !env->native_fp_regs) {
1113 restore_native_fp_state(env);
1114 }
1115 /* we work with native eflags */
1116 CC_SRC = cc_table[CC_OP].compute_all();
1117 CC_OP = CC_OP_EFLAGS;
1118 asm(".globl exec_loop\n"
1119 "\n"
1120 "debug1:\n"
1121 " pushl %%ebp\n"
1122 " fs movl %10, %9\n"
1123 " fs movl %11, %%eax\n"
1124 " andl $0x400, %%eax\n"
1125 " fs orl %8, %%eax\n"
1126 " pushl %%eax\n"
1127 " popf\n"
1128 " fs movl %%esp, %12\n"
1129 " fs movl %0, %%eax\n"
1130 " fs movl %1, %%ecx\n"
1131 " fs movl %2, %%edx\n"
1132 " fs movl %3, %%ebx\n"
1133 " fs movl %4, %%esp\n"
1134 " fs movl %5, %%ebp\n"
1135 " fs movl %6, %%esi\n"
1136 " fs movl %7, %%edi\n"
1137 " fs jmp *%9\n"
1138 "exec_loop:\n"
1139 " fs movl %%esp, %4\n"
1140 " fs movl %12, %%esp\n"
1141 " fs movl %%eax, %0\n"
1142 " fs movl %%ecx, %1\n"
1143 " fs movl %%edx, %2\n"
1144 " fs movl %%ebx, %3\n"
1145 " fs movl %%ebp, %5\n"
1146 " fs movl %%esi, %6\n"
1147 " fs movl %%edi, %7\n"
1148 " pushf\n"
1149 " popl %%eax\n"
1150 " movl %%eax, %%ecx\n"
1151 " andl $0x400, %%ecx\n"
1152 " shrl $9, %%ecx\n"
1153 " andl $0x8d5, %%eax\n"
1154 " fs movl %%eax, %8\n"
1155 " movl $1, %%eax\n"
1156 " subl %%ecx, %%eax\n"
1157 " fs movl %%eax, %11\n"
1158 " fs movl %9, %%ebx\n" /* get T0 value */
1159 " popl %%ebp\n"
1160 :
1161 : "m" (*(uint8_t *)offsetof(CPUState, regs[0])),
1162 "m" (*(uint8_t *)offsetof(CPUState, regs[1])),
1163 "m" (*(uint8_t *)offsetof(CPUState, regs[2])),
1164 "m" (*(uint8_t *)offsetof(CPUState, regs[3])),
1165 "m" (*(uint8_t *)offsetof(CPUState, regs[4])),
1166 "m" (*(uint8_t *)offsetof(CPUState, regs[5])),
1167 "m" (*(uint8_t *)offsetof(CPUState, regs[6])),
1168 "m" (*(uint8_t *)offsetof(CPUState, regs[7])),
1169 "m" (*(uint8_t *)offsetof(CPUState, cc_src)),
1170 "m" (*(uint8_t *)offsetof(CPUState, tmp0)),
1171 "a" (gen_func),
1172 "m" (*(uint8_t *)offsetof(CPUState, df)),
1173 "m" (*(uint8_t *)offsetof(CPUState, saved_esp))
1174 : "%ecx", "%edx"
1175 );
1176 }
1177}
1178#elif defined(__ia64)
1179 struct fptr {
1180 void *ip;
1181 void *gp;
1182 } fp;
1183
1184 fp.ip = tc_ptr;
1185 fp.gp = code_gen_buffer + 2 * (1 << 20);
1186 (*(void (*)(void)) &fp)();
1187#else
1188#if defined(DEBUG) && defined(VBOX) && !defined(DEBUG_dmik)
1189#if !defined(DEBUG_bird)
1190 if (((env->hflags >> HF_CPL_SHIFT) & 3) == 0 && (env->hflags & HF_PE_MASK) && (env->cr[0] & CR0_PG_MASK))
1191 {
1192 if(!(env->state & CPU_EMULATE_SINGLE_STEP))
1193 {
1194 Log(("EMR0: %08X IF=%d TF=%d CPL=%d\n", env->eip, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3));
1195 }
1196 }
1197 else
1198 if (((env->hflags >> HF_CPL_SHIFT) & 3) == 3 && (env->hflags & HF_PE_MASK) && (env->cr[0] & CR0_PG_MASK))
1199 {
1200 if(!(env->state & CPU_EMULATE_SINGLE_STEP))
1201 {
1202 if(env->eflags & VM_MASK)
1203 {
1204 Log(("EMV86: %08X IF=%d TF=%d CPL=%d flags=%08X CR0=%08X\n", env->eip, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3, flags, env->cr[0]));
1205 }
1206 else
1207 {
1208 Log(("EMR3: %08X IF=%d TF=%d CPL=%d IOPL=%d flags=%08X CR0=%08X\n", env->eip, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3, ((env->eflags >> IOPL_SHIFT) & 3), flags, env->cr[0]));
1209 }
1210 }
1211 }
1212#endif /* !DEBUG_bird */
1213 if(env->state & CPU_EMULATE_SINGLE_STEP)
1214 {
1215#ifdef DEBUG_bird
1216 static int s_cTimes = 0;
1217 if (s_cTimes++ > 1000000) /* 1 million */
1218 {
1219 RTLogPrintf("Enough stepping!\n");
1220 #if 0
1221 env->exception_index = EXCP_DEBUG;
1222 cpu_loop_exit();
1223 #else
1224 env->state &= ~CPU_EMULATE_SINGLE_STEP;
1225 #endif
1226 }
1227#endif
1228 TMCpuTickPause(env->pVM);
1229 remR3DisasInstr(env, -1, NULL);
1230 TMCpuTickResume(env->pVM);
1231 if(emulate_single_instr(env) == -1)
1232 {
1233 printf("emulate_single_instr failed for EIP=%08X!!\n", env->eip);
1234 }
1235 }
1236 else
1237 {
1238 RAWEx_ProfileStart(env, STATS_QEMU_RUN_EMULATED_CODE);
1239 gen_func();
1240 RAWEx_ProfileStop(env, STATS_QEMU_RUN_EMULATED_CODE);
1241 }
1242#else /* !DEBUG || !VBOX || DEBUG_dmik */
1243
1244#ifdef VBOX
1245 RAWEx_ProfileStart(env, STATS_QEMU_RUN_EMULATED_CODE);
1246 gen_func();
1247 RAWEx_ProfileStop(env, STATS_QEMU_RUN_EMULATED_CODE);
1248#else /* !VBOX */
1249 gen_func();
1250#endif /* !VBOX */
1251
1252#endif /* !DEBUG || !VBOX || DEBUG_dmik */
1253#endif
1254 env->current_tb = NULL;
1255 /* reset soft MMU for next block (it can currently
1256 only be set by a memory fault) */
1257#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
1258 if (env->hflags & HF_SOFTMMU_MASK) {
1259 env->hflags &= ~HF_SOFTMMU_MASK;
1260 /* do not allow linking to another block */
1261 T0 = 0;
1262 }
1263#endif
1264#if defined(USE_KQEMU)
1265#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
1266 if (kqemu_is_ok(env) &&
1267 (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
1268 cpu_loop_exit();
1269 }
1270#endif
1271 }
1272 } else {
1273 env_to_regs();
1274 }
1275 } /* for(;;) */
1276
1277
1278#if defined(TARGET_I386)
1279#if defined(USE_CODE_COPY)
1280 if (env->native_fp_regs) {
1281 save_native_fp_state(env);
1282 }
1283#endif
1284 /* restore flags in standard format */
1285 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
1286#elif defined(TARGET_ARM)
1287 /* XXX: Save/restore host fpu exception state? */
1288#elif defined(TARGET_SPARC)
1289#if defined(reg_REGWPTR)
1290 REGWPTR = saved_regwptr;
1291#endif
1292#elif defined(TARGET_PPC)
1293#elif defined(TARGET_M68K)
1294 cpu_m68k_flush_flags(env, env->cc_op);
1295 env->cc_op = CC_OP_FLAGS;
1296 env->sr = (env->sr & 0xffe0)
1297 | env->cc_dest | (env->cc_x << 4);
1298#elif defined(TARGET_MIPS)
1299#elif defined(TARGET_SH4)
1300 /* XXXXX */
1301#else
1302#error unsupported target CPU
1303#endif
1304#if defined(__sparc__) && !defined(HOST_SOLARIS)
1305 asm volatile ("mov %0, %%i7" : : "r" (saved_i7));
1306#endif
1307#include "hostregs_helper.h"
1308
1309 /* fail safe : never use cpu_single_env outside cpu_exec() */
1310 cpu_single_env = NULL;
1311 return ret;
1312}
1313
1314#endif /* !VBOX */
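
/*
 * The "put eflags in CPU temporary format" steps in both cpu_exec() variants
 * implement lazy condition codes: instead of recomputing EFLAGS after every
 * guest instruction, the translator records which operation ran last (CC_OP)
 * and its operands/result (CC_SRC and friends), and
 * cc_table[CC_OP].compute_all() rebuilds the flags only when something
 * actually reads them. A reduced model with one lazy op (the flag values are
 * the real x86 bit positions, everything else is invented; not part of the
 * build):
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define FLAG_CF 0x001
#define FLAG_ZF 0x040
#define FLAG_SF 0x080

enum toy_cc_op { TOY_CC_EFLAGS, TOY_CC_ADDL };

static enum toy_cc_op toy_cc_op;
static uint32_t toy_cc_src;              /* one operand of the last op */
static uint32_t toy_cc_dst;              /* result of the last op */

static uint32_t toy_compute_all(void)
{
    uint32_t flags = 0;
    switch (toy_cc_op) {
    case TOY_CC_EFLAGS:                  /* flags already in x86 layout */
        return toy_cc_src;
    case TOY_CC_ADDL:                    /* derive flags lazily from the add */
        if (toy_cc_dst == 0)          flags |= FLAG_ZF;
        if (toy_cc_dst & 0x80000000u) flags |= FLAG_SF;
        if (toy_cc_dst < toy_cc_src)  flags |= FLAG_CF;  /* unsigned wrap */
        return flags;
    }
    return 0;
}

int main(void)
{
    /* emulate "addl $1, %eax" with EAX = 0xffffffff: record, compute nothing */
    toy_cc_src = 0xffffffffu;
    toy_cc_dst = toy_cc_src + 1;         /* wraps to 0 */
    toy_cc_op  = TOY_CC_ADDL;
    printf("eflags=%03x\n", (unsigned)toy_compute_all());   /* ZF|CF = 041 */
    return 0;
}
#endif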
1315
1316/* must only be called from the generated code as an exception can be
1317 generated */
1318void tb_invalidate_page_range(target_ulong start, target_ulong end)
1319{
1320 /* XXX: cannot enable it yet because it yields to MMU exception
1321 where NIP != read address on PowerPC */
1322#if 0
1323 target_ulong phys_addr;
1324 phys_addr = get_phys_addr_code(env, start);
1325 tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
1326#endif
1327}
1328
1329#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
1330
1331void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
1332{
1333 CPUX86State *saved_env;
1334
1335 saved_env = env;
1336 env = s;
1337 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
1338 selector &= 0xffff;
1339 cpu_x86_load_seg_cache(env, seg_reg, selector,
1340 (selector << 4), 0xffff, 0);
1341 } else {
1342 load_seg(seg_reg, selector);
1343 }
1344 env = saved_env;
1345}
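
/*
 * The real-mode/VM86 branch above encodes the 8086 addressing rule: outside
 * protected mode a segment register holds a paragraph number rather than a
 * descriptor-table selector, so the base is simply selector * 16 and the
 * limit is a flat 64K-1 -- exactly the values passed to
 * cpu_x86_load_seg_cache(). Worked out numerically (standalone example, not
 * part of the build):
 */
#if 0
#include <stdio.h>

int main(void)
{
    unsigned selector = 0xb800;          /* classic CGA text-mode segment */
    unsigned base     = selector << 4;   /* 0xb8000 */
    unsigned offset   = 0x00a0;          /* second row of characters */
    unsigned linear   = base + offset;   /* 0xb80a0 */
    printf("base=%05x linear=%05x limit=%04x\n", base, linear, 0xffff);
    return 0;
}
#endif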
1346
1347void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32)
1348{
1349 CPUX86State *saved_env;
1350
1351 saved_env = env;
1352 env = s;
1353
1354 helper_fsave((target_ulong)ptr, data32);
1355
1356 env = saved_env;
1357}
1358
1359void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32)
1360{
1361 CPUX86State *saved_env;
1362
1363 saved_env = env;
1364 env = s;
1365
1366 helper_frstor((target_ulong)ptr, data32);
1367
1368 env = saved_env;
1369}
1370
1371#endif /* TARGET_I386 */
1372
1373#if !defined(CONFIG_SOFTMMU)
1374
1375#if defined(TARGET_I386)
1376
1377/* 'pc' is the host PC at which the exception was raised. 'address' is
1378 the effective address of the memory exception. 'is_write' is 1 if a
1379 write caused the exception and 0 otherwise. 'old_set' is the
1380 signal set which should be restored */
1381static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1382 int is_write, sigset_t *old_set,
1383 void *puc)
1384{
1385 TranslationBlock *tb;
1386 int ret;
1387
1388 if (cpu_single_env)
1389 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1390#if defined(DEBUG_SIGNAL)
1391 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1392 pc, address, is_write, *(unsigned long *)old_set);
1393#endif
1394 /* XXX: locking issue */
1395 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1396 return 1;
1397 }
1398
1399 /* see if it is an MMU fault */
1400 ret = cpu_x86_handle_mmu_fault(env, address, is_write,
1401 ((env->hflags & HF_CPL_MASK) == 3), 0);
1402 if (ret < 0)
1403 return 0; /* not an MMU fault */
1404 if (ret == 0)
1405 return 1; /* the MMU fault was handled without causing real CPU fault */
1406 /* now we have a real cpu fault */
1407 tb = tb_find_pc(pc);
1408 if (tb) {
1409 /* the PC is inside the translated code. It means that we have
1410 a virtual CPU fault */
1411 cpu_restore_state(tb, env, pc, puc);
1412 }
1413 if (ret == 1) {
1414#if 0
1415 printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
1416 env->eip, env->cr[2], env->error_code);
1417#endif
1418 /* we restore the process signal mask as the sigreturn should
1419 do it (XXX: use sigsetjmp) */
1420 sigprocmask(SIG_SETMASK, old_set, NULL);
1421 raise_exception_err(env->exception_index, env->error_code);
1422 } else {
1423 /* activate soft MMU for this block */
1424 env->hflags |= HF_SOFTMMU_MASK;
1425 cpu_resume_from_signal(env, puc);
1426 }
1427 /* never comes here */
1428 return 1;
1429}
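
/*
 * The write-fault path above works because returning from a handler for a
 * synchronous SIGSEGV restarts the faulting instruction, which then succeeds
 * once page_unprotect() has made the page writable again. The mechanism as a
 * standalone POSIX demo (toy code, not VBox/QEMU; strictly speaking
 * mprotect() is not on the async-signal-safe list, but this is the standard
 * technique for handling synchronous faults):
 */
#if 0
#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static char *page;
static long  pagesz;

static void on_segv(int sig, siginfo_t *info, void *puc)
{
    char *addr = (char *)info->si_addr;
    (void)sig; (void)puc;
    if (addr >= page && addr < page + pagesz) {
        mprotect(page, pagesz, PROT_READ | PROT_WRITE);
        return;                  /* returning retries the faulting store */
    }
    _exit(1);                    /* not our page: give up */
}

int main(void)
{
    struct sigaction sa;

    memset(&sa, 0, sizeof(sa));
    sa.sa_sigaction = on_segv;
    sa.sa_flags = SA_SIGINFO;
    sigaction(SIGSEGV, &sa, NULL);

    pagesz = sysconf(_SC_PAGESIZE);
    page = mmap(NULL, pagesz, PROT_READ,
                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    page[0] = 'x';               /* faults once, handler unprotects, retried */
    printf("wrote '%c' after unprotecting\n", page[0]);
    return 0;
}
#endif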
1430
1431#elif defined(TARGET_ARM)
1432static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1433 int is_write, sigset_t *old_set,
1434 void *puc)
1435{
1436 TranslationBlock *tb;
1437 int ret;
1438
1439 if (cpu_single_env)
1440 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1441#if defined(DEBUG_SIGNAL)
1442 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1443 pc, address, is_write, *(unsigned long *)old_set);
1444#endif
1445 /* XXX: locking issue */
1446 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1447 return 1;
1448 }
1449 /* see if it is an MMU fault */
1450 ret = cpu_arm_handle_mmu_fault(env, address, is_write, 1, 0);
1451 if (ret < 0)
1452 return 0; /* not an MMU fault */
1453 if (ret == 0)
1454 return 1; /* the MMU fault was handled without causing real CPU fault */
1455 /* now we have a real cpu fault */
1456 tb = tb_find_pc(pc);
1457 if (tb) {
1458 /* the PC is inside the translated code. It means that we have
1459 a virtual CPU fault */
1460 cpu_restore_state(tb, env, pc, puc);
1461 }
1462 /* we restore the process signal mask as the sigreturn should
1463 do it (XXX: use sigsetjmp) */
1464 sigprocmask(SIG_SETMASK, old_set, NULL);
1465 cpu_loop_exit();
1466}
1467#elif defined(TARGET_SPARC)
1468static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1469 int is_write, sigset_t *old_set,
1470 void *puc)
1471{
1472 TranslationBlock *tb;
1473 int ret;
1474
1475 if (cpu_single_env)
1476 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1477#if defined(DEBUG_SIGNAL)
1478 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1479 pc, address, is_write, *(unsigned long *)old_set);
1480#endif
1481 /* XXX: locking issue */
1482 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1483 return 1;
1484 }
1485 /* see if it is an MMU fault */
1486 ret = cpu_sparc_handle_mmu_fault(env, address, is_write, 1, 0);
1487 if (ret < 0)
1488 return 0; /* not an MMU fault */
1489 if (ret == 0)
1490 return 1; /* the MMU fault was handled without causing real CPU fault */
1491 /* now we have a real cpu fault */
1492 tb = tb_find_pc(pc);
1493 if (tb) {
1494 /* the PC is inside the translated code. It means that we have
1495 a virtual CPU fault */
1496 cpu_restore_state(tb, env, pc, puc);
1497 }
1498 /* we restore the process signal mask as the sigreturn should
1499 do it (XXX: use sigsetjmp) */
1500 sigprocmask(SIG_SETMASK, old_set, NULL);
1501 cpu_loop_exit();
1502}
1503#elif defined (TARGET_PPC)
1504static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1505 int is_write, sigset_t *old_set,
1506 void *puc)
1507{
1508 TranslationBlock *tb;
1509 int ret;
1510
1511 if (cpu_single_env)
1512 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1513#if defined(DEBUG_SIGNAL)
1514 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1515 pc, address, is_write, *(unsigned long *)old_set);
1516#endif
1517 /* XXX: locking issue */
1518 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1519 return 1;
1520 }
1521
1522 /* see if it is an MMU fault */
1523 ret = cpu_ppc_handle_mmu_fault(env, address, is_write, msr_pr, 0);
1524 if (ret < 0)
1525 return 0; /* not an MMU fault */
1526 if (ret == 0)
1527 return 1; /* the MMU fault was handled without causing real CPU fault */
1528
1529 /* now we have a real cpu fault */
1530 tb = tb_find_pc(pc);
1531 if (tb) {
1532 /* the PC is inside the translated code. It means that we have
1533 a virtual CPU fault */
1534 cpu_restore_state(tb, env, pc, puc);
1535 }
1536 if (ret == 1) {
1537#if 0
1538 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1539 env->nip, env->error_code, tb);
1540#endif
1541 /* we restore the process signal mask as the sigreturn should
1542 do it (XXX: use sigsetjmp) */
1543 sigprocmask(SIG_SETMASK, old_set, NULL);
1544 do_raise_exception_err(env->exception_index, env->error_code);
1545 } else {
1546 /* activate soft MMU for this block */
1547 cpu_resume_from_signal(env, puc);
1548 }
1549 /* never comes here */
1550 return 1;
1551}
1552
1553#elif defined(TARGET_M68K)
1554static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1555 int is_write, sigset_t *old_set,
1556 void *puc)
1557{
1558 TranslationBlock *tb;
1559 int ret;
1560
1561 if (cpu_single_env)
1562 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1563#if defined(DEBUG_SIGNAL)
1564 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1565 pc, address, is_write, *(unsigned long *)old_set);
1566#endif
1567 /* XXX: locking issue */
1568 if (is_write && page_unprotect(address, pc, puc)) {
1569 return 1;
1570 }
1571 /* see if it is an MMU fault */
1572 ret = cpu_m68k_handle_mmu_fault(env, address, is_write, 1, 0);
1573 if (ret < 0)
1574 return 0; /* not an MMU fault */
1575 if (ret == 0)
1576 return 1; /* the MMU fault was handled without causing real CPU fault */
1577 /* now we have a real cpu fault */
1578 tb = tb_find_pc(pc);
1579 if (tb) {
1580 /* the PC is inside the translated code. It means that we have
1581 a virtual CPU fault */
1582 cpu_restore_state(tb, env, pc, puc);
1583 }
1584 /* we restore the process signal mask as the sigreturn should
1585 do it (XXX: use sigsetjmp) */
1586 sigprocmask(SIG_SETMASK, old_set, NULL);
1587 cpu_loop_exit();
1588 /* never comes here */
1589 return 1;
1590}
1591
1592#elif defined (TARGET_MIPS)
1593static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1594 int is_write, sigset_t *old_set,
1595 void *puc)
1596{
1597 TranslationBlock *tb;
1598 int ret;
1599
1600 if (cpu_single_env)
1601 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1602#if defined(DEBUG_SIGNAL)
1603 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1604 pc, address, is_write, *(unsigned long *)old_set);
1605#endif
1606 /* XXX: locking issue */
1607 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1608 return 1;
1609 }
1610
1611 /* see if it is an MMU fault */
1612 ret = cpu_mips_handle_mmu_fault(env, address, is_write, 1, 0);
1613 if (ret < 0)
1614 return 0; /* not an MMU fault */
1615 if (ret == 0)
1616 return 1; /* the MMU fault was handled without causing real CPU fault */
1617
1618 /* now we have a real cpu fault */
1619 tb = tb_find_pc(pc);
1620 if (tb) {
1621 /* the PC is inside the translated code. It means that we have
1622 a virtual CPU fault */
1623 cpu_restore_state(tb, env, pc, puc);
1624 }
1625 if (ret == 1) {
1626#if 0
1627 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1628 env->nip, env->error_code, tb);
1629#endif
1630 /* we restore the process signal mask as the sigreturn should
1631 do it (XXX: use sigsetjmp) */
1632 sigprocmask(SIG_SETMASK, old_set, NULL);
1633 do_raise_exception_err(env->exception_index, env->error_code);
1634 } else {
1635 /* activate soft MMU for this block */
1636 cpu_resume_from_signal(env, puc);
1637 }
1638 /* never comes here */
1639 return 1;
1640}
1641
1642#elif defined (TARGET_SH4)
1643static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1644 int is_write, sigset_t *old_set,
1645 void *puc)
1646{
1647 TranslationBlock *tb;
1648 int ret;
1649
1650 if (cpu_single_env)
1651 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1652#if defined(DEBUG_SIGNAL)
1653 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1654 pc, address, is_write, *(unsigned long *)old_set);
1655#endif
1656 /* XXX: locking issue */
1657 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1658 return 1;
1659 }
1660
1661 /* see if it is an MMU fault */
1662 ret = cpu_sh4_handle_mmu_fault(env, address, is_write, 1, 0);
1663 if (ret < 0)
1664 return 0; /* not an MMU fault */
1665 if (ret == 0)
1666 return 1; /* the MMU fault was handled without causing real CPU fault */
1667
1668 /* now we have a real cpu fault */
1669 tb = tb_find_pc(pc);
1670 if (tb) {
1671 /* the PC is inside the translated code. It means that we have
1672 a virtual CPU fault */
1673 cpu_restore_state(tb, env, pc, puc);
1674 }
1675#if 0
1676 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1677 env->nip, env->error_code, tb);
1678#endif
1679 /* we restore the process signal mask as the sigreturn should
1680 do it (XXX: use sigsetjmp) */
1681 sigprocmask(SIG_SETMASK, old_set, NULL);
1682 cpu_loop_exit();
1683 /* never comes here */
1684 return 1;
1685}
1686#else
1687#error unsupported target CPU
1688#endif
1689
1690#if defined(__i386__)
1691
1692#if defined(USE_CODE_COPY)
1693static void cpu_send_trap(unsigned long pc, int trap,
1694 struct ucontext *uc)
1695{
1696 TranslationBlock *tb;
1697
1698 if (cpu_single_env)
1699 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1700 /* now we have a real cpu fault */
1701 tb = tb_find_pc(pc);
1702 if (tb) {
1703 /* the PC is inside the translated code. It means that we have
1704 a virtual CPU fault */
1705 cpu_restore_state(tb, env, pc, uc);
1706 }
1707 sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
1708 raise_exception_err(trap, env->error_code);
1709}
1710#endif
1711
1712int cpu_signal_handler(int host_signum, void *pinfo,
1713 void *puc)
1714{
1715 siginfo_t *info = pinfo;
1716 struct ucontext *uc = puc;
1717 unsigned long pc;
1718 int trapno;
1719
1720#ifndef REG_EIP
1721/* for glibc 2.1 */
1722#define REG_EIP EIP
1723#define REG_ERR ERR
1724#define REG_TRAPNO TRAPNO
1725#endif
1726 pc = uc->uc_mcontext.gregs[REG_EIP];
1727 trapno = uc->uc_mcontext.gregs[REG_TRAPNO];
1728#if defined(TARGET_I386) && defined(USE_CODE_COPY)
1729 if (trapno == 0x00 || trapno == 0x05) {
1730 /* send division by zero or bound exception */
1731 cpu_send_trap(pc, trapno, uc);
1732 return 1;
1733 } else
1734#endif
1735 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1736 trapno == 0xe ?
1737 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
1738 &uc->uc_sigmask, puc);
1739}
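
/*
 * The "(... gregs[REG_ERR] >> 1) & 1" above decodes the hardware page-fault
 * error code (trap 0xe): bit 0 set means protection violation (clear means
 * not-present), bit 1 set means the access was a write, bit 2 set means the
 * fault was raised in user mode. Decoded in isolation (standalone example,
 * not part of the build):
 */
#if 0
#include <stdio.h>

int main(void)
{
    unsigned long err = 0x6;        /* user-mode write to a not-present page */
    int is_write   = (int)((err >> 1) & 1);   /* 1: caused by a write */
    int is_user    = (int)((err >> 2) & 1);   /* 1: raised at CPL 3 */
    int is_protect = (int)(err & 1);          /* 1: protection violation */
    printf("write=%d user=%d protection=%d\n", is_write, is_user, is_protect);
    return 0;
}
#endif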
1740
1741#elif defined(__x86_64__)
1742
1743int cpu_signal_handler(int host_signum, void *pinfo,
1744 void *puc)
1745{
1746 siginfo_t *info = pinfo;
1747 struct ucontext *uc = puc;
1748 unsigned long pc;
1749
1750 pc = uc->uc_mcontext.gregs[REG_RIP];
1751 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1752 uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
1753 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
1754 &uc->uc_sigmask, puc);
1755}
1756
1757#elif defined(__powerpc__)
1758
1759/***********************************************************************
1760 * signal context platform-specific definitions
1761 * From Wine
1762 */
1763#ifdef linux
1764/* All Registers access - only for local access */
1765# define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
1766/* Gpr Registers access */
1767# define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
1768# define IAR_sig(context) REG_sig(nip, context) /* Program counter */
1769# define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
1770# define CTR_sig(context) REG_sig(ctr, context) /* Count register */
1771# define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1772# define LR_sig(context) REG_sig(link, context) /* Link register */
1773# define CR_sig(context) REG_sig(ccr, context) /* Condition register */
1774/* Float Registers access */
1775# define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
1776# define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
1777/* Exception Registers access */
1778# define DAR_sig(context) REG_sig(dar, context)
1779# define DSISR_sig(context) REG_sig(dsisr, context)
1780# define TRAP_sig(context) REG_sig(trap, context)
1781#endif /* linux */
1782
1783#ifdef __APPLE__
1784# include <sys/ucontext.h>
1785typedef struct ucontext SIGCONTEXT;
1786/* All Registers access - only for local access */
1787# define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
1788# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
1789# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
1790# define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
1791/* Gpr Registers access */
1792# define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
1793# define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
1794# define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
1795# define CTR_sig(context) REG_sig(ctr, context)
1796 # define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1797 # define LR_sig(context) REG_sig(lr, context) /* Link register */
1798# define CR_sig(context) REG_sig(cr, context) /* Condition register */
1799/* Float Registers access */
1800# define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
1801# define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
1802/* Exception Registers access */
1803# define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
1804# define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
1805# define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
1806#endif /* __APPLE__ */
1807
1808int cpu_signal_handler(int host_signum, void *pinfo,
1809 void *puc)
1810{
1811 siginfo_t *info = pinfo;
1812 struct ucontext *uc = puc;
1813 unsigned long pc;
1814 int is_write;
1815
1816 pc = IAR_sig(uc);
1817 is_write = 0;
1818#if 0
1819 /* ppc 4xx case */
1820 if (DSISR_sig(uc) & 0x00800000)
1821 is_write = 1;
1822#else
1823 if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
1824 is_write = 1;
1825#endif
1826 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1827 is_write, &uc->uc_sigmask, puc);
1828}
1829
1830#elif defined(__alpha__)
1831
1832int cpu_signal_handler(int host_signum, void *pinfo,
1833 void *puc)
1834{
1835 siginfo_t *info = pinfo;
1836 struct ucontext *uc = puc;
1837 uint32_t *pc = uc->uc_mcontext.sc_pc;
1838 uint32_t insn = *pc;
1839 int is_write = 0;
1840
1841 /* XXX: need kernel patch to get write flag faster */
1842 switch (insn >> 26) {
1843 case 0x0d: // stw
1844 case 0x0e: // stb
1845 case 0x0f: // stq_u
1846 case 0x24: // stf
1847 case 0x25: // stg
1848 case 0x26: // sts
1849 case 0x27: // stt
1850 case 0x2c: // stl
1851 case 0x2d: // stq
1852 case 0x2e: // stl_c
1853 case 0x2f: // stq_c
1854 is_write = 1;
1855 }
1856
1857 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1858 is_write, &uc->uc_sigmask, puc);
1859}
1860#elif defined(__sparc__)
1861
1862int cpu_signal_handler(int host_signum, void *pinfo,
1863 void *puc)
1864{
1865 siginfo_t *info = pinfo;
1866 uint32_t *regs = (uint32_t *)(info + 1);
1867 void *sigmask = (regs + 20);
1868 unsigned long pc;
1869 int is_write;
1870 uint32_t insn;
1871
1872 /* XXX: is there a standard glibc define ? */
1873 pc = regs[1];
1874 /* XXX: need kernel patch to get write flag faster */
1875 is_write = 0;
1876 insn = *(uint32_t *)pc;
1877 if ((insn >> 30) == 3) {
1878 switch((insn >> 19) & 0x3f) {
1879 case 0x05: // stb
1880 case 0x06: // sth
1881 case 0x04: // st
1882 case 0x07: // std
1883 case 0x24: // stf
1884 case 0x27: // stdf
1885 case 0x25: // stfsr
1886 is_write = 1;
1887 break;
1888 }
1889 }
1890 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1891 is_write, sigmask, NULL);
1892}
1893
1894#elif defined(__arm__)
1895
1896int cpu_signal_handler(int host_signum, void *pinfo,
1897 void *puc)
1898{
1899 siginfo_t *info = pinfo;
1900 struct ucontext *uc = puc;
1901 unsigned long pc;
1902 int is_write;
1903
1904 pc = uc->uc_mcontext.gregs[R15];
1905 /* XXX: compute is_write */
1906 is_write = 0;
1907 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1908 is_write,
1909 &uc->uc_sigmask, puc);
1910}
1911
1912#elif defined(__mc68000)
1913
1914int cpu_signal_handler(int host_signum, void *pinfo,
1915 void *puc)
1916{
1917 siginfo_t *info = pinfo;
1918 struct ucontext *uc = puc;
1919 unsigned long pc;
1920 int is_write;
1921
1922 pc = uc->uc_mcontext.gregs[16];
1923 /* XXX: compute is_write */
1924 is_write = 0;
1925 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1926 is_write,
1927 &uc->uc_sigmask, puc);
1928}
1929
1930#elif defined(__ia64)
1931
1932#ifndef __ISR_VALID
1933 /* This ought to be in <bits/siginfo.h>... */
1934# define __ISR_VALID 1
1935#endif
1936
1937int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
1938{
1939 siginfo_t *info = pinfo;
1940 struct ucontext *uc = puc;
1941 unsigned long ip;
1942 int is_write = 0;
1943
1944 ip = uc->uc_mcontext.sc_ip;
1945 switch (host_signum) {
1946 case SIGILL:
1947 case SIGFPE:
1948 case SIGSEGV:
1949 case SIGBUS:
1950 case SIGTRAP:
1951 if (info->si_code && (info->si_segvflags & __ISR_VALID))
1952 /* ISR.W (write-access) is bit 33: */
1953 is_write = (info->si_isr >> 33) & 1;
1954 break;
1955
1956 default:
1957 break;
1958 }
1959 return handle_cpu_signal(ip, (unsigned long)info->si_addr,
1960 is_write,
1961 &uc->uc_sigmask, puc);
1962}
1963
1964#elif defined(__s390__)
1965
1966int cpu_signal_handler(int host_signum, void *pinfo,
1967 void *puc)
1968{
1969 siginfo_t *info = pinfo;
1970 struct ucontext *uc = puc;
1971 unsigned long pc;
1972 int is_write;
1973
1974 pc = uc->uc_mcontext.psw.addr;
1975 /* XXX: compute is_write */
1976 is_write = 0;
1977 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1978 is_write,
1979 &uc->uc_sigmask, puc);
1980}
1981
1982#else
1983
1984#error host CPU specific signal handler needed
1985
1986#endif
1987
1988#endif /* !defined(CONFIG_SOFTMMU) */