VirtualBox

source: vbox/trunk/src/recompiler/new/exec.c @ 1111

Last change on this file since 1111 was 1111, checked in by vboxsync, 18 years ago

Wrong changes in old sync

1/*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20#include "config.h"
21#ifndef VBOX
22#ifdef _WIN32
23#include <windows.h>
24#else
25#include <sys/types.h>
26#include <sys/mman.h>
27#endif
28#include <stdlib.h>
29#include <stdio.h>
30#include <stdarg.h>
31#include <string.h>
32#include <errno.h>
33#include <unistd.h>
34#include <inttypes.h>
35#else /* VBOX */
36# include <stdlib.h>
37# include <stdio.h>
38# include <inttypes.h>
39# include <iprt/alloc.h>
40# include <iprt/string.h>
41# include <iprt/param.h>
42#endif /* VBOX */
43
44#include "cpu.h"
45#include "exec-all.h"
46#if defined(CONFIG_USER_ONLY)
47#include <qemu.h>
48#endif
49
50//#define DEBUG_TB_INVALIDATE
51//#define DEBUG_FLUSH
52//#define DEBUG_TLB
53//#define DEBUG_UNASSIGNED
54
55/* make various TB consistency checks */
56//#define DEBUG_TB_CHECK
57//#define DEBUG_TLB_CHECK
58
59#if !defined(CONFIG_USER_ONLY)
60/* TB consistency checks only implemented for usermode emulation. */
61#undef DEBUG_TB_CHECK
62#endif
63
64/* threshold to flush the translated code buffer */
65#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
66
67#define SMC_BITMAP_USE_THRESHOLD 10
68
69#define MMAP_AREA_START 0x00000000
70#define MMAP_AREA_END 0xa8000000
71
72#if defined(TARGET_SPARC64)
73#define TARGET_PHYS_ADDR_SPACE_BITS 41
74#elif defined(TARGET_PPC64)
75#define TARGET_PHYS_ADDR_SPACE_BITS 42
76#else
77/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
78#define TARGET_PHYS_ADDR_SPACE_BITS 32
79#endif
80
81TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
82TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
83int nb_tbs;
84/* any access to the tbs or the page table must use this lock */
85spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
86
87uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE]
88#if defined(__MINGW32__)
89 __attribute__((aligned (16)));
90#else
91 __attribute__((aligned (32)));
92#endif
93uint8_t *code_gen_ptr;
94
95#ifndef VBOX
96int phys_ram_size;
97int phys_ram_fd;
99#else /* VBOX */
100RTGCPHYS phys_ram_size;
101/* we have memory ranges (the high PC-BIOS mapping) which
102 cause some pages to fall outside the dirty map here. */
103uint32_t phys_ram_dirty_size;
104#endif /* VBOX */
105#ifndef VBOX
106uint8_t *phys_ram_base;
107#endif
108uint8_t *phys_ram_dirty;
109
110CPUState *first_cpu;
111/* current CPU in the current thread. It is only valid inside
112 cpu_exec() */
113CPUState *cpu_single_env;
114
115typedef struct PageDesc {
116 /* list of TBs intersecting this ram page */
117 TranslationBlock *first_tb;
118 /* in order to optimize self modifying code, we count the number
119 of lookups we do to a given page to use a bitmap */
120 unsigned int code_write_count;
121 uint8_t *code_bitmap;
122#if defined(CONFIG_USER_ONLY)
123 unsigned long flags;
124#endif
125} PageDesc;
126
127typedef struct PhysPageDesc {
128 /* offset in host memory of the page + io_index in the low 12 bits */
129 uint32_t phys_offset;
130} PhysPageDesc;
131
132#define L2_BITS 10
133#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
134
135#define L1_SIZE (1 << L1_BITS)
136#define L2_SIZE (1 << L2_BITS)
137
138static void io_mem_init(void);
139
140unsigned long qemu_real_host_page_size;
141unsigned long qemu_host_page_bits;
142unsigned long qemu_host_page_size;
143unsigned long qemu_host_page_mask;
144
145/* XXX: for system emulation, it could just be an array */
146static PageDesc *l1_map[L1_SIZE];
147PhysPageDesc **l1_phys_map;
148
149/* io memory support */
150CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
151CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
152void *io_mem_opaque[IO_MEM_NB_ENTRIES];
153static int io_mem_nb;
154
155#ifndef VBOX
156/* log support */
157char *logfilename = "/tmp/qemu.log";
158#endif /* !VBOX */
159FILE *logfile;
160int loglevel;
161
162/* statistics */
163static int tlb_flush_count;
164static int tb_flush_count;
165#ifndef VBOX
166static int tb_phys_invalidate_count;
167#endif /* !VBOX */
168
169static void page_init(void)
170{
171 /* NOTE: we can always suppose that qemu_host_page_size >=
172 TARGET_PAGE_SIZE */
173#ifdef VBOX
174 RTMemProtect(code_gen_buffer, sizeof(code_gen_buffer),
175 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
176 qemu_real_host_page_size = PAGE_SIZE;
177#else /* !VBOX */
178#ifdef _WIN32
179 {
180 SYSTEM_INFO system_info;
181 DWORD old_protect;
182
183 GetSystemInfo(&system_info);
184 qemu_real_host_page_size = system_info.dwPageSize;
185
186 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
187 PAGE_EXECUTE_READWRITE, &old_protect);
188 }
189#else
190 qemu_real_host_page_size = getpagesize();
191 {
192 unsigned long start, end;
193
194 start = (unsigned long)code_gen_buffer;
195 start &= ~(qemu_real_host_page_size - 1);
196
197 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
198 end += qemu_real_host_page_size - 1;
199 end &= ~(qemu_real_host_page_size - 1);
200
201 mprotect((void *)start, end - start,
202 PROT_READ | PROT_WRITE | PROT_EXEC);
203 }
204#endif
205#endif /* !VBOX */
206
207 if (qemu_host_page_size == 0)
208 qemu_host_page_size = qemu_real_host_page_size;
209 if (qemu_host_page_size < TARGET_PAGE_SIZE)
210 qemu_host_page_size = TARGET_PAGE_SIZE;
211 qemu_host_page_bits = 0;
212 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
213 qemu_host_page_bits++;
214 qemu_host_page_mask = ~(qemu_host_page_size - 1);
215 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
216 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
217}
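/* A minimal illustrative sketch of the host-page rounding performed above,
   assuming a 4 KiB host page; the function name is hypothetical. */
#if 0
static void example_host_page_rounding(void)
{
    unsigned long size = 4096, bits = 0;
    while ((1ul << bits) < size)
        bits++;                                /* bits == 12 */
    /* qemu_host_page_mask == ~(size - 1), i.e. 0xfffff000 on a 32-bit host */
}
#endif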
218
219static inline PageDesc *page_find_alloc(unsigned int index)
220{
221 PageDesc **lp, *p;
222
223 lp = &l1_map[index >> L2_BITS];
224 p = *lp;
225 if (!p) {
226 /* allocate if not found */
227 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
228 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
229 *lp = p;
230 }
231 return p + (index & (L2_SIZE - 1));
232}
233
234static inline PageDesc *page_find(unsigned int index)
235{
236 PageDesc *p;
237
238 p = l1_map[index >> L2_BITS];
239 if (!p)
240 return 0;
241 return p + (index & (L2_SIZE - 1));
242}
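/* Illustrative sketch of how the two-level table above splits a target page
   index (L2_BITS == 10): the high bits select an l1_map[] slot, the low
   L2_BITS index the PageDesc chunk. The helper below is hypothetical. */
#if 0
static PageDesc *example_page_index_split(unsigned int index)
{
    unsigned int l1 = index >> L2_BITS;         /* slot in l1_map[]        */
    unsigned int l2 = index & (L2_SIZE - 1);    /* entry inside that chunk */
    return l1_map[l1] ? l1_map[l1] + l2 : 0;    /* same result as page_find() */
}
#endif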
243
244static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
245{
246 void **lp, **p;
247 PhysPageDesc *pd;
248
249 p = (void **)l1_phys_map;
250#if TARGET_PHYS_ADDR_SPACE_BITS > 32
251
252#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
253#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
254#endif
255 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
256 p = *lp;
257 if (!p) {
258 /* allocate if not found */
259 if (!alloc)
260 return NULL;
261 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
262 memset(p, 0, sizeof(void *) * L1_SIZE);
263 *lp = p;
264 }
265#endif
266 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
267 pd = *lp;
268 if (!pd) {
269 int i;
270 /* allocate if not found */
271 if (!alloc)
272 return NULL;
273 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
274 *lp = pd;
275 for (i = 0; i < L2_SIZE; i++)
276 pd[i].phys_offset = IO_MEM_UNASSIGNED;
277 }
278#ifdef VBOX
279 pd = ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
280 if (RT_UNLIKELY((pd->phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING))
281 remR3GrowDynRange(pd->phys_offset & TARGET_PAGE_MASK);
282 return pd;
283#else
284 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
285#endif
286}
287
288static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
289{
290 return phys_page_find_alloc(index, 0);
291}
292
293#if !defined(CONFIG_USER_ONLY)
294static void tlb_protect_code(ram_addr_t ram_addr);
295static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
296 target_ulong vaddr);
297#endif
298
299void cpu_exec_init(CPUState *env)
300{
301 CPUState **penv;
302 int cpu_index;
303
304 if (!code_gen_ptr) {
305 code_gen_ptr = code_gen_buffer;
306 page_init();
307 io_mem_init();
308 }
309 env->next_cpu = NULL;
310 penv = &first_cpu;
311 cpu_index = 0;
312 while (*penv != NULL) {
313 penv = (CPUState **)&(*penv)->next_cpu;
314 cpu_index++;
315 }
316 env->cpu_index = cpu_index;
317 *penv = env;
318}
319
320static inline void invalidate_page_bitmap(PageDesc *p)
321{
322 if (p->code_bitmap) {
323 qemu_free(p->code_bitmap);
324 p->code_bitmap = NULL;
325 }
326 p->code_write_count = 0;
327}
328
329/* set to NULL all the 'first_tb' fields in all PageDescs */
330static void page_flush_tb(void)
331{
332 int i, j;
333 PageDesc *p;
334
335 for(i = 0; i < L1_SIZE; i++) {
336 p = l1_map[i];
337 if (p) {
338 for(j = 0; j < L2_SIZE; j++) {
339 p->first_tb = NULL;
340 invalidate_page_bitmap(p);
341 p++;
342 }
343 }
344 }
345}
346
347/* flush all the translation blocks */
348/* XXX: tb_flush is currently not thread safe */
349void tb_flush(CPUState *env1)
350{
351 CPUState *env;
352#if defined(DEBUG_FLUSH)
353 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
354 code_gen_ptr - code_gen_buffer,
355 nb_tbs,
356 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
357#endif
358 nb_tbs = 0;
359
360 for(env = first_cpu; env != NULL; env = env->next_cpu) {
361 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
362 }
363
364 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
365 page_flush_tb();
366
367 code_gen_ptr = code_gen_buffer;
368 /* XXX: flush processor icache at this point if cache flush is
369 expensive */
370 tb_flush_count++;
371}
372
373#ifdef DEBUG_TB_CHECK
374
375static void tb_invalidate_check(unsigned long address)
376{
377 TranslationBlock *tb;
378 int i;
379 address &= TARGET_PAGE_MASK;
380 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
381 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
382 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
383 address >= tb->pc + tb->size)) {
384 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
385 address, (long)tb->pc, tb->size);
386 }
387 }
388 }
389}
390
391/* verify that all the pages have correct rights for code */
392static void tb_page_check(void)
393{
394 TranslationBlock *tb;
395 int i, flags1, flags2;
396
397 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
398 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
399 flags1 = page_get_flags(tb->pc);
400 flags2 = page_get_flags(tb->pc + tb->size - 1);
401 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
402 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
403 (long)tb->pc, tb->size, flags1, flags2);
404 }
405 }
406 }
407}
408
409void tb_jmp_check(TranslationBlock *tb)
410{
411 TranslationBlock *tb1;
412 unsigned int n1;
413
414 /* suppress any remaining jumps to this TB */
415 tb1 = tb->jmp_first;
416 for(;;) {
417 n1 = (long)tb1 & 3;
418 tb1 = (TranslationBlock *)((long)tb1 & ~3);
419 if (n1 == 2)
420 break;
421 tb1 = tb1->jmp_next[n1];
422 }
423 /* check end of list */
424 if (tb1 != tb) {
425 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
426 }
427}
428
429#endif
430
431/* invalidate one TB */
432static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
433 int next_offset)
434{
435 TranslationBlock *tb1;
436 for(;;) {
437 tb1 = *ptb;
438 if (tb1 == tb) {
439 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
440 break;
441 }
442 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
443 }
444}
445
446static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
447{
448 TranslationBlock *tb1;
449 unsigned int n1;
450
451 for(;;) {
452 tb1 = *ptb;
453 n1 = (long)tb1 & 3;
454 tb1 = (TranslationBlock *)((long)tb1 & ~3);
455 if (tb1 == tb) {
456 *ptb = tb1->page_next[n1];
457 break;
458 }
459 ptb = &tb1->page_next[n1];
460 }
461}
462
463static inline void tb_jmp_remove(TranslationBlock *tb, int n)
464{
465 TranslationBlock *tb1, **ptb;
466 unsigned int n1;
467
468 ptb = &tb->jmp_next[n];
469 tb1 = *ptb;
470 if (tb1) {
471 /* find tb(n) in circular list */
472 for(;;) {
473 tb1 = *ptb;
474 n1 = (long)tb1 & 3;
475 tb1 = (TranslationBlock *)((long)tb1 & ~3);
476 if (n1 == n && tb1 == tb)
477 break;
478 if (n1 == 2) {
479 ptb = &tb1->jmp_first;
480 } else {
481 ptb = &tb1->jmp_next[n1];
482 }
483 }
484 /* now we can suppress tb(n) from the list */
485 *ptb = tb->jmp_next[n];
486
487 tb->jmp_next[n] = NULL;
488 }
489}
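/* The jump lists above tag pointers in their low two bits: 0 or 1 names the
   jmp_next[] slot of the pointed-to TB that the link lives in, and 2 marks
   the circular list head (jmp_first). A hedged decoding sketch with a
   hypothetical helper name: */
#if 0
static TranslationBlock *example_untag(TranslationBlock *link, unsigned int *pn)
{
    *pn = (long)link & 3;                          /* 0, 1, or 2 (list head) */
    return (TranslationBlock *)((long)link & ~3);  /* real TB pointer        */
}
#endif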
490
491/* reset the jump entry 'n' of a TB so that it is not chained to
492 another TB */
493static inline void tb_reset_jump(TranslationBlock *tb, int n)
494{
495 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
496}
497
498static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
499{
500 CPUState *env;
501 PageDesc *p;
502 unsigned int h, n1;
503 target_ulong phys_pc;
504 TranslationBlock *tb1, *tb2;
505
506 /* remove the TB from the hash list */
507 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
508 h = tb_phys_hash_func(phys_pc);
509 tb_remove(&tb_phys_hash[h], tb,
510 offsetof(TranslationBlock, phys_hash_next));
511
512 /* remove the TB from the page list */
513 if (tb->page_addr[0] != page_addr) {
514 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
515 tb_page_remove(&p->first_tb, tb);
516 invalidate_page_bitmap(p);
517 }
518 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
519 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
520 tb_page_remove(&p->first_tb, tb);
521 invalidate_page_bitmap(p);
522 }
523
524 tb_invalidated_flag = 1;
525
526 /* remove the TB from the hash list */
527 h = tb_jmp_cache_hash_func(tb->pc);
528 for(env = first_cpu; env != NULL; env = env->next_cpu) {
529 if (env->tb_jmp_cache[h] == tb)
530 env->tb_jmp_cache[h] = NULL;
531 }
532
533 /* suppress this TB from the two jump lists */
534 tb_jmp_remove(tb, 0);
535 tb_jmp_remove(tb, 1);
536
537 /* suppress any remaining jumps to this TB */
538 tb1 = tb->jmp_first;
539 for(;;) {
540 n1 = (long)tb1 & 3;
541 if (n1 == 2)
542 break;
543 tb1 = (TranslationBlock *)((long)tb1 & ~3);
544 tb2 = tb1->jmp_next[n1];
545 tb_reset_jump(tb1, n1);
546 tb1->jmp_next[n1] = NULL;
547 tb1 = tb2;
548 }
549 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
550
551#ifndef VBOX
552 tb_phys_invalidate_count++;
553#endif /* !VBOX */
554}
555
556#ifdef VBOX
557void tb_invalidate_virt(CPUState *env, uint32_t eip)
558{
559# if 1
560 tb_flush(env);
561# else
562 uint8_t *cs_base, *pc;
563 unsigned int flags, h, phys_pc;
564 TranslationBlock *tb, **ptb;
565
566 flags = env->hflags;
567 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
568 cs_base = env->segs[R_CS].base;
569 pc = cs_base + eip;
570
571 tb = tb_find(&ptb, (unsigned long)pc, (unsigned long)cs_base,
572 flags);
573
574 if(tb)
575 {
576# ifdef DEBUG
577 printf("invalidating TB (%08X) at %08X\n", tb, eip);
578# endif
579 tb_invalidate(tb);
580 //Note: this will leak TBs, but the whole cache will be flushed
581 // when it happens too often
582 tb->pc = 0;
583 tb->cs_base = 0;
584 tb->flags = 0;
585 }
586# endif
587}
588
589# ifdef VBOX_STRICT
590/**
591 * Gets the page offset.
592 */
593unsigned long get_phys_page_offset(target_ulong addr)
594{
595 PhysPageDesc *p = phys_page_find(addr >> TARGET_PAGE_BITS);
596 return p ? p->phys_offset : 0;
597}
598# endif /* VBOX_STRICT */
599#endif /* VBOX */
600
601static inline void set_bits(uint8_t *tab, int start, int len)
602{
603 int end, mask, end1;
604
605 end = start + len;
606 tab += start >> 3;
607 mask = 0xff << (start & 7);
608 if ((start & ~7) == (end & ~7)) {
609 if (start < end) {
610 mask &= ~(0xff << (end & 7));
611 *tab |= mask;
612 }
613 } else {
614 *tab++ |= mask;
615 start = (start + 8) & ~7;
616 end1 = end & ~7;
617 while (start < end1) {
618 *tab++ = 0xff;
619 start += 8;
620 }
621 if (start < end) {
622 mask = ~(0xff << (end & 7));
623 *tab |= mask;
624 }
625 }
626}
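/* A small usage sketch for set_bits(): marking bits 5..12 of a cleared
   bitmap touches two bytes (values follow the bit ordering used above). */
#if 0
static void example_set_bits(void)
{
    uint8_t bitmap[TARGET_PAGE_SIZE / 8];
    memset(bitmap, 0, sizeof(bitmap));
    set_bits(bitmap, 5, 8);        /* bitmap[0] == 0xe0, bitmap[1] == 0x1f */
}
#endif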
627
628static void build_page_bitmap(PageDesc *p)
629{
630 int n, tb_start, tb_end;
631 TranslationBlock *tb;
632
633 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
634 if (!p->code_bitmap)
635 return;
636 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
637
638 tb = p->first_tb;
639 while (tb != NULL) {
640 n = (long)tb & 3;
641 tb = (TranslationBlock *)((long)tb & ~3);
642 /* NOTE: this is subtle as a TB may span two physical pages */
643 if (n == 0) {
644 /* NOTE: tb_end may be after the end of the page, but
645 it is not a problem */
646 tb_start = tb->pc & ~TARGET_PAGE_MASK;
647 tb_end = tb_start + tb->size;
648 if (tb_end > TARGET_PAGE_SIZE)
649 tb_end = TARGET_PAGE_SIZE;
650 } else {
651 tb_start = 0;
652 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
653 }
654 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
655 tb = tb->page_next[n];
656 }
657}
658
659#ifdef TARGET_HAS_PRECISE_SMC
660
661static void tb_gen_code(CPUState *env,
662 target_ulong pc, target_ulong cs_base, int flags,
663 int cflags)
664{
665 TranslationBlock *tb;
666 uint8_t *tc_ptr;
667 target_ulong phys_pc, phys_page2, virt_page2;
668 int code_gen_size;
669
670 phys_pc = get_phys_addr_code(env, pc);
671 tb = tb_alloc(pc);
672 if (!tb) {
673 /* flush must be done */
674 tb_flush(env);
675 /* cannot fail at this point */
676 tb = tb_alloc(pc);
677 }
678 tc_ptr = code_gen_ptr;
679 tb->tc_ptr = tc_ptr;
680 tb->cs_base = cs_base;
681 tb->flags = flags;
682 tb->cflags = cflags;
683 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
684 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
685
686 /* check next page if needed */
687 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
688 phys_page2 = -1;
689 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
690 phys_page2 = get_phys_addr_code(env, virt_page2);
691 }
692 tb_link_phys(tb, phys_pc, phys_page2);
693}
694#endif
695
696/* invalidate all TBs which intersect with the target physical page
697 range [start, end). NOTE: start and end must refer to
698 the same physical page. 'is_cpu_write_access' should be true if called
699 from a real cpu write access: the virtual CPU will exit the current
700 TB if code is modified inside this TB. */
701void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
702 int is_cpu_write_access)
703{
704 int n, current_tb_modified, current_tb_not_found, current_flags;
705 CPUState *env = cpu_single_env;
706 PageDesc *p;
707 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
708 target_ulong tb_start, tb_end;
709 target_ulong current_pc, current_cs_base;
710
711 p = page_find(start >> TARGET_PAGE_BITS);
712 if (!p)
713 return;
714 if (!p->code_bitmap &&
715 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
716 is_cpu_write_access) {
717 /* build code bitmap */
718 build_page_bitmap(p);
719 }
720
721 /* we remove all the TBs in the range [start, end) */
722 /* XXX: see if in some cases it could be faster to invalidate all the code */
723 current_tb_not_found = is_cpu_write_access;
724 current_tb_modified = 0;
725 current_tb = NULL; /* avoid warning */
726 current_pc = 0; /* avoid warning */
727 current_cs_base = 0; /* avoid warning */
728 current_flags = 0; /* avoid warning */
729 tb = p->first_tb;
730 while (tb != NULL) {
731 n = (long)tb & 3;
732 tb = (TranslationBlock *)((long)tb & ~3);
733 tb_next = tb->page_next[n];
734 /* NOTE: this is subtle as a TB may span two physical pages */
735 if (n == 0) {
736 /* NOTE: tb_end may be after the end of the page, but
737 it is not a problem */
738 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
739 tb_end = tb_start + tb->size;
740 } else {
741 tb_start = tb->page_addr[1];
742 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
743 }
744 if (!(tb_end <= start || tb_start >= end)) {
745#ifdef TARGET_HAS_PRECISE_SMC
746 if (current_tb_not_found) {
747 current_tb_not_found = 0;
748 current_tb = NULL;
749 if (env->mem_write_pc) {
750 /* now we have a real cpu fault */
751 current_tb = tb_find_pc(env->mem_write_pc);
752 }
753 }
754 if (current_tb == tb &&
755 !(current_tb->cflags & CF_SINGLE_INSN)) {
756 /* If we are modifying the current TB, we must stop
757 its execution. We could be more precise by checking
758 that the modification is after the current PC, but it
759 would require a specialized function to partially
760 restore the CPU state */
761
762 current_tb_modified = 1;
763 cpu_restore_state(current_tb, env,
764 env->mem_write_pc, NULL);
765#if defined(TARGET_I386)
766 current_flags = env->hflags;
767 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
768 current_cs_base = (target_ulong)env->segs[R_CS].base;
769 current_pc = current_cs_base + env->eip;
770#else
771#error unsupported CPU
772#endif
773 }
774#endif /* TARGET_HAS_PRECISE_SMC */
775 /* we need to do that to handle the case where a signal
776 occurs while doing tb_phys_invalidate() */
777 saved_tb = NULL;
778 if (env) {
779 saved_tb = env->current_tb;
780 env->current_tb = NULL;
781 }
782 tb_phys_invalidate(tb, -1);
783 if (env) {
784 env->current_tb = saved_tb;
785 if (env->interrupt_request && env->current_tb)
786 cpu_interrupt(env, env->interrupt_request);
787 }
788 }
789 tb = tb_next;
790 }
791#if !defined(CONFIG_USER_ONLY)
792 /* if no code remaining, no need to continue to use slow writes */
793 if (!p->first_tb) {
794 invalidate_page_bitmap(p);
795 if (is_cpu_write_access) {
796 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
797 }
798 }
799#endif
800#ifdef TARGET_HAS_PRECISE_SMC
801 if (current_tb_modified) {
802 /* we generate a block containing just the instruction
803 modifying the memory. It will ensure that it cannot modify
804 itself */
805 env->current_tb = NULL;
806 tb_gen_code(env, current_pc, current_cs_base, current_flags,
807 CF_SINGLE_INSN);
808 cpu_resume_from_signal(env, NULL);
809 }
810#endif
811}
812
813/* len must be <= 8 and start must be a multiple of len */
814static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
815{
816 PageDesc *p;
817 int offset, b;
818#if 0
819 if (1) {
820 if (loglevel) {
821 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
822 cpu_single_env->mem_write_vaddr, len,
823 cpu_single_env->eip,
824 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
825 }
826 }
827#endif
828 p = page_find(start >> TARGET_PAGE_BITS);
829 if (!p)
830 return;
831 if (p->code_bitmap) {
832 offset = start & ~TARGET_PAGE_MASK;
833 b = p->code_bitmap[offset >> 3] >> (offset & 7);
834 if (b & ((1 << len) - 1))
835 goto do_invalidate;
836 } else {
837 do_invalidate:
838 tb_invalidate_phys_page_range(start, start + len, 1);
839 }
840}
841
842#if !defined(CONFIG_SOFTMMU)
843static void tb_invalidate_phys_page(target_ulong addr,
844 unsigned long pc, void *puc)
845{
846 int n, current_flags, current_tb_modified;
847 target_ulong current_pc, current_cs_base;
848 PageDesc *p;
849 TranslationBlock *tb, *current_tb;
850#ifdef TARGET_HAS_PRECISE_SMC
851 CPUState *env = cpu_single_env;
852#endif
853
854 addr &= TARGET_PAGE_MASK;
855 p = page_find(addr >> TARGET_PAGE_BITS);
856 if (!p)
857 return;
858 tb = p->first_tb;
859 current_tb_modified = 0;
860 current_tb = NULL;
861 current_pc = 0; /* avoid warning */
862 current_cs_base = 0; /* avoid warning */
863 current_flags = 0; /* avoid warning */
864#ifdef TARGET_HAS_PRECISE_SMC
865 if (tb && pc != 0) {
866 current_tb = tb_find_pc(pc);
867 }
868#endif
869 while (tb != NULL) {
870 n = (long)tb & 3;
871 tb = (TranslationBlock *)((long)tb & ~3);
872#ifdef TARGET_HAS_PRECISE_SMC
873 if (current_tb == tb &&
874 !(current_tb->cflags & CF_SINGLE_INSN)) {
875 /* If we are modifying the current TB, we must stop
876 its execution. We could be more precise by checking
877 that the modification is after the current PC, but it
878 would require a specialized function to partially
879 restore the CPU state */
880
881 current_tb_modified = 1;
882 cpu_restore_state(current_tb, env, pc, puc);
883#if defined(TARGET_I386)
884 current_flags = env->hflags;
885 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
886 current_cs_base = (target_ulong)env->segs[R_CS].base;
887 current_pc = current_cs_base + env->eip;
888#else
889#error unsupported CPU
890#endif
891 }
892#endif /* TARGET_HAS_PRECISE_SMC */
893 tb_phys_invalidate(tb, addr);
894 tb = tb->page_next[n];
895 }
896 p->first_tb = NULL;
897#ifdef TARGET_HAS_PRECISE_SMC
898 if (current_tb_modified) {
899 /* we generate a block containing just the instruction
900 modifying the memory. It will ensure that it cannot modify
901 itself */
902 env->current_tb = NULL;
903 tb_gen_code(env, current_pc, current_cs_base, current_flags,
904 CF_SINGLE_INSN);
905 cpu_resume_from_signal(env, puc);
906 }
907#endif
908}
909#endif
910
911/* add the tb in the target page and protect it if necessary */
912static inline void tb_alloc_page(TranslationBlock *tb,
913 unsigned int n, target_ulong page_addr)
914{
915 PageDesc *p;
916 TranslationBlock *last_first_tb;
917
918 tb->page_addr[n] = page_addr;
919 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
920 tb->page_next[n] = p->first_tb;
921 last_first_tb = p->first_tb;
922 p->first_tb = (TranslationBlock *)((long)tb | n);
923 invalidate_page_bitmap(p);
924
925#if defined(TARGET_HAS_SMC) || 1
926
927#if defined(CONFIG_USER_ONLY)
928 if (p->flags & PAGE_WRITE) {
929 target_ulong addr;
930 PageDesc *p2;
931 int prot;
932
933 /* force the host page as non writable (writes will have a
934 page fault + mprotect overhead) */
935 page_addr &= qemu_host_page_mask;
936 prot = 0;
937 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
938 addr += TARGET_PAGE_SIZE) {
939
940 p2 = page_find (addr >> TARGET_PAGE_BITS);
941 if (!p2)
942 continue;
943 prot |= p2->flags;
944 p2->flags &= ~PAGE_WRITE;
945 page_get_flags(addr);
946 }
947 mprotect(g2h(page_addr), qemu_host_page_size,
948 (prot & PAGE_BITS) & ~PAGE_WRITE);
949#ifdef DEBUG_TB_INVALIDATE
950 printf("protecting code page: 0x%08lx\n",
951 page_addr);
952#endif
953 }
954#else
955 /* if some code is already present, then the pages are already
956 protected. So we handle the case where only the first TB is
957 allocated in a physical page */
958 if (!last_first_tb) {
959 tlb_protect_code(page_addr);
960 }
961#endif
962
963#endif /* TARGET_HAS_SMC */
964}
965
966/* Allocate a new translation block. Flush the translation buffer if
967 too many translation blocks or too much generated code. */
968TranslationBlock *tb_alloc(target_ulong pc)
969{
970 TranslationBlock *tb;
971
972 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
973 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
974 return NULL;
975 tb = &tbs[nb_tbs++];
976 tb->pc = pc;
977 tb->cflags = 0;
978 return tb;
979}
980
981/* add a new TB and link it to the physical page tables. phys_page2 is
982 (-1) to indicate that only one page contains the TB. */
983void tb_link_phys(TranslationBlock *tb,
984 target_ulong phys_pc, target_ulong phys_page2)
985{
986 unsigned int h;
987 TranslationBlock **ptb;
988
989 /* add in the physical hash table */
990 h = tb_phys_hash_func(phys_pc);
991 ptb = &tb_phys_hash[h];
992 tb->phys_hash_next = *ptb;
993 *ptb = tb;
994
995 /* add in the page list */
996 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
997 if (phys_page2 != -1)
998 tb_alloc_page(tb, 1, phys_page2);
999 else
1000 tb->page_addr[1] = -1;
1001
1002 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1003 tb->jmp_next[0] = NULL;
1004 tb->jmp_next[1] = NULL;
1005#ifdef USE_CODE_COPY
1006 tb->cflags &= ~CF_FP_USED;
1007 if (tb->cflags & CF_TB_FP_USED)
1008 tb->cflags |= CF_FP_USED;
1009#endif
1010
1011 /* init original jump addresses */
1012 if (tb->tb_next_offset[0] != 0xffff)
1013 tb_reset_jump(tb, 0);
1014 if (tb->tb_next_offset[1] != 0xffff)
1015 tb_reset_jump(tb, 1);
1016
1017#ifdef DEBUG_TB_CHECK
1018 tb_page_check();
1019#endif
1020}
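/* Hedged sketch of the usual creation sequence, mirroring tb_gen_code()
   above: allocate a TB, translate it, then link it into the physical hash
   and page lists. The helper name is hypothetical. */
#if 0
static void example_tb_creation(CPUState *env, target_ulong pc,
                                target_ulong cs_base, int flags)
{
    int code_gen_size;
    TranslationBlock *tb = tb_alloc(pc);
    if (!tb) {                          /* buffer full: flush and retry */
        tb_flush(env);
        tb = tb_alloc(pc);
    }
    tb->tc_ptr = code_gen_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    /* tb_gen_code() above also advances code_gen_ptr past the emitted code */
    tb_link_phys(tb, get_phys_addr_code(env, pc), -1 /* single page */);
}
#endif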
1021
1022/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1023 tb[1].tc_ptr. Return NULL if not found */
1024TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1025{
1026 int m_min, m_max, m;
1027 unsigned long v;
1028 TranslationBlock *tb;
1029
1030 if (nb_tbs <= 0)
1031 return NULL;
1032 if (tc_ptr < (unsigned long)code_gen_buffer ||
1033 tc_ptr >= (unsigned long)code_gen_ptr)
1034 return NULL;
1035 /* binary search (cf Knuth) */
1036 m_min = 0;
1037 m_max = nb_tbs - 1;
1038 while (m_min <= m_max) {
1039 m = (m_min + m_max) >> 1;
1040 tb = &tbs[m];
1041 v = (unsigned long)tb->tc_ptr;
1042 if (v == tc_ptr)
1043 return tb;
1044 else if (tc_ptr < v) {
1045 m_max = m - 1;
1046 } else {
1047 m_min = m + 1;
1048 }
1049 }
1050 return &tbs[m_max];
1051}
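/* Usage sketch: tb_find_pc() is typically handed a host PC taken from a
   fault or signal context so the guest state can be rolled back, as the
   callers of cpu_restore_state() in this file do. Hypothetical helper: */
#if 0
static void example_find_faulting_tb(CPUState *env, unsigned long host_pc)
{
    TranslationBlock *tb = tb_find_pc(host_pc);
    if (tb)
        cpu_restore_state(tb, env, host_pc, NULL);
}
#endif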
1052
1053static void tb_reset_jump_recursive(TranslationBlock *tb);
1054
1055static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1056{
1057 TranslationBlock *tb1, *tb_next, **ptb;
1058 unsigned int n1;
1059
1060 tb1 = tb->jmp_next[n];
1061 if (tb1 != NULL) {
1062 /* find head of list */
1063 for(;;) {
1064 n1 = (long)tb1 & 3;
1065 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1066 if (n1 == 2)
1067 break;
1068 tb1 = tb1->jmp_next[n1];
1069 }
1070 /* we are now sure that tb jumps to tb1 */
1071 tb_next = tb1;
1072
1073 /* remove tb from the jmp_first list */
1074 ptb = &tb_next->jmp_first;
1075 for(;;) {
1076 tb1 = *ptb;
1077 n1 = (long)tb1 & 3;
1078 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1079 if (n1 == n && tb1 == tb)
1080 break;
1081 ptb = &tb1->jmp_next[n1];
1082 }
1083 *ptb = tb->jmp_next[n];
1084 tb->jmp_next[n] = NULL;
1085
1086 /* suppress the jump to next tb in generated code */
1087 tb_reset_jump(tb, n);
1088
1089 /* suppress jumps in the tb on which we could have jumped */
1090 tb_reset_jump_recursive(tb_next);
1091 }
1092}
1093
1094static void tb_reset_jump_recursive(TranslationBlock *tb)
1095{
1096 tb_reset_jump_recursive2(tb, 0);
1097 tb_reset_jump_recursive2(tb, 1);
1098}
1099
1100#if defined(TARGET_HAS_ICE)
1101static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1102{
1103 target_ulong addr, pd;
1104 ram_addr_t ram_addr;
1105 PhysPageDesc *p;
1106
1107 addr = cpu_get_phys_page_debug(env, pc);
1108 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1109 if (!p) {
1110 pd = IO_MEM_UNASSIGNED;
1111 } else {
1112 pd = p->phys_offset;
1113 }
1114 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1115 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1116}
1117#endif
1118
1119/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1120 breakpoint is reached */
1121int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1122{
1123#if defined(TARGET_HAS_ICE)
1124 int i;
1125
1126 for(i = 0; i < env->nb_breakpoints; i++) {
1127 if (env->breakpoints[i] == pc)
1128 return 0;
1129 }
1130
1131 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1132 return -1;
1133 env->breakpoints[env->nb_breakpoints++] = pc;
1134
1135 breakpoint_invalidate(env, pc);
1136 return 0;
1137#else
1138 return -1;
1139#endif
1140}
1141
1142/* remove a breakpoint */
1143int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1144{
1145#if defined(TARGET_HAS_ICE)
1146 int i;
1147 for(i = 0; i < env->nb_breakpoints; i++) {
1148 if (env->breakpoints[i] == pc)
1149 goto found;
1150 }
1151 return -1;
1152 found:
1153 env->nb_breakpoints--;
1154 if (i < env->nb_breakpoints)
1155 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1156
1157 breakpoint_invalidate(env, pc);
1158 return 0;
1159#else
1160 return -1;
1161#endif
1162}
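/* Hedged usage sketch for the breakpoint API above; the surrounding CPU
   loop returns EXCP_DEBUG when the breakpoint is hit. */
#if 0
static void example_breakpoint(CPUState *env, target_ulong pc)
{
    if (cpu_breakpoint_insert(env, pc) == 0) {
        /* ... run the CPU loop until it returns EXCP_DEBUG ... */
        cpu_breakpoint_remove(env, pc);
    }
}
#endif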
1163
1164/* enable or disable single step mode. EXCP_DEBUG is returned by the
1165 CPU loop after each instruction */
1166void cpu_single_step(CPUState *env, int enabled)
1167{
1168#if defined(TARGET_HAS_ICE)
1169 if (env->singlestep_enabled != enabled) {
1170 env->singlestep_enabled = enabled;
1171 /* must flush all the translated code to avoid inconsistencies */
1172 /* XXX: only flush what is necessary */
1173 tb_flush(env);
1174 }
1175#endif
1176}
1177
1178#ifndef VBOX
1179/* enable or disable low levels log */
1180void cpu_set_log(int log_flags)
1181{
1182 loglevel = log_flags;
1183 if (loglevel && !logfile) {
1184 logfile = fopen(logfilename, "w");
1185 if (!logfile) {
1186 perror(logfilename);
1187 _exit(1);
1188 }
1189#if !defined(CONFIG_SOFTMMU)
1190 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1191 {
1192 static uint8_t logfile_buf[4096];
1193 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1194 }
1195#else
1196 setvbuf(logfile, NULL, _IOLBF, 0);
1197#endif
1198 }
1199}
1200
1201void cpu_set_log_filename(const char *filename)
1202{
1203 logfilename = strdup(filename);
1204}
1205#endif /* !VBOX */
1206
1207/* mask must never be zero, except for A20 change call */
1208void cpu_interrupt(CPUState *env, int mask)
1209{
1210 TranslationBlock *tb;
1211 static int interrupt_lock;
1212
1213#ifdef VBOX
1214 VM_ASSERT_EMT(env->pVM);
1215 ASMAtomicOrS32(&env->interrupt_request, mask);
1216#else /* !VBOX */
1217 env->interrupt_request |= mask;
1218#endif /* !VBOX */
1219 /* if the cpu is currently executing code, we must unlink it and
1220 all the potentially executing TB */
1221 tb = env->current_tb;
1222 if (tb && !testandset(&interrupt_lock)) {
1223 env->current_tb = NULL;
1224 tb_reset_jump_recursive(tb);
1225 interrupt_lock = 0;
1226 }
1227}
1228
1229void cpu_reset_interrupt(CPUState *env, int mask)
1230{
1231#ifdef VBOX
1232 /*
1233 * Note: the current implementation can be executed by another thread without problems; make sure this remains true
1234 * for future changes!
1235 */
1236 ASMAtomicAndS32(&env->interrupt_request, ~mask);
1237#else /* !VBOX */
1238 env->interrupt_request &= ~mask;
1239#endif /* !VBOX */
1240}
1241
1242#ifndef VBOX
1243CPULogItem cpu_log_items[] = {
1244 { CPU_LOG_TB_OUT_ASM, "out_asm",
1245 "show generated host assembly code for each compiled TB" },
1246 { CPU_LOG_TB_IN_ASM, "in_asm",
1247 "show target assembly code for each compiled TB" },
1248 { CPU_LOG_TB_OP, "op",
1249 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1250#ifdef TARGET_I386
1251 { CPU_LOG_TB_OP_OPT, "op_opt",
1252 "show micro ops after optimization for each compiled TB" },
1253#endif
1254 { CPU_LOG_INT, "int",
1255 "show interrupts/exceptions in short format" },
1256 { CPU_LOG_EXEC, "exec",
1257 "show trace before each executed TB (lots of logs)" },
1258 { CPU_LOG_TB_CPU, "cpu",
1259 "show CPU state before bloc translation" },
1260#ifdef TARGET_I386
1261 { CPU_LOG_PCALL, "pcall",
1262 "show protected mode far calls/returns/exceptions" },
1263#endif
1264#ifdef DEBUG_IOPORT
1265 { CPU_LOG_IOPORT, "ioport",
1266 "show all i/o ports accesses" },
1267#endif
1268 { 0, NULL, NULL },
1269};
1270
1271static int cmp1(const char *s1, int n, const char *s2)
1272{
1273 if (strlen(s2) != n)
1274 return 0;
1275 return memcmp(s1, s2, n) == 0;
1276}
1277
1278/* takes a comma separated list of log masks. Return 0 if error. */
1279int cpu_str_to_log_mask(const char *str)
1280{
1281 CPULogItem *item;
1282 int mask;
1283 const char *p, *p1;
1284
1285 p = str;
1286 mask = 0;
1287 for(;;) {
1288 p1 = strchr(p, ',');
1289 if (!p1)
1290 p1 = p + strlen(p);
1291 if(cmp1(p,p1-p,"all")) {
1292 for(item = cpu_log_items; item->mask != 0; item++) {
1293 mask |= item->mask;
1294 }
1295 } else {
1296 for(item = cpu_log_items; item->mask != 0; item++) {
1297 if (cmp1(p, p1 - p, item->name))
1298 goto found;
1299 }
1300 return 0;
1301 }
1302 found:
1303 mask |= item->mask;
1304 if (*p1 != ',')
1305 break;
1306 p = p1 + 1;
1307 }
1308 return mask;
1309}
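/* Illustrative use of the log-mask parser above: "in_asm,int" yields
   CPU_LOG_TB_IN_ASM | CPU_LOG_INT, "all" ORs every entry of cpu_log_items[],
   and an unknown name makes the whole call return 0. */
#if 0
static void example_set_log(void)
{
    int mask = cpu_str_to_log_mask("in_asm,int");
    if (mask)
        cpu_set_log(mask);
}
#endif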
1310#endif /* !VBOX */
1311
1312#ifndef VBOX /* VBOX: we have our own routine. */
1313void cpu_abort(CPUState *env, const char *fmt, ...)
1314{
1315 va_list ap;
1316
1317 va_start(ap, fmt);
1318 fprintf(stderr, "qemu: fatal: ");
1319 vfprintf(stderr, fmt, ap);
1320 fprintf(stderr, "\n");
1321#ifdef TARGET_I386
1322 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1323#else
1324 cpu_dump_state(env, stderr, fprintf, 0);
1325#endif
1326 va_end(ap);
1327 abort();
1328}
1329#endif /* !VBOX */
1330
1331#if !defined(CONFIG_USER_ONLY)
1332
1333/* NOTE: if flush_global is true, also flush global entries (not
1334 implemented yet) */
1335void tlb_flush(CPUState *env, int flush_global)
1336{
1337 int i;
1338
1339#if defined(DEBUG_TLB)
1340 printf("tlb_flush:\n");
1341#endif
1342 /* must reset current TB so that interrupts cannot modify the
1343 links while we are modifying them */
1344 env->current_tb = NULL;
1345
1346 for(i = 0; i < CPU_TLB_SIZE; i++) {
1347 env->tlb_table[0][i].addr_read = -1;
1348 env->tlb_table[0][i].addr_write = -1;
1349 env->tlb_table[0][i].addr_code = -1;
1350 env->tlb_table[1][i].addr_read = -1;
1351 env->tlb_table[1][i].addr_write = -1;
1352 env->tlb_table[1][i].addr_code = -1;
1353 }
1354
1355 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1356
1357#if !defined(CONFIG_SOFTMMU)
1358 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1359#endif
1360#ifdef VBOX
1361 /* inform raw mode about TLB flush */
1362 remR3FlushTLB(env, flush_global);
1363#endif
1364#ifdef USE_KQEMU
1365 if (env->kqemu_enabled) {
1366 kqemu_flush(env, flush_global);
1367 }
1368#endif
1369 tlb_flush_count++;
1370}
1371
1372static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1373{
1374 if (addr == (tlb_entry->addr_read &
1375 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1376 addr == (tlb_entry->addr_write &
1377 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1378 addr == (tlb_entry->addr_code &
1379 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1380 tlb_entry->addr_read = -1;
1381 tlb_entry->addr_write = -1;
1382 tlb_entry->addr_code = -1;
1383 }
1384}
1385
1386void tlb_flush_page(CPUState *env, target_ulong addr)
1387{
1388 int i;
1389 TranslationBlock *tb;
1390
1391#if defined(DEBUG_TLB)
1392 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1393#endif
1394 /* must reset current TB so that interrupts cannot modify the
1395 links while we are modifying them */
1396 env->current_tb = NULL;
1397
1398 addr &= TARGET_PAGE_MASK;
1399 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1400 tlb_flush_entry(&env->tlb_table[0][i], addr);
1401 tlb_flush_entry(&env->tlb_table[1][i], addr);
1402
1403 /* Discard jump cache entries for any tb which might potentially
1404 overlap the flushed page. */
1405 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1406 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1407
1408 i = tb_jmp_cache_hash_page(addr);
1409 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1410
1411#if !defined(CONFIG_SOFTMMU)
1412 if (addr < MMAP_AREA_END)
1413 munmap((void *)addr, TARGET_PAGE_SIZE);
1414#endif
1415#ifdef VBOX
1416 /* inform raw mode about TLB page flush */
1417 remR3FlushPage(env, addr);
1418#endif /* VBOX */
1419#ifdef USE_KQEMU
1420 if (env->kqemu_enabled) {
1421 kqemu_flush_page(env, addr);
1422 }
1423#endif
1424}
1425
1426/* update the TLBs so that writes to code in the virtual page 'addr'
1427 can be detected */
1428static void tlb_protect_code(ram_addr_t ram_addr)
1429{
1430 cpu_physical_memory_reset_dirty(ram_addr,
1431 ram_addr + TARGET_PAGE_SIZE,
1432 CODE_DIRTY_FLAG);
1433#if defined(VBOX) && defined(REM_MONITOR_CODE_PAGES)
1434 /** @todo Retest this? This function has changed... */
1435 remR3ProtectCode(cpu_single_env, ram_addr);
1436#endif
1437}
1438
1439/* update the TLB so that writes in physical page 'phys_addr' are no longer
1440 tested for self modifying code */
1441static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1442 target_ulong vaddr)
1443{
1444#ifdef VBOX
1445 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
1446#endif
1447 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1448}
1449
1450static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1451 unsigned long start, unsigned long length)
1452{
1453 unsigned long addr;
1454 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1455 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1456 if ((addr - start) < length) {
1457 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1458 }
1459 }
1460}
1461
1462void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1463 int dirty_flags)
1464{
1465 CPUState *env;
1466 unsigned long length, start1;
1467 int i, mask, len;
1468 uint8_t *p;
1469
1470 start &= TARGET_PAGE_MASK;
1471 end = TARGET_PAGE_ALIGN(end);
1472
1473 length = end - start;
1474 if (length == 0)
1475 return;
1476 len = length >> TARGET_PAGE_BITS;
1477#ifdef USE_KQEMU
1478 /* XXX: should not depend on cpu context */
1479 env = first_cpu;
1480 if (env->kqemu_enabled) {
1481 ram_addr_t addr;
1482 addr = start;
1483 for(i = 0; i < len; i++) {
1484 kqemu_set_notdirty(env, addr);
1485 addr += TARGET_PAGE_SIZE;
1486 }
1487 }
1488#endif
1489 mask = ~dirty_flags;
1490 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1491#ifdef VBOX
1492 if (RT_LIKELY((start >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
1493#endif
1494 for(i = 0; i < len; i++)
1495 p[i] &= mask;
1496
1497 /* we modify the TLB cache so that the dirty bit will be set again
1498 when accessing the range */
1499#ifndef VBOX
1500 start1 = start + (unsigned long)phys_ram_base;
1501#else
1502 start1 = (unsigned long)remR3GCPhys2HCVirt(env, start);
1503#endif
1504 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1505 for(i = 0; i < CPU_TLB_SIZE; i++)
1506 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1507 for(i = 0; i < CPU_TLB_SIZE; i++)
1508 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1509 }
1510
1511#if !defined(CONFIG_SOFTMMU)
1512#ifdef VBOX /**@todo remove this check */
1513# error "We shouldn't get here..."
1514#endif
1515 /* XXX: this is expensive */
1516 {
1517 VirtPageDesc *p;
1518 int j;
1519 target_ulong addr;
1520
1521 for(i = 0; i < L1_SIZE; i++) {
1522 p = l1_virt_map[i];
1523 if (p) {
1524 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1525 for(j = 0; j < L2_SIZE; j++) {
1526 if (p->valid_tag == virt_valid_tag &&
1527 p->phys_addr >= start && p->phys_addr < end &&
1528 (p->prot & PROT_WRITE)) {
1529 if (addr < MMAP_AREA_END) {
1530 mprotect((void *)addr, TARGET_PAGE_SIZE,
1531 p->prot & ~PROT_WRITE);
1532 }
1533 }
1534 addr += TARGET_PAGE_SIZE;
1535 p++;
1536 }
1537 }
1538 }
1539 }
1540#endif
1541}
1542
1543static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1544{
1545 ram_addr_t ram_addr;
1546
1547 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1548 /* RAM case */
1549#ifndef VBOX
1550 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1551 tlb_entry->addend - (unsigned long)phys_ram_base;
1552#else
1553 ram_addr = remR3HCVirt2GCPhys(cpu_single_env, (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend); /** @todo check if this is right! */
1554#endif
1555 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1556 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1557 }
1558 }
1559}
1560
1561/* update the TLB according to the current state of the dirty bits */
1562void cpu_tlb_update_dirty(CPUState *env)
1563{
1564 int i;
1565 for(i = 0; i < CPU_TLB_SIZE; i++)
1566 tlb_update_dirty(&env->tlb_table[0][i]);
1567 for(i = 0; i < CPU_TLB_SIZE; i++)
1568 tlb_update_dirty(&env->tlb_table[1][i]);
1569}
1570
1571static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1572 unsigned long start)
1573{
1574 unsigned long addr;
1575 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1576 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1577 if (addr == start) {
1578 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1579 }
1580 }
1581}
1582
1583/* update the TLB corresponding to virtual page vaddr and phys addr
1584 addr so that it is no longer dirty */
1585static inline void tlb_set_dirty(CPUState *env,
1586 unsigned long addr, target_ulong vaddr)
1587{
1588 int i;
1589
1590 addr &= TARGET_PAGE_MASK;
1591 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1592 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1593 tlb_set_dirty1(&env->tlb_table[1][i], addr);
1594}
1595
1596/* add a new TLB entry. At most one entry for a given virtual address
1597 is permitted. Return 0 if OK or 2 if the page could not be mapped
1598 (can only happen in non SOFTMMU mode for I/O pages or pages
1599 conflicting with the host address space). */
1600int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1601 target_phys_addr_t paddr, int prot,
1602 int is_user, int is_softmmu)
1603{
1604 PhysPageDesc *p;
1605 unsigned long pd;
1606 unsigned int index;
1607 target_ulong address;
1608 target_phys_addr_t addend;
1609 int ret;
1610 CPUTLBEntry *te;
1611
1612 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1613 if (!p) {
1614 pd = IO_MEM_UNASSIGNED;
1615 } else {
1616 pd = p->phys_offset;
1617 }
1618#if defined(DEBUG_TLB)
1619 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
1620 vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
1621#endif
1622
1623 ret = 0;
1624#if !defined(CONFIG_SOFTMMU)
1625 if (is_softmmu)
1626#endif
1627 {
1628 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1629 /* IO memory case */
1630 address = vaddr | pd;
1631 addend = paddr;
1632 } else {
1633 /* standard memory */
1634 address = vaddr;
1635#ifndef VBOX
1636 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1637#else
1638 addend = (unsigned long)remR3GCPhys2HCVirt(env, pd & TARGET_PAGE_MASK);
1639#endif
1640 }
1641
1642 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1643 addend -= vaddr;
1644 te = &env->tlb_table[is_user][index];
1645 te->addend = addend;
1646 if (prot & PAGE_READ) {
1647 te->addr_read = address;
1648 } else {
1649 te->addr_read = -1;
1650 }
1651 if (prot & PAGE_EXEC) {
1652 te->addr_code = address;
1653 } else {
1654 te->addr_code = -1;
1655 }
1656 if (prot & PAGE_WRITE) {
1657 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1658 (pd & IO_MEM_ROMD)) {
1659 /* write access calls the I/O callback */
1660 te->addr_write = vaddr |
1661 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1662 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1663 !cpu_physical_memory_is_dirty(pd)) {
1664 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1665 } else {
1666 te->addr_write = address;
1667 }
1668 } else {
1669 te->addr_write = -1;
1670 }
1671#ifdef VBOX
1672 /* inform raw mode about TLB page change */
1673 /** @todo double check and fix this interface. OLD: remR3SetPage(env, &env->tlb_read[is_user][index], &env->tlb_write[is_user][index], prot, is_user); */
1674 remR3SetPage(env, te, te, prot, is_user);
1675#endif
1676 }
1677#if !defined(CONFIG_SOFTMMU)
1678 else {
1679 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1680 /* IO access: no mapping is done as it will be handled by the
1681 soft MMU */
1682 if (!(env->hflags & HF_SOFTMMU_MASK))
1683 ret = 2;
1684 } else {
1685 void *map_addr;
1686
1687 if (vaddr >= MMAP_AREA_END) {
1688 ret = 2;
1689 } else {
1690 if (prot & PROT_WRITE) {
1691 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1692#if defined(TARGET_HAS_SMC) || 1
1693 first_tb ||
1694#endif
1695 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1696 !cpu_physical_memory_is_dirty(pd))) {
1697 /* ROM: we behave as if code were inside */
1698 /* if code is present, we only map as read only and save the
1699 original mapping */
1700 VirtPageDesc *vp;
1701
1702 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1703 vp->phys_addr = pd;
1704 vp->prot = prot;
1705 vp->valid_tag = virt_valid_tag;
1706 prot &= ~PAGE_WRITE;
1707 }
1708 }
1709 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1710 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1711 if (map_addr == MAP_FAILED) {
1712 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1713 paddr, vaddr);
1714 }
1715 }
1716 }
1717 }
1718#endif
1719 return ret;
1720}
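/* The addend stored above satisfies: host address == guest vaddr + addend
   for RAM pages. A hedged sketch of how a softmmu byte-load fast path would
   use it (the helper and the exact hit test are illustrative only): */
#if 0
static uint8_t example_softmmu_ldb(CPUState *env, target_ulong vaddr, int is_user)
{
    int idx = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *te = &env->tlb_table[is_user][idx];
    if (te->addr_read == (vaddr & TARGET_PAGE_MASK))
        return *(uint8_t *)(long)(vaddr + te->addend);     /* RAM hit */
    /* miss: the slow path eventually refills via tlb_set_page_exec() */
    return 0;
}
#endif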
1721
1722/* called from signal handler: invalidate the code and unprotect the
1723 page. Return TRUE if the fault was successfully handled. */
1724int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1725{
1726#if !defined(CONFIG_SOFTMMU)
1727 VirtPageDesc *vp;
1728
1729#if defined(DEBUG_TLB)
1730 printf("page_unprotect: addr=0x%08x\n", addr);
1731#endif
1732 addr &= TARGET_PAGE_MASK;
1733
1734 /* if it is not mapped, no need to worry here */
1735 if (addr >= MMAP_AREA_END)
1736 return 0;
1737 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1738 if (!vp)
1739 return 0;
1740 /* NOTE: in this case, validate_tag is _not_ tested as it
1741 validates only the code TLB */
1742 if (vp->valid_tag != virt_valid_tag)
1743 return 0;
1744 if (!(vp->prot & PAGE_WRITE))
1745 return 0;
1746#if defined(DEBUG_TLB)
1747 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1748 addr, vp->phys_addr, vp->prot);
1749#endif
1750 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1751 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1752 (unsigned long)addr, vp->prot);
1753 /* set the dirty bit */
1754#ifdef VBOX
1755 if (RT_LIKELY((vp->phys_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
1756#endif
1757 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1758 /* flush the code inside */
1759 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1760 return 1;
1761#elif defined(VBOX)
1762 addr &= TARGET_PAGE_MASK;
1763
1764 /* if it is not mapped, no need to worry here */
1765 if (addr >= MMAP_AREA_END)
1766 return 0;
1767 return 1;
1768#else
1769 return 0;
1770#endif
1771}
1772
1773#else
1774
1775void tlb_flush(CPUState *env, int flush_global)
1776{
1777}
1778
1779void tlb_flush_page(CPUState *env, target_ulong addr)
1780{
1781}
1782
1783int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1784 target_phys_addr_t paddr, int prot,
1785 int is_user, int is_softmmu)
1786{
1787 return 0;
1788}
1789
1790#ifndef VBOX
1791/* dump memory mappings */
1792void page_dump(FILE *f)
1793{
1794 unsigned long start, end;
1795 int i, j, prot, prot1;
1796 PageDesc *p;
1797
1798 fprintf(f, "%-8s %-8s %-8s %s\n",
1799 "start", "end", "size", "prot");
1800 start = -1;
1801 end = -1;
1802 prot = 0;
1803 for(i = 0; i <= L1_SIZE; i++) {
1804 if (i < L1_SIZE)
1805 p = l1_map[i];
1806 else
1807 p = NULL;
1808 for(j = 0;j < L2_SIZE; j++) {
1809 if (!p)
1810 prot1 = 0;
1811 else
1812 prot1 = p[j].flags;
1813 if (prot1 != prot) {
1814 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1815 if (start != -1) {
1816 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1817 start, end, end - start,
1818 prot & PAGE_READ ? 'r' : '-',
1819 prot & PAGE_WRITE ? 'w' : '-',
1820 prot & PAGE_EXEC ? 'x' : '-');
1821 }
1822 if (prot1 != 0)
1823 start = end;
1824 else
1825 start = -1;
1826 prot = prot1;
1827 }
1828 if (!p)
1829 break;
1830 }
1831 }
1832}
1833#endif /* !VBOX */
1834
1835int page_get_flags(target_ulong address)
1836{
1837 PageDesc *p;
1838
1839 p = page_find(address >> TARGET_PAGE_BITS);
1840 if (!p)
1841 return 0;
1842 return p->flags;
1843}
1844
1845/* modify the flags of a page and invalidate the code if
1846 necessary. The flag PAGE_WRITE_ORG is positioned automatically
1847 depending on PAGE_WRITE */
1848void page_set_flags(target_ulong start, target_ulong end, int flags)
1849{
1850 PageDesc *p;
1851 target_ulong addr;
1852
1853 start = start & TARGET_PAGE_MASK;
1854 end = TARGET_PAGE_ALIGN(end);
1855 if (flags & PAGE_WRITE)
1856 flags |= PAGE_WRITE_ORG;
1857#ifdef VBOX
1858 AssertMsgFailed(("We shouldn't be here, and if we should, we must have an env to do the proper locking!\n"));
1859#endif
1860 spin_lock(&tb_lock);
1861 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1862 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1863 /* if the write protection is set, then we invalidate the code
1864 inside */
1865 if (!(p->flags & PAGE_WRITE) &&
1866 (flags & PAGE_WRITE) &&
1867 p->first_tb) {
1868 tb_invalidate_phys_page(addr, 0, NULL);
1869 }
1870 p->flags = flags;
1871 }
1872 spin_unlock(&tb_lock);
1873}
1874
1875/* called from signal handler: invalidate the code and unprotect the
1876 page. Return TRUE if the fault was successfully handled. */
1877int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1878{
1879 unsigned int page_index, prot, pindex;
1880 PageDesc *p, *p1;
1881 target_ulong host_start, host_end, addr;
1882
1883 host_start = address & qemu_host_page_mask;
1884 page_index = host_start >> TARGET_PAGE_BITS;
1885 p1 = page_find(page_index);
1886 if (!p1)
1887 return 0;
1888 host_end = host_start + qemu_host_page_size;
1889 p = p1;
1890 prot = 0;
1891 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1892 prot |= p->flags;
1893 p++;
1894 }
1895 /* if the page was really writable, then we change its
1896 protection back to writable */
1897 if (prot & PAGE_WRITE_ORG) {
1898 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1899 if (!(p1[pindex].flags & PAGE_WRITE)) {
1900 mprotect((void *)g2h(host_start), qemu_host_page_size,
1901 (prot & PAGE_BITS) | PAGE_WRITE);
1902 p1[pindex].flags |= PAGE_WRITE;
1903 /* and since the content will be modified, we must invalidate
1904 the corresponding translated code. */
1905 tb_invalidate_phys_page(address, pc, puc);
1906#ifdef DEBUG_TB_CHECK
1907 tb_invalidate_check(address);
1908#endif
1909 return 1;
1910 }
1911 }
1912 return 0;
1913}
1914
1915/* call this function when system calls directly modify a memory area */
1916/* ??? This should be redundant now we have lock_user. */
1917void page_unprotect_range(target_ulong data, target_ulong data_size)
1918{
1919 target_ulong start, end, addr;
1920
1921 start = data;
1922 end = start + data_size;
1923 start &= TARGET_PAGE_MASK;
1924 end = TARGET_PAGE_ALIGN(end);
1925 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1926 page_unprotect(addr, 0, NULL);
1927 }
1928}
1929
1930static inline void tlb_set_dirty(CPUState *env,
1931 unsigned long addr, target_ulong vaddr)
1932{
1933}
1934#endif /* defined(CONFIG_USER_ONLY) */
1935
1936/* register physical memory. 'size' must be a multiple of the target
1937 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1938 io memory page */
1939void cpu_register_physical_memory(target_phys_addr_t start_addr,
1940 unsigned long size,
1941 unsigned long phys_offset)
1942{
1943 target_phys_addr_t addr, end_addr;
1944 PhysPageDesc *p;
1945 CPUState *env;
1946
1947 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
1948 end_addr = start_addr + size;
1949 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
1950 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1951 p->phys_offset = phys_offset;
1952#ifndef VBOX
1953 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
1954 (phys_offset & IO_MEM_ROMD))
1955#else
1956 if ( (phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM
1957 || (phys_offset & IO_MEM_ROMD)
1958 || (phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING)
1959#endif
1960
1961 phys_offset += TARGET_PAGE_SIZE;
1962 }
1963
1964 /* since each CPU stores ram addresses in its TLB cache, we must
1965 reset the modified entries */
1966 /* XXX: slow ! */
1967 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1968 tlb_flush(env, 1);
1969 }
1970}
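/* Hedged usage sketch of the registration call above: a range is plain RAM
   when the low bits of phys_offset are zero (IO_MEM_RAM); a non-zero low
   part selects an io_mem handler instead. Addresses and sizes below are
   illustrative only. */
#if 0
static void example_register_ram(void)
{
    /* 1 MB of RAM at guest-physical 0, backed by offset 0 of the RAM block */
    cpu_register_physical_memory(0x00000000, 0x00100000, 0x00000000 | IO_MEM_RAM);
}
#endif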
1971
1972/* XXX: temporary until new memory mapping API */
1973uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
1974{
1975 PhysPageDesc *p;
1976
1977 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1978 if (!p)
1979 return IO_MEM_UNASSIGNED;
1980 return p->phys_offset;
1981}
1982
1983static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
1984{
1985#ifdef DEBUG_UNASSIGNED
1986 printf("Unassigned mem read 0x%08x\n", (int)addr);
1987#endif
1988 return 0;
1989}
1990
1991static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1992{
1993#ifdef DEBUG_UNASSIGNED
1994 printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
1995#endif
1996}
1997
1998static CPUReadMemoryFunc *unassigned_mem_read[3] = {
1999 unassigned_mem_readb,
2000 unassigned_mem_readb,
2001 unassigned_mem_readb,
2002};
2003
2004static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2005 unassigned_mem_writeb,
2006 unassigned_mem_writeb,
2007 unassigned_mem_writeb,
2008};
2009
2010static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2011{
2012 unsigned long ram_addr;
2013 int dirty_flags;
2014#ifndef VBOX
2015 ram_addr = addr - (unsigned long)phys_ram_base;
2016#else
2017 ram_addr = remR3HCVirt2GCPhys(cpu_single_env, (void *)addr);
2018#endif
2019#ifdef VBOX
2020 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2021 dirty_flags = 0xff;
2022 else
2023#endif /* VBOX */
2024 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2025 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2026#if !defined(CONFIG_USER_ONLY)
2027 tb_invalidate_phys_page_fast(ram_addr, 1);
2028# ifdef VBOX
2029 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2030 dirty_flags = 0xff;
2031 else
2032# endif /* VBOX */
2033 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2034#endif
2035 }
2036 stb_p((uint8_t *)(long)addr, val);
2037#ifdef USE_KQEMU
2038 if (cpu_single_env->kqemu_enabled &&
2039 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2040 kqemu_modify_page(cpu_single_env, ram_addr);
2041#endif
2042 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2043#ifdef VBOX
2044 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2045#endif /* VBOX */
2046 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2047 /* we remove the notdirty callback only if the code has been
2048 flushed */
2049 if (dirty_flags == 0xff)
2050 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2051}
2052
2053static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2054{
2055 unsigned long ram_addr;
2056 int dirty_flags;
2057#ifndef VBOX
2058 ram_addr = addr - (unsigned long)phys_ram_base;
2059#else
2060 ram_addr = remR3HCVirt2GCPhys(cpu_single_env, (void *)addr);
2061#endif
2062#ifdef VBOX
2063 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2064 dirty_flags = 0xff;
2065 else
2066#endif /* VBOX */
2067 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2068 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2069#if !defined(CONFIG_USER_ONLY)
2070 tb_invalidate_phys_page_fast(ram_addr, 2);
2071# ifdef VBOX
2072 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2073 dirty_flags = 0xff;
2074 else
2075# endif /* VBOX */
2076 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2077#endif
2078 }
2079 stw_p((uint8_t *)(long)addr, val);
2080#ifdef USE_KQEMU
2081 if (cpu_single_env->kqemu_enabled &&
2082 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2083 kqemu_modify_page(cpu_single_env, ram_addr);
2084#endif
2085 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2086#ifdef VBOX
2087 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2088#endif
2089 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2090 /* we remove the notdirty callback only if the code has been
2091 flushed */
2092 if (dirty_flags == 0xff)
2093 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2094}
2095
2096static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2097{
2098 unsigned long ram_addr;
2099 int dirty_flags;
2100#ifndef VBOX
2101 ram_addr = addr - (unsigned long)phys_ram_base;
2102#else
2103 ram_addr = remR3HCVirt2GCPhys(cpu_single_env, (void *)addr);
2104#endif
2105#ifdef VBOX
2106 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2107 dirty_flags = 0xff;
2108 else
2109#endif /* VBOX */
2110 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2111 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2112#if !defined(CONFIG_USER_ONLY)
2113 tb_invalidate_phys_page_fast(ram_addr, 4);
2114# ifdef VBOX
2115 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2116 dirty_flags = 0xff;
2117 else
2118# endif /* VBOX */
2119 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2120#endif
2121 }
2122 stl_p((uint8_t *)(long)addr, val);
2123#ifdef USE_KQEMU
2124 if (cpu_single_env->kqemu_enabled &&
2125 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2126 kqemu_modify_page(cpu_single_env, ram_addr);
2127#endif
2128 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2129#ifdef VBOX
2130 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2131#endif
2132 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2133 /* we remove the notdirty callback only if the code has been
2134 flushed */
2135 if (dirty_flags == 0xff)
2136 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2137}
2138
2139static CPUReadMemoryFunc *error_mem_read[3] = {
2140 NULL, /* never used */
2141 NULL, /* never used */
2142 NULL, /* never used */
2143};
2144
2145static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2146 notdirty_mem_writeb,
2147 notdirty_mem_writew,
2148 notdirty_mem_writel,
2149};
2150
2151static void io_mem_init(void)
2152{
2153 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2154 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2155 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2156#ifdef VBOX
2157 cpu_register_io_memory(IO_MEM_RAM_MISSING >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2158 io_mem_nb = 6;
2159#else
2160 io_mem_nb = 5;
2161#endif
2162
2163#ifndef VBOX /* VBOX: we do this later when the RAM is allocated. */
2164 /* alloc dirty bits array */
2165 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2166 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2167#endif /* !VBOX */
2168}
2169
2170/* mem_read and mem_write are arrays of three functions used for byte
2171   (index 0), word (index 1) and dword (index 2) accesses. All three
2172   functions must be supplied. If io_index is non-zero, the
2173   corresponding io zone is modified. If it is zero, a new io zone is
2174   allocated. The return value can be used with
2175   cpu_register_physical_memory(); -1 is returned on error. */
2176int cpu_register_io_memory(int io_index,
2177 CPUReadMemoryFunc **mem_read,
2178 CPUWriteMemoryFunc **mem_write,
2179 void *opaque)
2180{
2181 int i;
2182
2183 if (io_index <= 0) {
2184 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2185 return -1;
2186 io_index = io_mem_nb++;
2187 } else {
2188 if (io_index >= IO_MEM_NB_ENTRIES)
2189 return -1;
2190 }
2191
2192 for(i = 0;i < 3; i++) {
2193 io_mem_read[io_index][i] = mem_read[i];
2194 io_mem_write[io_index][i] = mem_write[i];
2195 }
2196 io_mem_opaque[io_index] = opaque;
2197 return io_index << IO_MEM_SHIFT;
2198}
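
/* Illustrative sketch (hypothetical device, not part of this file): the
   three entries of each table correspond to byte, word and dword
   accesses, as in the unassigned_mem_* tables above, and the returned
   handle is what cpu_register_physical_memory() expects in phys_offset
   for an io page. Kept under "#if 0" so it has no effect on the build. */
#if 0
static uint32_t example_dev_read(void *opaque, target_phys_addr_t addr)
{
    return 0xffffffff; /* hypothetical: the device reads as all ones */
}

static void example_dev_write(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* hypothetical: writes are ignored */
}

static CPUReadMemoryFunc *example_dev_read_table[3] = {
    example_dev_read, example_dev_read, example_dev_read,
};

static CPUWriteMemoryFunc *example_dev_write_table[3] = {
    example_dev_write, example_dev_write, example_dev_write,
};

static int example_register_device(void)
{
    /* io_index 0 asks for a newly allocated io zone */
    int io_index = cpu_register_io_memory(0, example_dev_read_table,
                                          example_dev_write_table, NULL);
    if (io_index < 0)
        return -1;
    /* map one page of the device at a hypothetical guest physical address */
    cpu_register_physical_memory(0xfee01000, TARGET_PAGE_SIZE, io_index);
    return 0;
}
#endif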
2199
2200CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2201{
2202 return io_mem_write[io_index >> IO_MEM_SHIFT];
2203}
2204
2205CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2206{
2207 return io_mem_read[io_index >> IO_MEM_SHIFT];
2208}
2209
2210/* physical memory access (slow version, mainly for debug) */
2211#if defined(CONFIG_USER_ONLY)
2212void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2213 int len, int is_write)
2214{
2215 int l, flags;
2216 target_ulong page;
2217 void * p;
2218
2219 while (len > 0) {
2220 page = addr & TARGET_PAGE_MASK;
2221 l = (page + TARGET_PAGE_SIZE) - addr;
2222 if (l > len)
2223 l = len;
2224 flags = page_get_flags(page);
2225 if (!(flags & PAGE_VALID))
2226 return;
2227 if (is_write) {
2228 if (!(flags & PAGE_WRITE))
2229 return;
2230            p = lock_user(addr, l, 0);
2231            memcpy(p, buf, l);
2232            unlock_user(p, addr, l);
2233 } else {
2234 if (!(flags & PAGE_READ))
2235 return;
2236            p = lock_user(addr, l, 1);
2237            memcpy(buf, p, l);
2238 unlock_user(p, addr, 0);
2239 }
2240 len -= l;
2241 buf += l;
2242 addr += l;
2243 }
2244}
2245
2246#else
2247void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2248 int len, int is_write)
2249{
2250 int l, io_index;
2251 uint8_t *ptr;
2252 uint32_t val;
2253 target_phys_addr_t page;
2254 unsigned long pd;
2255 PhysPageDesc *p;
2256
2257 while (len > 0) {
2258 page = addr & TARGET_PAGE_MASK;
2259 l = (page + TARGET_PAGE_SIZE) - addr;
2260 if (l > len)
2261 l = len;
2262 p = phys_page_find(page >> TARGET_PAGE_BITS);
2263 if (!p) {
2264 pd = IO_MEM_UNASSIGNED;
2265 } else {
2266 pd = p->phys_offset;
2267 }
2268
2269 if (is_write) {
2270 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2271 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2272 /* XXX: could force cpu_single_env to NULL to avoid
2273 potential bugs */
2274 if (l >= 4 && ((addr & 3) == 0)) {
2275 /* 32 bit write access */
2276 val = ldl_p(buf);
2277 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2278 l = 4;
2279 } else if (l >= 2 && ((addr & 1) == 0)) {
2280 /* 16 bit write access */
2281 val = lduw_p(buf);
2282 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2283 l = 2;
2284 } else {
2285 /* 8 bit write access */
2286 val = ldub_p(buf);
2287 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2288 l = 1;
2289 }
2290 } else {
2291 unsigned long addr1;
2292 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2293 /* RAM case */
2294#ifdef VBOX
2295 ptr = remR3GCPhys2HCVirt(cpu_single_env, addr1);
2296 remR3PhysWrite(ptr, buf, l);
2297#else
2298 ptr = phys_ram_base + addr1;
2299 memcpy(ptr, buf, l);
2300#endif
2301 if (!cpu_physical_memory_is_dirty(addr1)) {
2302 /* invalidate code */
2303 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2304 /* set dirty bit */
2305#ifdef VBOX
2306 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2307#endif
2308 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2309 (0xff & ~CODE_DIRTY_FLAG);
2310 }
2311 }
2312 } else {
2313 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2314 !(pd & IO_MEM_ROMD)) {
2315 /* I/O case */
2316 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2317 if (l >= 4 && ((addr & 3) == 0)) {
2318 /* 32 bit read access */
2319 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2320 stl_p(buf, val);
2321 l = 4;
2322 } else if (l >= 2 && ((addr & 1) == 0)) {
2323 /* 16 bit read access */
2324 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2325 stw_p(buf, val);
2326 l = 2;
2327 } else {
2328 /* 8 bit read access */
2329 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2330 stb_p(buf, val);
2331 l = 1;
2332 }
2333 } else {
2334 /* RAM case */
2335#ifdef VBOX
2336 ptr = remR3GCPhys2HCVirt(cpu_single_env, (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK));
2337 remR3PhysRead(ptr, buf, l);
2338#else
2339 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2340 (addr & ~TARGET_PAGE_MASK);
2341 memcpy(buf, ptr, l);
2342#endif
2343 }
2344 }
2345 len -= l;
2346 buf += l;
2347 addr += l;
2348 }
2349}
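
/* Illustrative sketch (hypothetical buffer and address): callers normally
   go through the cpu_physical_memory_read()/write() wrappers, which expand
   to cpu_physical_memory_rw() with is_write 0/1; the function splits the
   transfer at page boundaries and dispatches each chunk either to RAM or
   to the registered io callbacks. Kept under "#if 0" so it has no effect
   on the build. */
#if 0
static void example_copy_to_guest(target_phys_addr_t gpa,
                                  const uint8_t *data, int len)
{
    /* is_write = 1: copy from the host buffer into guest physical memory */
    cpu_physical_memory_rw(gpa, (uint8_t *)data, len, 1);
}
#endif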
2350
2351/* used for ROM loading: can write to both RAM and ROM */
2352void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2353 const uint8_t *buf, int len)
2354{
2355 int l;
2356 uint8_t *ptr;
2357 target_phys_addr_t page;
2358 unsigned long pd;
2359 PhysPageDesc *p;
2360
2361 while (len > 0) {
2362 page = addr & TARGET_PAGE_MASK;
2363 l = (page + TARGET_PAGE_SIZE) - addr;
2364 if (l > len)
2365 l = len;
2366 p = phys_page_find(page >> TARGET_PAGE_BITS);
2367 if (!p) {
2368 pd = IO_MEM_UNASSIGNED;
2369 } else {
2370 pd = p->phys_offset;
2371 }
2372
2373 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2374 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2375 !(pd & IO_MEM_ROMD)) {
2376 /* do nothing */
2377 } else {
2378 unsigned long addr1;
2379 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2380            /* ROM/RAM case */
2382#ifndef VBOX
2383 ptr = phys_ram_base + addr1;
2384#else
2385 ptr = remR3GCPhys2HCVirt(cpu_single_env, addr1);
2386#endif
2387 memcpy(ptr, buf, l);
2388 }
2389 len -= l;
2390 buf += l;
2391 addr += l;
2392 }
2393}
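
/* Illustrative sketch (hypothetical image and load address): a BIOS or
   option ROM image read from the host is copied into guest memory with
   cpu_physical_memory_write_rom(), which, unlike the regular write path,
   also stores into pages registered as ROM. Kept under "#if 0" so it has
   no effect on the build. */
#if 0
static void example_load_rom_image(const uint8_t *image, int image_size)
{
    /* hypothetical: place the image so that it ends at 4 GB, BIOS style */
    cpu_physical_memory_write_rom(0xffffffff - image_size + 1, image, image_size);
}
#endif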
2394
2395
2396/* warning: addr must be aligned */
2397uint32_t ldl_phys(target_phys_addr_t addr)
2398{
2399 int io_index;
2400 uint8_t *ptr;
2401 uint32_t val;
2402 unsigned long pd;
2403 PhysPageDesc *p;
2404
2405 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2406 if (!p) {
2407 pd = IO_MEM_UNASSIGNED;
2408 } else {
2409 pd = p->phys_offset;
2410 }
2411
2412 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2413 !(pd & IO_MEM_ROMD)) {
2414 /* I/O case */
2415 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2416 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2417 } else {
2418 /* RAM case */
2419#ifndef VBOX
2420 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2421 (addr & ~TARGET_PAGE_MASK);
2422#else
2423 ptr = remR3GCPhys2HCVirt(cpu_single_env, (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK));
2424#endif
2425 val = ldl_p(ptr);
2426 }
2427 return val;
2428}
2429
2430/* warning: addr must be aligned */
2431uint64_t ldq_phys(target_phys_addr_t addr)
2432{
2433 int io_index;
2434 uint8_t *ptr;
2435 uint64_t val;
2436 unsigned long pd;
2437 PhysPageDesc *p;
2438
2439 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2440 if (!p) {
2441 pd = IO_MEM_UNASSIGNED;
2442 } else {
2443 pd = p->phys_offset;
2444 }
2445
2446 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2447 !(pd & IO_MEM_ROMD)) {
2448 /* I/O case */
2449 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2450#ifdef TARGET_WORDS_BIGENDIAN
2451 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2452 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2453#else
2454 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2455 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2456#endif
2457 } else {
2458 /* RAM case */
2459#ifndef VBOX
2460 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2461 (addr & ~TARGET_PAGE_MASK);
2462#else
2463 ptr = remR3GCPhys2HCVirt(cpu_single_env, (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK));
2464#endif
2465 val = ldq_p(ptr);
2466 }
2467 return val;
2468}
2469
2470/* XXX: optimize */
2471uint32_t ldub_phys(target_phys_addr_t addr)
2472{
2473 uint8_t val;
2474 cpu_physical_memory_read(addr, &val, 1);
2475 return val;
2476}
2477
2478/* XXX: optimize */
2479uint32_t lduw_phys(target_phys_addr_t addr)
2480{
2481 uint16_t val;
2482 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2483 return tswap16(val);
2484}
2485
2486/* warning: addr must be aligned. The ram page is not marked as dirty
2487   and the code inside is not invalidated. This is useful when the
2488   dirty bits are used to track modified PTEs */
2489void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2490{
2491 int io_index;
2492 uint8_t *ptr;
2493 unsigned long pd;
2494 PhysPageDesc *p;
2495
2496 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2497 if (!p) {
2498 pd = IO_MEM_UNASSIGNED;
2499 } else {
2500 pd = p->phys_offset;
2501 }
2502
2503 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2504 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2505 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2506 } else {
2507#ifndef VBOX
2508 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2509 (addr & ~TARGET_PAGE_MASK);
2510#else
2511 ptr = remR3GCPhys2HCVirt(cpu_single_env, (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK));
2512#endif
2513 stl_p(ptr, val);
2514 }
2515}
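
/* Illustrative sketch (simplified, x86-style PTE layout assumed): when
   the MMU emulation sets the accessed/dirty bits of a guest page table
   entry, it can use stl_phys_notdirty() so that this bookkeeping store
   does not itself mark the page as modified when the dirty bits are
   being used to track guest changes to page tables. Kept under "#if 0"
   so it has no effect on the build. */
#if 0
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);
    pte |= 0x20;                      /* hypothetical: x86 PG_ACCESSED bit */
    stl_phys_notdirty(pte_addr, pte);
}
#endif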
2516
2517/* warning: addr must be aligned */
2518void stl_phys(target_phys_addr_t addr, uint32_t val)
2519{
2520 int io_index;
2521 uint8_t *ptr;
2522 unsigned long pd;
2523 PhysPageDesc *p;
2524
2525 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2526 if (!p) {
2527 pd = IO_MEM_UNASSIGNED;
2528 } else {
2529 pd = p->phys_offset;
2530 }
2531
2532 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2533 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2534 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2535 } else {
2536 unsigned long addr1;
2537 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2538 /* RAM case */
2539#ifndef VBOX
2540 ptr = phys_ram_base + addr1;
2541#else
2542 ptr = remR3GCPhys2HCVirt(cpu_single_env, addr1);
2543#endif
2544
2545 stl_p(ptr, val);
2546 if (!cpu_physical_memory_is_dirty(addr1)) {
2547 /* invalidate code */
2548 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2549 /* set dirty bit */
2550#ifdef VBOX
2551 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2552#endif
2553 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2554 (0xff & ~CODE_DIRTY_FLAG);
2555 }
2556 }
2557}
2558
2559/* XXX: optimize */
2560void stb_phys(target_phys_addr_t addr, uint32_t val)
2561{
2562 uint8_t v = val;
2563 cpu_physical_memory_write(addr, &v, 1);
2564}
2565
2566/* XXX: optimize */
2567void stw_phys(target_phys_addr_t addr, uint32_t val)
2568{
2569 uint16_t v = tswap16(val);
2570 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2571}
2572
2573/* XXX: optimize */
2574void stq_phys(target_phys_addr_t addr, uint64_t val)
2575{
2576 val = tswap64(val);
2577 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2578}
2579
2580#endif
2581
2582/* virtual memory access for debug */
2583int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2584 uint8_t *buf, int len, int is_write)
2585{
2586 int l;
2587 target_ulong page, phys_addr;
2588
2589 while (len > 0) {
2590 page = addr & TARGET_PAGE_MASK;
2591 phys_addr = cpu_get_phys_page_debug(env, page);
2592 /* if no physical page mapped, return an error */
2593 if (phys_addr == -1)
2594 return -1;
2595 l = (page + TARGET_PAGE_SIZE) - addr;
2596 if (l > len)
2597 l = len;
2598 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2599 buf, l, is_write);
2600 len -= l;
2601 buf += l;
2602 addr += l;
2603 }
2604 return 0;
2605}
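
/* Illustrative sketch (hypothetical use): a gdb-stub style memory read
   goes through the guest virtual address space of a given CPU; each page
   is translated with cpu_get_phys_page_debug() and then accessed through
   the physical path above. Kept under "#if 0" so it has no effect on the
   build. */
#if 0
static int example_debugger_read(CPUState *env, target_ulong vaddr,
                                 uint8_t *buf, int len)
{
    /* is_write = 0: read guest virtual memory into the host buffer */
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0);
}
#endif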
2606
2607#ifndef VBOX
2608void dump_exec_info(FILE *f,
2609 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2610{
2611 int i, target_code_size, max_target_code_size;
2612 int direct_jmp_count, direct_jmp2_count, cross_page;
2613 TranslationBlock *tb;
2614
2615 target_code_size = 0;
2616 max_target_code_size = 0;
2617 cross_page = 0;
2618 direct_jmp_count = 0;
2619 direct_jmp2_count = 0;
2620 for(i = 0; i < nb_tbs; i++) {
2621 tb = &tbs[i];
2622 target_code_size += tb->size;
2623 if (tb->size > max_target_code_size)
2624 max_target_code_size = tb->size;
2625 if (tb->page_addr[1] != -1)
2626 cross_page++;
2627 if (tb->tb_next_offset[0] != 0xffff) {
2628 direct_jmp_count++;
2629 if (tb->tb_next_offset[1] != 0xffff) {
2630 direct_jmp2_count++;
2631 }
2632 }
2633 }
2634 /* XXX: avoid using doubles ? */
2635 cpu_fprintf(f, "TB count %d\n", nb_tbs);
2636 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2637 nb_tbs ? target_code_size / nb_tbs : 0,
2638 max_target_code_size);
2639 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2640 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2641 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2642 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2643 cross_page,
2644 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2645 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2646 direct_jmp_count,
2647 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2648 direct_jmp2_count,
2649 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2650 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2651 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2652 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
2653}
2654#endif /* !VBOX */
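
/* Illustrative sketch: fprintf() matches the expected cpu_fprintf
   signature, so the translation statistics can be dumped to any stdio
   stream; in upstream QEMU the monitor's "info jit" command calls
   dump_exec_info() with its own fprintf-like callback. Note that the
   function is compiled out in the VBOX build. Kept under "#if 0" so it
   has no effect on the build. */
#if 0
static void example_dump_jit_stats(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif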
2655
2656#if !defined(CONFIG_USER_ONLY)
2657
2658#define MMUSUFFIX _cmmu
2659#define GETPC() NULL
2660#define env cpu_single_env
2661#define SOFTMMU_CODE_ACCESS
2662
2663#define SHIFT 0
2664#include "softmmu_template.h"
2665
2666#define SHIFT 1
2667#include "softmmu_template.h"
2668
2669#define SHIFT 2
2670#include "softmmu_template.h"
2671
2672#define SHIFT 3
2673#include "softmmu_template.h"
2674
2675#undef env
2676
2677#endif