VirtualBox

source: vbox/trunk/src/recompiler_new/exec.c@17282

Last change on this file since 17282 was 17274, checked in by vboxsync, 16 years ago

REM: fixed #3525 - avoid using HVA -> GPA PGM function, store GPA in REM TLB instead

  • Property svn:eol-style set to native
File size: 112.9 KB
1/*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29#include "config.h"
30#ifndef VBOX
31#ifdef _WIN32
32#include <windows.h>
33#else
34#include <sys/types.h>
35#include <sys/mman.h>
36#endif
37#include <stdlib.h>
38#include <stdio.h>
39#include <stdarg.h>
40#include <string.h>
41#include <errno.h>
42#include <unistd.h>
43#include <inttypes.h>
44#else /* VBOX */
45# include <stdlib.h>
46# include <stdio.h>
47# include <iprt/alloc.h>
48# include <iprt/string.h>
49# include <iprt/param.h>
50# include <VBox/pgm.h> /* PGM_DYNAMIC_RAM_ALLOC */
51#endif /* VBOX */
52
53#include "cpu.h"
54#include "exec-all.h"
55#if defined(CONFIG_USER_ONLY)
56#include <qemu.h>
57#endif
58
59//#define DEBUG_TB_INVALIDATE
60//#define DEBUG_FLUSH
61//#define DEBUG_TLB
62//#define DEBUG_UNASSIGNED
63
64/* make various TB consistency checks */
65//#define DEBUG_TB_CHECK
66//#define DEBUG_TLB_CHECK
67
68#if !defined(CONFIG_USER_ONLY)
69/* TB consistency checks only implemented for usermode emulation. */
70#undef DEBUG_TB_CHECK
71#endif
72
73#define SMC_BITMAP_USE_THRESHOLD 10
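/* Once a page has taken SMC_BITMAP_USE_THRESHOLD write accesses from
   translated code (see tb_invalidate_phys_page_range below), a per-page
   code bitmap is built so later writes can be checked with
   tb_invalidate_phys_page_fast instead of always invalidating the range. */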
74
75#define MMAP_AREA_START 0x00000000
76#define MMAP_AREA_END 0xa8000000
77
78#if defined(TARGET_SPARC64)
79#define TARGET_PHYS_ADDR_SPACE_BITS 41
80#elif defined(TARGET_SPARC)
81#define TARGET_PHYS_ADDR_SPACE_BITS 36
82#elif defined(TARGET_ALPHA)
83#define TARGET_PHYS_ADDR_SPACE_BITS 42
84#define TARGET_VIRT_ADDR_SPACE_BITS 42
85#elif defined(TARGET_PPC64)
86#define TARGET_PHYS_ADDR_SPACE_BITS 42
87#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
88#define TARGET_PHYS_ADDR_SPACE_BITS 42
89#elif defined(TARGET_I386) && !defined(USE_KQEMU)
90#define TARGET_PHYS_ADDR_SPACE_BITS 36
91#else
92/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
93#define TARGET_PHYS_ADDR_SPACE_BITS 32
94#endif
95
96static TranslationBlock *tbs;
97int code_gen_max_blocks;
98TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
99static int nb_tbs;
100/* any access to the tbs or the page table must use this lock */
101spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
102
103#ifndef VBOX
104#if defined(__arm__) || defined(__sparc_v9__)
105/* The prologue must be reachable with a direct jump. ARM and Sparc64
106 have limited branch ranges (possibly also PPC) so place it in a
107 section close to the code segment. */
108#define code_gen_section \
109 __attribute__((__section__(".gen_code"))) \
110 __attribute__((aligned (32)))
111#else
112#define code_gen_section \
113 __attribute__((aligned (32)))
114#endif
115uint8_t code_gen_prologue[1024] code_gen_section;
116
117#else /* VBOX */
118extern uint8_t* code_gen_prologue;
119#endif /* VBOX */
120
121static uint8_t *code_gen_buffer;
122static unsigned long code_gen_buffer_size;
123/* threshold to flush the translated code buffer */
124static unsigned long code_gen_buffer_max_size;
125uint8_t *code_gen_ptr;
126
127#ifndef VBOX
128#if !defined(CONFIG_USER_ONLY)
129ram_addr_t phys_ram_size;
130int phys_ram_fd;
131uint8_t *phys_ram_base;
132uint8_t *phys_ram_dirty;
133static int in_migration;
134static ram_addr_t phys_ram_alloc_offset = 0;
135#endif
136#else /* VBOX */
137RTGCPHYS phys_ram_size;
138/* we have memory ranges (the high PC-BIOS mapping) which
139 cause some pages to fall outside the dirty map here. */
140uint32_t phys_ram_dirty_size;
141#endif /* VBOX */
142#if !defined(VBOX)
143uint8_t *phys_ram_base;
144#endif
145uint8_t *phys_ram_dirty;
146
147CPUState *first_cpu;
148/* current CPU in the current thread. It is only valid inside
149 cpu_exec() */
150CPUState *cpu_single_env;
151/* 0 = Do not count executed instructions.
152 1 = Precise instruction counting.
153 2 = Adaptive rate instruction counting. */
154int use_icount = 0;
155/* Current instruction counter. While executing translated code this may
156 include some instructions that have not yet been executed. */
157int64_t qemu_icount;
158
159typedef struct PageDesc {
160 /* list of TBs intersecting this ram page */
161 TranslationBlock *first_tb;
162 /* in order to optimize self-modifying code, we count the number
163 of write accesses to a given page; past a threshold we use a bitmap */
164 unsigned int code_write_count;
165 uint8_t *code_bitmap;
166#if defined(CONFIG_USER_ONLY)
167 unsigned long flags;
168#endif
169} PageDesc;
170
171typedef struct PhysPageDesc {
172 /* offset in host memory of the page + io_index in the low 12 bits */
173 ram_addr_t phys_offset;
174} PhysPageDesc;
175
176#define L2_BITS 10
177#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
178/* XXX: this is a temporary hack for the Alpha target.
179 * In the future, this is to be replaced by a multi-level table
180 * to actually be able to handle the complete 64-bit address space.
181 */
182#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
183#else
184#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
185#endif
186
187#define L1_SIZE (1 << L1_BITS)
188#define L2_SIZE (1 << L2_BITS)
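/* The virtual page table is a two-level structure: l1_map[] has L1_SIZE
   entries, each pointing to a leaf array of L2_SIZE PageDescs.  Worked
   example (illustrative assumption: a 32-bit target with 4 KiB pages,
   i.e. TARGET_PAGE_BITS == 12): L1_BITS is 32 - 10 - 12 = 10, so both
   levels hold 1024 entries, and a page index splits into
   (index >> L2_BITS) for the L1 slot and (index & (L2_SIZE - 1)) inside
   the leaf, which is exactly what page_l1_map()/page_find_alloc() below do. */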
189
190static void io_mem_init(void);
191
192unsigned long qemu_real_host_page_size;
193unsigned long qemu_host_page_bits;
194unsigned long qemu_host_page_size;
195unsigned long qemu_host_page_mask;
196
197/* XXX: for system emulation, it could just be an array */
198static PageDesc *l1_map[L1_SIZE];
199static PhysPageDesc **l1_phys_map;
200
201#if !defined(CONFIG_USER_ONLY)
202static void io_mem_init(void);
203
204/* io memory support */
205CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
206CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
207void *io_mem_opaque[IO_MEM_NB_ENTRIES];
208static int io_mem_nb;
209static int io_mem_watch;
210#endif
211
212#ifndef VBOX
213/* log support */
214static const char *logfilename = "/tmp/qemu.log";
215#endif /* !VBOX */
216FILE *logfile;
217int loglevel;
218#ifndef VBOX
219static int log_append = 0;
220#endif
221
222/* statistics */
223static int tlb_flush_count;
224static int tb_flush_count;
225#ifndef VBOX
226static int tb_phys_invalidate_count;
227#endif /* !VBOX */
228
229#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
230typedef struct subpage_t {
231 target_phys_addr_t base;
232 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
233 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
234 void *opaque[TARGET_PAGE_SIZE][2][4];
235} subpage_t;
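/* A subpage splits one guest page among several I/O handlers: SUBPAGE_IDX()
   extracts the byte offset within the page, and the per-offset tables hold
   pointers to the read/write handler sets (one slot per access size) plus
   the matching opaque pointers for that offset. */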
236
237
238#ifndef VBOX
239#ifdef _WIN32
240static void map_exec(void *addr, long size)
241{
242 DWORD old_protect;
243 VirtualProtect(addr, size,
244 PAGE_EXECUTE_READWRITE, &old_protect);
245
246}
247#else
248static void map_exec(void *addr, long size)
249{
250 unsigned long start, end, page_size;
251
252 page_size = getpagesize();
253 start = (unsigned long)addr;
254 start &= ~(page_size - 1);
255
256 end = (unsigned long)addr + size;
257 end += page_size - 1;
258 end &= ~(page_size - 1);
259
260 mprotect((void *)start, end - start,
261 PROT_READ | PROT_WRITE | PROT_EXEC);
262}
263#endif
264#else // VBOX
265static void map_exec(void *addr, long size)
266{
267 RTMemProtect(addr, size,
268 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
269}
270#endif
271
272static void page_init(void)
273{
274 /* NOTE: we can always suppose that qemu_host_page_size >=
275 TARGET_PAGE_SIZE */
276#ifdef VBOX
277 RTMemProtect(code_gen_buffer, sizeof(code_gen_buffer),
278 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
279 qemu_real_host_page_size = PAGE_SIZE;
280#else /* !VBOX */
281#ifdef _WIN32
282 {
283 SYSTEM_INFO system_info;
284 DWORD old_protect;
285
286 GetSystemInfo(&system_info);
287 qemu_real_host_page_size = system_info.dwPageSize;
288 }
289#else
290 qemu_real_host_page_size = getpagesize();
291#endif
292#endif /* !VBOX */
293
294 if (qemu_host_page_size == 0)
295 qemu_host_page_size = qemu_real_host_page_size;
296 if (qemu_host_page_size < TARGET_PAGE_SIZE)
297 qemu_host_page_size = TARGET_PAGE_SIZE;
298 qemu_host_page_bits = 0;
299#ifndef VBOX
300 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
301#else
302 while ((1 << qemu_host_page_bits) < (int)qemu_host_page_size)
303#endif
304 qemu_host_page_bits++;
305 qemu_host_page_mask = ~(qemu_host_page_size - 1);
306 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
307 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
308#ifdef VBOX
309 /* We use other means to set the reserved bit on our pages */
310#else
311#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
312 {
313 long long startaddr, endaddr;
314 FILE *f;
315 int n;
316
317 mmap_lock();
318 last_brk = (unsigned long)sbrk(0);
319 f = fopen("/proc/self/maps", "r");
320 if (f) {
321 do {
322 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
323 if (n == 2) {
324 startaddr = MIN(startaddr,
325 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
326 endaddr = MIN(endaddr,
327 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
328 page_set_flags(startaddr & TARGET_PAGE_MASK,
329 TARGET_PAGE_ALIGN(endaddr),
330 PAGE_RESERVED);
331 }
332 } while (!feof(f));
333 fclose(f);
334 }
335 mmap_unlock();
336 }
337#endif
338#endif
339}
340
341#ifndef VBOX
342static inline PageDesc **page_l1_map(target_ulong index)
343#else
344DECLINLINE(PageDesc **) page_l1_map(target_ulong index)
345#endif
346{
347#if TARGET_LONG_BITS > 32
348 /* Host memory outside guest VM. For 32-bit targets we have already
349 excluded high addresses. */
350 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
351 return NULL;
352#endif
353 return &l1_map[index >> L2_BITS];
354}
355
356#ifndef VBOX
357static inline PageDesc *page_find_alloc(target_ulong index)
358#else
359DECLINLINE(PageDesc *) page_find_alloc(target_ulong index)
360#endif
361{
362 PageDesc **lp, *p;
363 lp = page_l1_map(index);
364 if (!lp)
365 return NULL;
366
367 p = *lp;
368 if (!p) {
369 /* allocate if not found */
370#if defined(CONFIG_USER_ONLY)
371 unsigned long addr;
372 size_t len = sizeof(PageDesc) * L2_SIZE;
373 /* Don't use qemu_malloc because it may recurse. */
374 p = mmap(0, len, PROT_READ | PROT_WRITE,
375 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
376 *lp = p;
377 addr = h2g(p);
378 if (addr == (target_ulong)addr) {
379 page_set_flags(addr & TARGET_PAGE_MASK,
380 TARGET_PAGE_ALIGN(addr + len),
381 PAGE_RESERVED);
382 }
383#else
384 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
385 *lp = p;
386#endif
387 }
388 return p + (index & (L2_SIZE - 1));
389}
390
391#ifndef VBOX
392static inline PageDesc *page_find(target_ulong index)
393#else
394DECLINLINE(PageDesc *) page_find(target_ulong index)
395#endif
396{
397 PageDesc **lp, *p;
398 lp = page_l1_map(index);
399 if (!lp)
400 return NULL;
401
402 p = *lp;
403 if (!p)
404 return 0;
405 return p + (index & (L2_SIZE - 1));
406}
407
408static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
409{
410 void **lp, **p;
411 PhysPageDesc *pd;
412
413 p = (void **)l1_phys_map;
414#if TARGET_PHYS_ADDR_SPACE_BITS > 32
415
416#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
417#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
418#endif
419 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
420 p = *lp;
421 if (!p) {
422 /* allocate if not found */
423 if (!alloc)
424 return NULL;
425 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
426 memset(p, 0, sizeof(void *) * L1_SIZE);
427 *lp = p;
428 }
429#endif
430 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
431 pd = *lp;
432 if (!pd) {
433 int i;
434 /* allocate if not found */
435 if (!alloc)
436 return NULL;
437 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
438 *lp = pd;
439 for (i = 0; i < L2_SIZE; i++)
440 pd[i].phys_offset = IO_MEM_UNASSIGNED;
441 }
442#if defined(VBOX) && !defined(VBOX_WITH_NEW_PHYS_CODE)
443 pd = ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
444 if (RT_UNLIKELY((pd->phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING))
445 remR3GrowDynRange(pd->phys_offset & TARGET_PAGE_MASK);
446 return pd;
447#else
448 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
449#endif
450}
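/* The physical page map mirrors the virtual one but adds an extra level of
   indirection when TARGET_PHYS_ADDR_SPACE_BITS exceeds 32.  Leaf entries
   start out as IO_MEM_UNASSIGNED; in the VBox build without the new phys
   code, a leaf marked IO_MEM_RAM_MISSING is materialized on first touch via
   remR3GrowDynRange() before the descriptor is returned. */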
451
452#ifndef VBOX
453static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
454#else
455DECLINLINE(PhysPageDesc *) phys_page_find(target_phys_addr_t index)
456#endif
457{
458 return phys_page_find_alloc(index, 0);
459}
460
461#if !defined(CONFIG_USER_ONLY)
462static void tlb_protect_code(ram_addr_t ram_addr);
463static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
464 target_ulong vaddr);
465#define mmap_lock() do { } while(0)
466#define mmap_unlock() do { } while(0)
467#endif
468
469#ifdef VBOX
470/** @todo nike: isn't 32M too much? */
471#endif
472#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
473
474#if defined(CONFIG_USER_ONLY)
475/* Currently it is not recommended to allocate big chunks of data in
476 user mode. This will change once a dedicated libc is used. */
477#define USE_STATIC_CODE_GEN_BUFFER
478#endif
479
480/* VBox allocates codegen buffer dynamically */
481#ifndef VBOX
482#ifdef USE_STATIC_CODE_GEN_BUFFER
483static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
484#endif
485#endif
486
487static void code_gen_alloc(unsigned long tb_size)
488{
489#ifdef USE_STATIC_CODE_GEN_BUFFER
490 code_gen_buffer = static_code_gen_buffer;
491 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
492 map_exec(code_gen_buffer, code_gen_buffer_size);
493#else
494 code_gen_buffer_size = tb_size;
495 if (code_gen_buffer_size == 0) {
496#if defined(CONFIG_USER_ONLY)
497 /* in user mode, phys_ram_size is not meaningful */
498 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
499#else
500 /* XXX: needs adjustments */
501 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
502#endif
503 }
504 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
505 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
506 /* The code gen buffer location may have constraints depending on
507 the host cpu and OS */
508#ifdef VBOX
509 code_gen_buffer = RTMemExecAlloc(code_gen_buffer_size);
510
511 if (!code_gen_buffer) {
512 LogRel(("REM: failed to allocate codegen buffer %lld\n",
513 code_gen_buffer_size));
514 return;
515 }
516#else //!VBOX
517#if defined(__linux__)
518 {
519 int flags;
520 void *start = NULL;
521
522 flags = MAP_PRIVATE | MAP_ANONYMOUS;
523#if defined(__x86_64__)
524 flags |= MAP_32BIT;
525 /* Cannot map more than that */
526 if (code_gen_buffer_size > (800 * 1024 * 1024))
527 code_gen_buffer_size = (800 * 1024 * 1024);
528#elif defined(__sparc_v9__)
529 // Map the buffer below 2G, so we can use direct calls and branches
530 flags |= MAP_FIXED;
531 start = (void *) 0x60000000UL;
532 if (code_gen_buffer_size > (512 * 1024 * 1024))
533 code_gen_buffer_size = (512 * 1024 * 1024);
534#endif
535 code_gen_buffer = mmap(start, code_gen_buffer_size,
536 PROT_WRITE | PROT_READ | PROT_EXEC,
537 flags, -1, 0);
538 if (code_gen_buffer == MAP_FAILED) {
539 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
540 exit(1);
541 }
542 }
543#elif defined(__FreeBSD__)
544 {
545 int flags;
546 void *addr = NULL;
547 flags = MAP_PRIVATE | MAP_ANONYMOUS;
548#if defined(__x86_64__)
549 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
550 * 0x40000000 is free */
551 flags |= MAP_FIXED;
552 addr = (void *)0x40000000;
553 /* Cannot map more than that */
554 if (code_gen_buffer_size > (800 * 1024 * 1024))
555 code_gen_buffer_size = (800 * 1024 * 1024);
556#endif
557 code_gen_buffer = mmap(addr, code_gen_buffer_size,
558 PROT_WRITE | PROT_READ | PROT_EXEC,
559 flags, -1, 0);
560 if (code_gen_buffer == MAP_FAILED) {
561 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
562 exit(1);
563 }
564 }
565#else
566 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
567 if (!code_gen_buffer) {
568 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
569 exit(1);
570 }
571 map_exec(code_gen_buffer, code_gen_buffer_size);
572#endif
573 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
574#endif /* !VBOX */
575#endif /* !USE_STATIC_CODE_GEN_BUFFER */
576#ifndef VBOX
577 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
578#else
579 map_exec(code_gen_prologue, _1K);
580#endif
581
582 code_gen_buffer_max_size = code_gen_buffer_size -
583 code_gen_max_block_size();
584 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
585 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
586}
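/* Sizing note: code_gen_buffer_max_size leaves room for one worst-case
   translation block (code_gen_max_block_size()), so a TB that has been
   allocated can always be generated without overflowing the buffer, and
   code_gen_max_blocks assumes CODE_GEN_AVG_BLOCK_SIZE bytes of generated
   code per TB on average. */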
587
588/* Must be called before using the QEMU cpus. 'tb_size' is the size
589 (in bytes) allocated to the translation buffer. Zero means default
590 size. */
591void cpu_exec_init_all(unsigned long tb_size)
592{
593 cpu_gen_init();
594 code_gen_alloc(tb_size);
595 code_gen_ptr = code_gen_buffer;
596 page_init();
597#if !defined(CONFIG_USER_ONLY)
598 io_mem_init();
599#endif
600}
601
602#ifndef VBOX
603#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
604
605#define CPU_COMMON_SAVE_VERSION 1
606
607static void cpu_common_save(QEMUFile *f, void *opaque)
608{
609 CPUState *env = opaque;
610
611 qemu_put_be32s(f, &env->halted);
612 qemu_put_be32s(f, &env->interrupt_request);
613}
614
615static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
616{
617 CPUState *env = opaque;
618
619 if (version_id != CPU_COMMON_SAVE_VERSION)
620 return -EINVAL;
621
622 qemu_get_be32s(f, &env->halted);
623 qemu_get_be32s(f, &env->interrupt_request);
624 tlb_flush(env, 1);
625
626 return 0;
627}
628#endif
629#endif //!VBOX
630
631void cpu_exec_init(CPUState *env)
632{
633 CPUState **penv;
634 int cpu_index;
635
636 env->next_cpu = NULL;
637 penv = &first_cpu;
638 cpu_index = 0;
639 while (*penv != NULL) {
640 penv = (CPUState **)&(*penv)->next_cpu;
641 cpu_index++;
642 }
643 env->cpu_index = cpu_index;
644 env->nb_watchpoints = 0;
645 *penv = env;
646#ifndef VBOX
647#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
648 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
649 cpu_common_save, cpu_common_load, env);
650 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
651 cpu_save, cpu_load, env);
652#endif
653#endif // !VBOX
654}
655
656#ifndef VBOX
657static inline void invalidate_page_bitmap(PageDesc *p)
658#else
659DECLINLINE(void) invalidate_page_bitmap(PageDesc *p)
660#endif
661{
662 if (p->code_bitmap) {
663 qemu_free(p->code_bitmap);
664 p->code_bitmap = NULL;
665 }
666 p->code_write_count = 0;
667}
668
669/* set all the 'first_tb' fields in all PageDescs to NULL */
670static void page_flush_tb(void)
671{
672 int i, j;
673 PageDesc *p;
674
675 for(i = 0; i < L1_SIZE; i++) {
676 p = l1_map[i];
677 if (p) {
678 for(j = 0; j < L2_SIZE; j++) {
679 p->first_tb = NULL;
680 invalidate_page_bitmap(p);
681 p++;
682 }
683 }
684 }
685}
686
687/* flush all the translation blocks */
688/* XXX: tb_flush is currently not thread safe */
689void tb_flush(CPUState *env1)
690{
691 CPUState *env;
692#if defined(DEBUG_FLUSH)
693 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
694 (unsigned long)(code_gen_ptr - code_gen_buffer),
695 nb_tbs, nb_tbs > 0 ?
696 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
697#endif
698 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
699 cpu_abort(env1, "Internal error: code buffer overflow\n");
700
701 nb_tbs = 0;
702
703 for(env = first_cpu; env != NULL; env = env->next_cpu) {
704 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
705 }
706
707 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
708 page_flush_tb();
709
710 code_gen_ptr = code_gen_buffer;
711 /* XXX: flush processor icache at this point if cache flush is
712 expensive */
713 tb_flush_count++;
714}
715
716#ifdef DEBUG_TB_CHECK
717static void tb_invalidate_check(target_ulong address)
718{
719 TranslationBlock *tb;
720 int i;
721 address &= TARGET_PAGE_MASK;
722 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
723 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
724 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
725 address >= tb->pc + tb->size)) {
726 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
727 address, (long)tb->pc, tb->size);
728 }
729 }
730 }
731}
732
733/* verify that all the pages have correct rights for code */
734static void tb_page_check(void)
735{
736 TranslationBlock *tb;
737 int i, flags1, flags2;
738
739 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
740 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
741 flags1 = page_get_flags(tb->pc);
742 flags2 = page_get_flags(tb->pc + tb->size - 1);
743 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
744 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
745 (long)tb->pc, tb->size, flags1, flags2);
746 }
747 }
748 }
749}
750
751static void tb_jmp_check(TranslationBlock *tb)
752{
753 TranslationBlock *tb1;
754 unsigned int n1;
755
756 /* suppress any remaining jumps to this TB */
757 tb1 = tb->jmp_first;
758 for(;;) {
759 n1 = (long)tb1 & 3;
760 tb1 = (TranslationBlock *)((long)tb1 & ~3);
761 if (n1 == 2)
762 break;
763 tb1 = tb1->jmp_next[n1];
764 }
765 /* check end of list */
766 if (tb1 != tb) {
767 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
768 }
769}
770#endif // DEBUG_TB_CHECK
771
772/* invalidate one TB */
773#ifndef VBOX
774static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
775 int next_offset)
776#else
777DECLINLINE(void) tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
778 int next_offset)
779#endif
780{
781 TranslationBlock *tb1;
782 for(;;) {
783 tb1 = *ptb;
784 if (tb1 == tb) {
785 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
786 break;
787 }
788 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
789 }
790}
791
792#ifndef VBOX
793static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
794#else
795DECLINLINE(void) tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
796#endif
797{
798 TranslationBlock *tb1;
799 unsigned int n1;
800
801 for(;;) {
802 tb1 = *ptb;
803 n1 = (long)tb1 & 3;
804 tb1 = (TranslationBlock *)((long)tb1 & ~3);
805 if (tb1 == tb) {
806 *ptb = tb1->page_next[n1];
807 break;
808 }
809 ptb = &tb1->page_next[n1];
810 }
811}
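/* Page lists use tagged pointers: the low 2 bits of first_tb/page_next
   store which of the TB's two page slots (0 or 1) the link belongs to,
   so the walk above masks with ~3 to recover the TranslationBlock and
   with 3 to pick the matching page_next[] slot. */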
812
813#ifndef VBOX
814static inline void tb_jmp_remove(TranslationBlock *tb, int n)
815#else
816DECLINLINE(void) tb_jmp_remove(TranslationBlock *tb, int n)
817#endif
818{
819 TranslationBlock *tb1, **ptb;
820 unsigned int n1;
821
822 ptb = &tb->jmp_next[n];
823 tb1 = *ptb;
824 if (tb1) {
825 /* find tb(n) in circular list */
826 for(;;) {
827 tb1 = *ptb;
828 n1 = (long)tb1 & 3;
829 tb1 = (TranslationBlock *)((long)tb1 & ~3);
830 if (n1 == n && tb1 == tb)
831 break;
832 if (n1 == 2) {
833 ptb = &tb1->jmp_first;
834 } else {
835 ptb = &tb1->jmp_next[n1];
836 }
837 }
838 /* now we can suppress tb(n) from the list */
839 *ptb = tb->jmp_next[n];
840
841 tb->jmp_next[n] = NULL;
842 }
843}
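/* The jump lists use the same low-bit tagging: jmp_first points to a
   circular list of TBs that jump into this one, each link tagged with the
   outgoing slot (0 or 1) it came from, and the list is terminated by the
   owning TB tagged with 2 (see tb_phys_invalidate/tb_link_phys). */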
844
845/* reset the jump entry 'n' of a TB so that it is not chained to
846 another TB */
847#ifndef VBOX
848static inline void tb_reset_jump(TranslationBlock *tb, int n)
849#else
850DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
851#endif
852{
853 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
854}
855
856void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
857{
858 CPUState *env;
859 PageDesc *p;
860 unsigned int h, n1;
861 target_phys_addr_t phys_pc;
862 TranslationBlock *tb1, *tb2;
863
864 /* remove the TB from the hash list */
865 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
866 h = tb_phys_hash_func(phys_pc);
867 tb_remove(&tb_phys_hash[h], tb,
868 offsetof(TranslationBlock, phys_hash_next));
869
870 /* remove the TB from the page list */
871 if (tb->page_addr[0] != page_addr) {
872 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
873 tb_page_remove(&p->first_tb, tb);
874 invalidate_page_bitmap(p);
875 }
876 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
877 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
878 tb_page_remove(&p->first_tb, tb);
879 invalidate_page_bitmap(p);
880 }
881
882 tb_invalidated_flag = 1;
883
884 /* remove the TB from the hash list */
885 h = tb_jmp_cache_hash_func(tb->pc);
886 for(env = first_cpu; env != NULL; env = env->next_cpu) {
887 if (env->tb_jmp_cache[h] == tb)
888 env->tb_jmp_cache[h] = NULL;
889 }
890
891 /* suppress this TB from the two jump lists */
892 tb_jmp_remove(tb, 0);
893 tb_jmp_remove(tb, 1);
894
895 /* suppress any remaining jumps to this TB */
896 tb1 = tb->jmp_first;
897 for(;;) {
898 n1 = (long)tb1 & 3;
899 if (n1 == 2)
900 break;
901 tb1 = (TranslationBlock *)((long)tb1 & ~3);
902 tb2 = tb1->jmp_next[n1];
903 tb_reset_jump(tb1, n1);
904 tb1->jmp_next[n1] = NULL;
905 tb1 = tb2;
906 }
907 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
908
909#ifndef VBOX
910 tb_phys_invalidate_count++;
911#endif
912}
913
914
915#ifdef VBOX
916void tb_invalidate_virt(CPUState *env, uint32_t eip)
917{
918# if 1
919 tb_flush(env);
920# else
921 uint8_t *cs_base, *pc;
922 unsigned int flags, h, phys_pc;
923 TranslationBlock *tb, **ptb;
924
925 flags = env->hflags;
926 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
927 cs_base = env->segs[R_CS].base;
928 pc = cs_base + eip;
929
930 tb = tb_find(&ptb, (unsigned long)pc, (unsigned long)cs_base,
931 flags);
932
933 if(tb)
934 {
935# ifdef DEBUG
936 printf("invalidating TB (%08X) at %08X\n", tb, eip);
937# endif
938 tb_invalidate(tb);
939 //Note: this will leak TBs, but the whole cache will be flushed
940 // when it happens too often
941 tb->pc = 0;
942 tb->cs_base = 0;
943 tb->flags = 0;
944 }
945# endif
946}
947
948# ifdef VBOX_STRICT
949/**
950 * Gets the page offset.
951 */
952unsigned long get_phys_page_offset(target_ulong addr)
953{
954 PhysPageDesc *p = phys_page_find(addr >> TARGET_PAGE_BITS);
955 return p ? p->phys_offset : 0;
956}
957# endif /* VBOX_STRICT */
958#endif /* VBOX */
959
960#ifndef VBOX
961static inline void set_bits(uint8_t *tab, int start, int len)
962#else
963DECLINLINE(void) set_bits(uint8_t *tab, int start, int len)
964#endif
965{
966 int end, mask, end1;
967
968 end = start + len;
969 tab += start >> 3;
970 mask = 0xff << (start & 7);
971 if ((start & ~7) == (end & ~7)) {
972 if (start < end) {
973 mask &= ~(0xff << (end & 7));
974 *tab |= mask;
975 }
976 } else {
977 *tab++ |= mask;
978 start = (start + 8) & ~7;
979 end1 = end & ~7;
980 while (start < end1) {
981 *tab++ = 0xff;
982 start += 8;
983 }
984 if (start < end) {
985 mask = ~(0xff << (end & 7));
986 *tab |= mask;
987 }
988 }
989}
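/* Worked example (illustrative): set_bits(tab, 3, 6) marks bits 3..8,
   i.e. tab[0] |= 0xf8 (bits 3-7) and tab[1] |= 0x01 (bit 8). */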
990
991static void build_page_bitmap(PageDesc *p)
992{
993 int n, tb_start, tb_end;
994 TranslationBlock *tb;
995
996 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
997 if (!p->code_bitmap)
998 return;
999 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
1000
1001 tb = p->first_tb;
1002 while (tb != NULL) {
1003 n = (long)tb & 3;
1004 tb = (TranslationBlock *)((long)tb & ~3);
1005 /* NOTE: this is subtle as a TB may span two physical pages */
1006 if (n == 0) {
1007 /* NOTE: tb_end may be after the end of the page, but
1008 it is not a problem */
1009 tb_start = tb->pc & ~TARGET_PAGE_MASK;
1010 tb_end = tb_start + tb->size;
1011 if (tb_end > TARGET_PAGE_SIZE)
1012 tb_end = TARGET_PAGE_SIZE;
1013 } else {
1014 tb_start = 0;
1015 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1016 }
1017 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
1018 tb = tb->page_next[n];
1019 }
1020}
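/* The resulting bitmap has one bit per byte of the page, set wherever some
   TB's guest code lives; tb_invalidate_phys_page_fast() consults it so that
   writes which miss all code bytes skip the expensive range invalidation. */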
1021
1022TranslationBlock *tb_gen_code(CPUState *env,
1023 target_ulong pc, target_ulong cs_base,
1024 int flags, int cflags)
1025{
1026 TranslationBlock *tb;
1027 uint8_t *tc_ptr;
1028 target_ulong phys_pc, phys_page2, virt_page2;
1029 int code_gen_size;
1030
1031 phys_pc = get_phys_addr_code(env, pc);
1032 tb = tb_alloc(pc);
1033 if (!tb) {
1034 /* flush must be done */
1035 tb_flush(env);
1036 /* cannot fail at this point */
1037 tb = tb_alloc(pc);
1038 /* Don't forget to invalidate previous TB info. */
1039 tb_invalidated_flag = 1;
1040 }
1041 tc_ptr = code_gen_ptr;
1042 tb->tc_ptr = tc_ptr;
1043 tb->cs_base = cs_base;
1044 tb->flags = flags;
1045 tb->cflags = cflags;
1046 cpu_gen_code(env, tb, &code_gen_size);
1047 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
1048
1049 /* check next page if needed */
1050 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1051 phys_page2 = -1;
1052 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1053 phys_page2 = get_phys_addr_code(env, virt_page2);
1054 }
1055 tb_link_phys(tb, phys_pc, phys_page2);
1056 return tb;
1057}
1058
1059/* invalidate all TBs which intersect with the target physical page
1060 range [start, end). NOTE: start and end must refer to
1061 the same physical page. 'is_cpu_write_access' should be true if called
1062 from a real cpu write access: the virtual CPU will exit the current
1063 TB if code is modified inside this TB. */
1064void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
1065 int is_cpu_write_access)
1066{
1067 int n, current_tb_modified, current_tb_not_found, current_flags;
1068 CPUState *env = cpu_single_env;
1069 PageDesc *p;
1070 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
1071 target_ulong tb_start, tb_end;
1072 target_ulong current_pc, current_cs_base;
1073
1074 p = page_find(start >> TARGET_PAGE_BITS);
1075 if (!p)
1076 return;
1077 if (!p->code_bitmap &&
1078 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1079 is_cpu_write_access) {
1080 /* build code bitmap */
1081 build_page_bitmap(p);
1082 }
1083
1084 /* we remove all the TBs in the range [start, end[ */
1085 /* XXX: see if in some cases it could be faster to invalidate all the code */
1086 current_tb_not_found = is_cpu_write_access;
1087 current_tb_modified = 0;
1088 current_tb = NULL; /* avoid warning */
1089 current_pc = 0; /* avoid warning */
1090 current_cs_base = 0; /* avoid warning */
1091 current_flags = 0; /* avoid warning */
1092 tb = p->first_tb;
1093 while (tb != NULL) {
1094 n = (long)tb & 3;
1095 tb = (TranslationBlock *)((long)tb & ~3);
1096 tb_next = tb->page_next[n];
1097 /* NOTE: this is subtle as a TB may span two physical pages */
1098 if (n == 0) {
1099 /* NOTE: tb_end may be after the end of the page, but
1100 it is not a problem */
1101 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1102 tb_end = tb_start + tb->size;
1103 } else {
1104 tb_start = tb->page_addr[1];
1105 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1106 }
1107 if (!(tb_end <= start || tb_start >= end)) {
1108#ifdef TARGET_HAS_PRECISE_SMC
1109 if (current_tb_not_found) {
1110 current_tb_not_found = 0;
1111 current_tb = NULL;
1112 if (env->mem_io_pc) {
1113 /* now we have a real cpu fault */
1114 current_tb = tb_find_pc(env->mem_io_pc);
1115 }
1116 }
1117 if (current_tb == tb &&
1118 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1119 /* If we are modifying the current TB, we must stop
1120 its execution. We could be more precise by checking
1121 that the modification is after the current PC, but it
1122 would require a specialized function to partially
1123 restore the CPU state */
1124
1125 current_tb_modified = 1;
1126 cpu_restore_state(current_tb, env,
1127 env->mem_io_pc, NULL);
1128#if defined(TARGET_I386)
1129 current_flags = env->hflags;
1130 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
1131 current_cs_base = (target_ulong)env->segs[R_CS].base;
1132 current_pc = current_cs_base + env->eip;
1133#else
1134#error unsupported CPU
1135#endif
1136 }
1137#endif /* TARGET_HAS_PRECISE_SMC */
1138 /* we need to do that to handle the case where a signal
1139 occurs while doing tb_phys_invalidate() */
1140 saved_tb = NULL;
1141 if (env) {
1142 saved_tb = env->current_tb;
1143 env->current_tb = NULL;
1144 }
1145 tb_phys_invalidate(tb, -1);
1146 if (env) {
1147 env->current_tb = saved_tb;
1148 if (env->interrupt_request && env->current_tb)
1149 cpu_interrupt(env, env->interrupt_request);
1150 }
1151 }
1152 tb = tb_next;
1153 }
1154#if !defined(CONFIG_USER_ONLY)
1155 /* if no code remaining, no need to continue to use slow writes */
1156 if (!p->first_tb) {
1157 invalidate_page_bitmap(p);
1158 if (is_cpu_write_access) {
1159 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1160 }
1161 }
1162#endif
1163#ifdef TARGET_HAS_PRECISE_SMC
1164 if (current_tb_modified) {
1165 /* we generate a block containing just the instruction
1166 modifying the memory. It will ensure that it cannot modify
1167 itself */
1168 env->current_tb = NULL;
1169 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1170 cpu_resume_from_signal(env, NULL);
1171 }
1172#endif
1173}
1174
1175
1176/* len must be <= 8 and start must be a multiple of len */
1177#ifndef VBOX
1178static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1179#else
1180DECLINLINE(void) tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1181#endif
1182{
1183 PageDesc *p;
1184 int offset, b;
1185#if 0
1186 if (1) {
1187 if (loglevel) {
1188 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1189 cpu_single_env->mem_io_vaddr, len,
1190 cpu_single_env->eip,
1191 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1192 }
1193 }
1194#endif
1195 p = page_find(start >> TARGET_PAGE_BITS);
1196 if (!p)
1197 return;
1198 if (p->code_bitmap) {
1199 offset = start & ~TARGET_PAGE_MASK;
1200 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1201 if (b & ((1 << len) - 1))
1202 goto do_invalidate;
1203 } else {
1204 do_invalidate:
1205 tb_invalidate_phys_page_range(start, start + len, 1);
1206 }
1207}
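/* Example of the bitmap test above (illustrative numbers): for a 4-byte
   write at page offset 0x123, offset >> 3 selects code_bitmap[0x24],
   b = that byte >> 3, and b & 0x0f is non-zero exactly when one of the
   bytes at offsets 0x123..0x126 belongs to translated code. */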
1208
1209
1210#if !defined(CONFIG_SOFTMMU)
1211static void tb_invalidate_phys_page(target_phys_addr_t addr,
1212 unsigned long pc, void *puc)
1213{
1214 int n, current_flags, current_tb_modified;
1215 target_ulong current_pc, current_cs_base;
1216 PageDesc *p;
1217 TranslationBlock *tb, *current_tb;
1218#ifdef TARGET_HAS_PRECISE_SMC
1219 CPUState *env = cpu_single_env;
1220#endif
1221
1222 addr &= TARGET_PAGE_MASK;
1223 p = page_find(addr >> TARGET_PAGE_BITS);
1224 if (!p)
1225 return;
1226 tb = p->first_tb;
1227 current_tb_modified = 0;
1228 current_tb = NULL;
1229 current_pc = 0; /* avoid warning */
1230 current_cs_base = 0; /* avoid warning */
1231 current_flags = 0; /* avoid warning */
1232#ifdef TARGET_HAS_PRECISE_SMC
1233 if (tb && pc != 0) {
1234 current_tb = tb_find_pc(pc);
1235 }
1236#endif
1237 while (tb != NULL) {
1238 n = (long)tb & 3;
1239 tb = (TranslationBlock *)((long)tb & ~3);
1240#ifdef TARGET_HAS_PRECISE_SMC
1241 if (current_tb == tb &&
1242 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1243 /* If we are modifying the current TB, we must stop
1244 its execution. We could be more precise by checking
1245 that the modification is after the current PC, but it
1246 would require a specialized function to partially
1247 restore the CPU state */
1248
1249 current_tb_modified = 1;
1250 cpu_restore_state(current_tb, env, pc, puc);
1251#if defined(TARGET_I386)
1252 current_flags = env->hflags;
1253 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
1254 current_cs_base = (target_ulong)env->segs[R_CS].base;
1255 current_pc = current_cs_base + env->eip;
1256#else
1257#error unsupported CPU
1258#endif
1259 }
1260#endif /* TARGET_HAS_PRECISE_SMC */
1261 tb_phys_invalidate(tb, addr);
1262 tb = tb->page_next[n];
1263 }
1264 p->first_tb = NULL;
1265#ifdef TARGET_HAS_PRECISE_SMC
1266 if (current_tb_modified) {
1267 /* we generate a block containing just the instruction
1268 modifying the memory. It will ensure that it cannot modify
1269 itself */
1270 env->current_tb = NULL;
1271 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1272 cpu_resume_from_signal(env, puc);
1273 }
1274#endif
1275}
1276#endif
1277
1278/* add the tb in the target page and protect it if necessary */
1279#ifndef VBOX
1280static inline void tb_alloc_page(TranslationBlock *tb,
1281 unsigned int n, target_ulong page_addr)
1282#else
1283DECLINLINE(void) tb_alloc_page(TranslationBlock *tb,
1284 unsigned int n, target_ulong page_addr)
1285#endif
1286{
1287 PageDesc *p;
1288 TranslationBlock *last_first_tb;
1289
1290 tb->page_addr[n] = page_addr;
1291 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1292 tb->page_next[n] = p->first_tb;
1293 last_first_tb = p->first_tb;
1294 p->first_tb = (TranslationBlock *)((long)tb | n);
1295 invalidate_page_bitmap(p);
1296
1297#if defined(TARGET_HAS_SMC) || 1
1298
1299#if defined(CONFIG_USER_ONLY)
1300 if (p->flags & PAGE_WRITE) {
1301 target_ulong addr;
1302 PageDesc *p2;
1303 int prot;
1304
1305 /* force the host page to be non-writable (writes will have a
1306 page fault + mprotect overhead) */
1307 page_addr &= qemu_host_page_mask;
1308 prot = 0;
1309 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1310 addr += TARGET_PAGE_SIZE) {
1311
1312 p2 = page_find (addr >> TARGET_PAGE_BITS);
1313 if (!p2)
1314 continue;
1315 prot |= p2->flags;
1316 p2->flags &= ~PAGE_WRITE;
1317 page_get_flags(addr);
1318 }
1319 mprotect(g2h(page_addr), qemu_host_page_size,
1320 (prot & PAGE_BITS) & ~PAGE_WRITE);
1321#ifdef DEBUG_TB_INVALIDATE
1322 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1323 page_addr);
1324#endif
1325 }
1326#else
1327 /* if some code is already present, then the pages are already
1328 protected. So we handle the case where only the first TB is
1329 allocated in a physical page */
1330 if (!last_first_tb) {
1331 tlb_protect_code(page_addr);
1332 }
1333#endif
1334
1335#endif /* TARGET_HAS_SMC */
1336}
1337
1338/* Allocate a new translation block. Flush the translation buffer if
1339 too many translation blocks or too much generated code. */
1340TranslationBlock *tb_alloc(target_ulong pc)
1341{
1342 TranslationBlock *tb;
1343
1344 if (nb_tbs >= code_gen_max_blocks ||
1345#ifndef VBOX
1346 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1347#else
1348 (code_gen_ptr - code_gen_buffer) >= (int)code_gen_buffer_max_size)
1349#endif
1350 return NULL;
1351 tb = &tbs[nb_tbs++];
1352 tb->pc = pc;
1353 tb->cflags = 0;
1354 return tb;
1355}
1356
1357void tb_free(TranslationBlock *tb)
1358{
1359 /* In practice this is mostly used for single-use temporary TBs.
1360 Ignore the hard cases and just back up if this TB happens to
1361 be the last one generated. */
1362 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1363 code_gen_ptr = tb->tc_ptr;
1364 nb_tbs--;
1365 }
1366}
1367
1368/* add a new TB and link it to the physical page tables. phys_page2 is
1369 (-1) to indicate that only one page contains the TB. */
1370void tb_link_phys(TranslationBlock *tb,
1371 target_ulong phys_pc, target_ulong phys_page2)
1372{
1373 unsigned int h;
1374 TranslationBlock **ptb;
1375
1376 /* Grab the mmap lock to stop another thread invalidating this TB
1377 before we are done. */
1378 mmap_lock();
1379 /* add in the physical hash table */
1380 h = tb_phys_hash_func(phys_pc);
1381 ptb = &tb_phys_hash[h];
1382 tb->phys_hash_next = *ptb;
1383 *ptb = tb;
1384
1385 /* add in the page list */
1386 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1387 if (phys_page2 != -1)
1388 tb_alloc_page(tb, 1, phys_page2);
1389 else
1390 tb->page_addr[1] = -1;
1391
1392 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1393 tb->jmp_next[0] = NULL;
1394 tb->jmp_next[1] = NULL;
1395
1396 /* init original jump addresses */
1397 if (tb->tb_next_offset[0] != 0xffff)
1398 tb_reset_jump(tb, 0);
1399 if (tb->tb_next_offset[1] != 0xffff)
1400 tb_reset_jump(tb, 1);
1401
1402#ifdef DEBUG_TB_CHECK
1403 tb_page_check();
1404#endif
1405 mmap_unlock();
1406}
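/* A TB whose guest code crosses a page boundary is entered on both pages'
   lists (phys_page2 != -1), so invalidating either page will find and
   retire it. */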
1407
1408/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1409 tb[1].tc_ptr. Return NULL if not found */
1410TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1411{
1412 int m_min, m_max, m;
1413 unsigned long v;
1414 TranslationBlock *tb;
1415
1416 if (nb_tbs <= 0)
1417 return NULL;
1418 if (tc_ptr < (unsigned long)code_gen_buffer ||
1419 tc_ptr >= (unsigned long)code_gen_ptr)
1420 return NULL;
1421 /* binary search (cf Knuth) */
1422 m_min = 0;
1423 m_max = nb_tbs - 1;
1424 while (m_min <= m_max) {
1425 m = (m_min + m_max) >> 1;
1426 tb = &tbs[m];
1427 v = (unsigned long)tb->tc_ptr;
1428 if (v == tc_ptr)
1429 return tb;
1430 else if (tc_ptr < v) {
1431 m_max = m - 1;
1432 } else {
1433 m_min = m + 1;
1434 }
1435 }
1436 return &tbs[m_max];
1437}
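/* This works because tbs[] is filled in generation order and each TB's
   tc_ptr comes from the monotonically advancing code_gen_ptr, so the array
   is sorted by tc_ptr and the binary search lands on the last TB starting
   at or before tc_ptr. */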
1438
1439static void tb_reset_jump_recursive(TranslationBlock *tb);
1440
1441#ifndef VBOX
1442static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1443#else
1444DECLINLINE(void) tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1445#endif
1446{
1447 TranslationBlock *tb1, *tb_next, **ptb;
1448 unsigned int n1;
1449
1450 tb1 = tb->jmp_next[n];
1451 if (tb1 != NULL) {
1452 /* find head of list */
1453 for(;;) {
1454 n1 = (long)tb1 & 3;
1455 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1456 if (n1 == 2)
1457 break;
1458 tb1 = tb1->jmp_next[n1];
1459 }
1460 /* we are now sure that tb jumps to tb1 */
1461 tb_next = tb1;
1462
1463 /* remove tb from the jmp_first list */
1464 ptb = &tb_next->jmp_first;
1465 for(;;) {
1466 tb1 = *ptb;
1467 n1 = (long)tb1 & 3;
1468 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1469 if (n1 == n && tb1 == tb)
1470 break;
1471 ptb = &tb1->jmp_next[n1];
1472 }
1473 *ptb = tb->jmp_next[n];
1474 tb->jmp_next[n] = NULL;
1475
1476 /* suppress the jump to next tb in generated code */
1477 tb_reset_jump(tb, n);
1478
1479 /* suppress jumps in the tb on which we could have jumped */
1480 tb_reset_jump_recursive(tb_next);
1481 }
1482}
1483
1484static void tb_reset_jump_recursive(TranslationBlock *tb)
1485{
1486 tb_reset_jump_recursive2(tb, 0);
1487 tb_reset_jump_recursive2(tb, 1);
1488}
1489
1490#if defined(TARGET_HAS_ICE)
1491static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1492{
1493 target_ulong addr, pd;
1494 ram_addr_t ram_addr;
1495 PhysPageDesc *p;
1496
1497 addr = cpu_get_phys_page_debug(env, pc);
1498 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1499 if (!p) {
1500 pd = IO_MEM_UNASSIGNED;
1501 } else {
1502 pd = p->phys_offset;
1503 }
1504 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1505 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1506}
1507#endif
1508
1509/* Add a watchpoint. */
1510int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
1511{
1512 int i;
1513
1514 for (i = 0; i < env->nb_watchpoints; i++) {
1515 if (addr == env->watchpoint[i].vaddr)
1516 return 0;
1517 }
1518 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1519 return -1;
1520
1521 i = env->nb_watchpoints++;
1522 env->watchpoint[i].vaddr = addr;
1523 env->watchpoint[i].type = type;
1524 tlb_flush_page(env, addr);
1525 /* FIXME: This flush is needed because of the hack to make memory ops
1526 terminate the TB. It can be removed once the proper IO trap and
1527 re-execute bits are in. */
1528 tb_flush(env);
1529 return i;
1530}
1531
1532/* Remove a watchpoint. */
1533int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1534{
1535 int i;
1536
1537 for (i = 0; i < env->nb_watchpoints; i++) {
1538 if (addr == env->watchpoint[i].vaddr) {
1539 env->nb_watchpoints--;
1540 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1541 tlb_flush_page(env, addr);
1542 return 0;
1543 }
1544 }
1545 return -1;
1546}
1547
1548/* Remove all watchpoints. */
1549void cpu_watchpoint_remove_all(CPUState *env) {
1550 int i;
1551
1552 for (i = 0; i < env->nb_watchpoints; i++) {
1553 tlb_flush_page(env, env->watchpoint[i].vaddr);
1554 }
1555 env->nb_watchpoints = 0;
1556}
1557
1558/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1559 breakpoint is reached */
1560int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1561{
1562#if defined(TARGET_HAS_ICE)
1563 int i;
1564
1565 for(i = 0; i < env->nb_breakpoints; i++) {
1566 if (env->breakpoints[i] == pc)
1567 return 0;
1568 }
1569
1570 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1571 return -1;
1572 env->breakpoints[env->nb_breakpoints++] = pc;
1573
1574 breakpoint_invalidate(env, pc);
1575 return 0;
1576#else
1577 return -1;
1578#endif
1579}
1580
1581/* remove all breakpoints */
1582void cpu_breakpoint_remove_all(CPUState *env) {
1583#if defined(TARGET_HAS_ICE)
1584 int i;
1585 for(i = 0; i < env->nb_breakpoints; i++) {
1586 breakpoint_invalidate(env, env->breakpoints[i]);
1587 }
1588 env->nb_breakpoints = 0;
1589#endif
1590}
1591
1592/* remove a breakpoint */
1593int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1594{
1595#if defined(TARGET_HAS_ICE)
1596 int i;
1597 for(i = 0; i < env->nb_breakpoints; i++) {
1598 if (env->breakpoints[i] == pc)
1599 goto found;
1600 }
1601 return -1;
1602 found:
1603 env->nb_breakpoints--;
1604 if (i < env->nb_breakpoints)
1605 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1606
1607 breakpoint_invalidate(env, pc);
1608 return 0;
1609#else
1610 return -1;
1611#endif
1612}
1613
1614/* enable or disable single step mode. EXCP_DEBUG is returned by the
1615 CPU loop after each instruction */
1616void cpu_single_step(CPUState *env, int enabled)
1617{
1618#if defined(TARGET_HAS_ICE)
1619 if (env->singlestep_enabled != enabled) {
1620 env->singlestep_enabled = enabled;
1621 /* must flush all the translated code to avoid inconsistencies */
1622 /* XXX: only flush what is necessary */
1623 tb_flush(env);
1624 }
1625#endif
1626}
1627
1628#ifndef VBOX
1629/* enable or disable low-level logging */
1630void cpu_set_log(int log_flags)
1631{
1632 loglevel = log_flags;
1633 if (loglevel && !logfile) {
1634 logfile = fopen(logfilename, "w");
1635 if (!logfile) {
1636 perror(logfilename);
1637 _exit(1);
1638 }
1639#if !defined(CONFIG_SOFTMMU)
1640 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1641 {
1642 static uint8_t logfile_buf[4096];
1643 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1644 }
1645#else
1646 setvbuf(logfile, NULL, _IOLBF, 0);
1647#endif
1648 }
1649}
1650
1651void cpu_set_log_filename(const char *filename)
1652{
1653 logfilename = strdup(filename);
1654}
1655#endif /* !VBOX */
1656
1657/* mask must never be zero, except for A20 change call */
1658void cpu_interrupt(CPUState *env, int mask)
1659{
1660#if !defined(USE_NPTL)
1661 TranslationBlock *tb;
1662 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1663#endif
1664 int old_mask;
1665
1666 old_mask = env->interrupt_request;
1667#ifdef VBOX
1668 VM_ASSERT_EMT(env->pVM);
1669 ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, mask);
1670#else /* !VBOX */
1671 /* FIXME: This is probably not threadsafe. A different thread could
1672 be in the middle of a read-modify-write operation. */
1673 env->interrupt_request |= mask;
1674#endif /* !VBOX */
1675#if defined(USE_NPTL)
1676 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1677 problem and hope the cpu will stop of its own accord. For userspace
1678 emulation this often isn't actually as bad as it sounds. Often
1679 signals are used primarily to interrupt blocking syscalls. */
1680#else
1681 if (use_icount) {
1682 env->icount_decr.u16.high = 0xffff;
1683#ifndef CONFIG_USER_ONLY
1684 /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
1685 an async event happened and we need to process it. */
1686 if (!can_do_io(env)
1687 && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1688 cpu_abort(env, "Raised interrupt while not in I/O function");
1689 }
1690#endif
1691 } else {
1692 tb = env->current_tb;
1693 /* if the cpu is currently executing code, we must unlink it and
1694 all the potentially executing TB */
1695 if (tb && !testandset(&interrupt_lock)) {
1696 env->current_tb = NULL;
1697 tb_reset_jump_recursive(tb);
1698 resetlock(&interrupt_lock);
1699 }
1700 }
1701#endif
1702}
1703
1704void cpu_reset_interrupt(CPUState *env, int mask)
1705{
1706#ifdef VBOX
1707 /*
1708 * Note: the current implementation can be executed by another thread without problems; make sure this remains true
1709 * for future changes!
1710 */
1711 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~mask);
1712#else /* !VBOX */
1713 env->interrupt_request &= ~mask;
1714#endif /* !VBOX */
1715}
1716
1717#ifndef VBOX
1718CPULogItem cpu_log_items[] = {
1719 { CPU_LOG_TB_OUT_ASM, "out_asm",
1720 "show generated host assembly code for each compiled TB" },
1721 { CPU_LOG_TB_IN_ASM, "in_asm",
1722 "show target assembly code for each compiled TB" },
1723 { CPU_LOG_TB_OP, "op",
1724 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1725#ifdef TARGET_I386
1726 { CPU_LOG_TB_OP_OPT, "op_opt",
1727 "show micro ops after optimization for each compiled TB" },
1728#endif
1729 { CPU_LOG_INT, "int",
1730 "show interrupts/exceptions in short format" },
1731 { CPU_LOG_EXEC, "exec",
1732 "show trace before each executed TB (lots of logs)" },
1733 { CPU_LOG_TB_CPU, "cpu",
1734 "show CPU state before block translation" },
1735#ifdef TARGET_I386
1736 { CPU_LOG_PCALL, "pcall",
1737 "show protected mode far calls/returns/exceptions" },
1738#endif
1739#ifdef DEBUG_IOPORT
1740 { CPU_LOG_IOPORT, "ioport",
1741 "show all I/O port accesses" },
1742#endif
1743 { 0, NULL, NULL },
1744};
1745
1746static int cmp1(const char *s1, int n, const char *s2)
1747{
1748 if (strlen(s2) != n)
1749 return 0;
1750 return memcmp(s1, s2, n) == 0;
1751}
1752
1753/* takes a comma-separated list of log masks. Returns 0 on error. */
1754int cpu_str_to_log_mask(const char *str)
1755{
1756 CPULogItem *item;
1757 int mask;
1758 const char *p, *p1;
1759
1760 p = str;
1761 mask = 0;
1762 for(;;) {
1763 p1 = strchr(p, ',');
1764 if (!p1)
1765 p1 = p + strlen(p);
1766 if(cmp1(p,p1-p,"all")) {
1767 for(item = cpu_log_items; item->mask != 0; item++) {
1768 mask |= item->mask;
1769 }
1770 } else {
1771 for(item = cpu_log_items; item->mask != 0; item++) {
1772 if (cmp1(p, p1 - p, item->name))
1773 goto found;
1774 }
1775 return 0;
1776 }
1777 found:
1778 mask |= item->mask;
1779 if (*p1 != ',')
1780 break;
1781 p = p1 + 1;
1782 }
1783 return mask;
1784}
1785#endif /* !VBOX */
1786
1787#ifndef VBOX /* VBOX: we have our own routine. */
1788void cpu_abort(CPUState *env, const char *fmt, ...)
1789{
1790 va_list ap;
1791
1792 va_start(ap, fmt);
1793 fprintf(stderr, "qemu: fatal: ");
1794 vfprintf(stderr, fmt, ap);
1795 fprintf(stderr, "\n");
1796#ifdef TARGET_I386
1797 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1798#else
1799 cpu_dump_state(env, stderr, fprintf, 0);
1800#endif
1801 va_end(ap);
1802 abort();
1803}
1804#endif /* !VBOX */
1805
1806#ifndef VBOX
1807CPUState *cpu_copy(CPUState *env)
1808{
1809 CPUState *new_env = cpu_init(env->cpu_model_str);
1810 /* preserve chaining and index */
1811 CPUState *next_cpu = new_env->next_cpu;
1812 int cpu_index = new_env->cpu_index;
1813 memcpy(new_env, env, sizeof(CPUState));
1814 new_env->next_cpu = next_cpu;
1815 new_env->cpu_index = cpu_index;
1816 return new_env;
1817}
1818#endif
1819
1820#if !defined(CONFIG_USER_ONLY)
1821
1822#ifndef VBOX
1823static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1824#else
1825DECLINLINE(void) tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1826#endif
1827{
1828 unsigned int i;
1829
1830 /* Discard jump cache entries for any tb which might potentially
1831 overlap the flushed page. */
1832 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1833 memset (&env->tb_jmp_cache[i], 0,
1834 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1835
1836 i = tb_jmp_cache_hash_page(addr);
1837 memset (&env->tb_jmp_cache[i], 0,
1838 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1839
1840#ifdef VBOX
1841 /* inform raw mode about TLB page flush */
1842 remR3FlushPage(env, addr);
1843#endif /* VBOX */
1844}
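/* Two hash pages are cleared because a TB spans at most two guest pages:
   a TB overlapping the flushed page has its pc either on that page or on
   the one just before it. */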
1845
1846/* NOTE: if flush_global is true, also flush global entries (not
1847 implemented yet) */
1848void tlb_flush(CPUState *env, int flush_global)
1849{
1850 int i;
1851#if defined(DEBUG_TLB)
1852 printf("tlb_flush:\n");
1853#endif
1854 /* must reset current TB so that interrupts cannot modify the
1855 links while we are modifying them */
1856 env->current_tb = NULL;
1857
1858 for(i = 0; i < CPU_TLB_SIZE; i++) {
1859 env->tlb_table[0][i].addr_read = -1;
1860 env->tlb_table[0][i].addr_write = -1;
1861 env->tlb_table[0][i].addr_code = -1;
1862 env->tlb_table[1][i].addr_read = -1;
1863 env->tlb_table[1][i].addr_write = -1;
1864 env->tlb_table[1][i].addr_code = -1;
1865#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
1866 env->phys_addends[0][i] = -1;
1867 env->phys_addends[1][i] = -1;
1868#endif
1869#if (NB_MMU_MODES >= 3)
1870 env->tlb_table[2][i].addr_read = -1;
1871 env->tlb_table[2][i].addr_write = -1;
1872 env->tlb_table[2][i].addr_code = -1;
1873#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
1874 env->phys_addends[2][i] = -1;
1875#endif
1876#if (NB_MMU_MODES == 4)
1877 env->tlb_table[3][i].addr_read = -1;
1878 env->tlb_table[3][i].addr_write = -1;
1879 env->tlb_table[3][i].addr_code = -1;
1880#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
1881 env->phys_addends[3][i] = -1;
1882#endif
1883#endif
1884#endif
1885 }
1886
1887 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1888
1889#ifdef VBOX
1890 /* inform raw mode about TLB flush */
1891 remR3FlushTLB(env, flush_global);
1892#endif
1893#ifdef USE_KQEMU
1894 if (env->kqemu_enabled) {
1895 kqemu_flush(env, flush_global);
1896 }
1897#endif
1898 tlb_flush_count++;
1899}
1900
1901#ifndef VBOX
1902static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1903#else
1904DECLINLINE(void) tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1905#endif
1906{
1907 if (addr == (tlb_entry->addr_read &
1908 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1909 addr == (tlb_entry->addr_write &
1910 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1911 addr == (tlb_entry->addr_code &
1912 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1913 tlb_entry->addr_read = -1;
1914 tlb_entry->addr_write = -1;
1915 tlb_entry->addr_code = -1;
1916 }
1917}
1918
1919void tlb_flush_page(CPUState *env, target_ulong addr)
1920{
1921 int i;
1922
1923#if defined(DEBUG_TLB)
1924 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1925#endif
1926 /* must reset current TB so that interrupts cannot modify the
1927 links while we are modifying them */
1928 env->current_tb = NULL;
1929
1930 addr &= TARGET_PAGE_MASK;
1931 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1932 tlb_flush_entry(&env->tlb_table[0][i], addr);
1933 tlb_flush_entry(&env->tlb_table[1][i], addr);
1934#if (NB_MMU_MODES >= 3)
1935 tlb_flush_entry(&env->tlb_table[2][i], addr);
1936#if (NB_MMU_MODES == 4)
1937 tlb_flush_entry(&env->tlb_table[3][i], addr);
1938#endif
1939#endif
1940
1941 tlb_flush_jmp_cache(env, addr);
1942
1943#ifdef USE_KQEMU
1944 if (env->kqemu_enabled) {
1945 kqemu_flush_page(env, addr);
1946 }
1947#endif
1948}
1949
1950/* update the TLBs so that writes to code in the virtual page 'addr'
1951 can be detected */
1952static void tlb_protect_code(ram_addr_t ram_addr)
1953{
1954 cpu_physical_memory_reset_dirty(ram_addr,
1955 ram_addr + TARGET_PAGE_SIZE,
1956 CODE_DIRTY_FLAG);
1957#if defined(VBOX) && defined(REM_MONITOR_CODE_PAGES)
1958 /** @todo Retest this? This function has changed... */
1959 remR3ProtectCode(cpu_single_env, ram_addr);
1960#endif
1961}
1962
1963/* update the TLB so that writes in physical page 'phys_addr' are no longer
1964 tested for self-modifying code */
1965static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1966 target_ulong vaddr)
1967{
1968#ifdef VBOX
1969 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
1970#endif
1971 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1972}
1973
1974#ifndef VBOX
1975static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1976 unsigned long start, unsigned long length)
1977#else
1978DECLINLINE(void) tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1979 unsigned long start, unsigned long length)
1980#endif
1981{
1982 unsigned long addr;
1983
1984#ifdef VBOX
1985 if (start & 3)
1986 return;
1987#endif
1988 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1989 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1990 if ((addr - start) < length) {
1991 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1992 }
1993 }
1994}
1995
1996void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1997 int dirty_flags)
1998{
1999 CPUState *env;
2000 unsigned long length, start1;
2001 int i, mask, len;
2002 uint8_t *p;
2003
2004 start &= TARGET_PAGE_MASK;
2005 end = TARGET_PAGE_ALIGN(end);
2006
2007 length = end - start;
2008 if (length == 0)
2009 return;
2010 len = length >> TARGET_PAGE_BITS;
2011#ifdef USE_KQEMU
2012 /* XXX: should not depend on cpu context */
2013 env = first_cpu;
2014 if (env->kqemu_enabled) {
2015 ram_addr_t addr;
2016 addr = start;
2017 for(i = 0; i < len; i++) {
2018 kqemu_set_notdirty(env, addr);
2019 addr += TARGET_PAGE_SIZE;
2020 }
2021 }
2022#endif
2023 mask = ~dirty_flags;
2024 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
2025#ifdef VBOX
2026 if (RT_LIKELY((start >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2027#endif
2028 for(i = 0; i < len; i++)
2029 p[i] &= mask;
2030
2031 /* we modify the TLB cache so that the dirty bit will be set again
2032 when accessing the range */
2033#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2034 start1 = start;
2035#elif !defined(VBOX)
2036 start1 = start + (unsigned long)phys_ram_base;
2037#else
2038 start1 = (unsigned long)remR3TlbGCPhys2Ptr(first_cpu, start, 1 /*fWritable*/); /** @todo this can be harmful with VBOX_WITH_NEW_PHYS_CODE, fix interface/whatever. */
2039#endif
2040 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2041 for(i = 0; i < CPU_TLB_SIZE; i++)
2042 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
2043 for(i = 0; i < CPU_TLB_SIZE; i++)
2044 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
2045#if (NB_MMU_MODES >= 3)
2046 for(i = 0; i < CPU_TLB_SIZE; i++)
2047 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
2048#if (NB_MMU_MODES == 4)
2049 for(i = 0; i < CPU_TLB_SIZE; i++)
2050 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
2051#endif
2052#endif
2053 }
2054}
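/* Illustrative usage sketch (documentation annotation, not part of the build):
   a typical consumer of the dirty bitmap scans its range, resyncs the pages it
   finds dirty, and then clears only its own flag so other users (code
   tracking, migration, ...) keep their state.  VGA_DIRTY_FLAG and
   redraw_page() below are assumptions made for the sake of the example:

       if (cpu_physical_memory_get_dirty(page_addr, VGA_DIRTY_FLAG)) {
           redraw_page(page_addr);            // hypothetical device-side helper
           cpu_physical_memory_reset_dirty(page_addr,
                                            page_addr + TARGET_PAGE_SIZE,
                                            VGA_DIRTY_FLAG);
       }
*/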
2055
2056#ifndef VBOX
2057int cpu_physical_memory_set_dirty_tracking(int enable)
2058{
2059 in_migration = enable;
2060 return 0;
2061}
2062
2063int cpu_physical_memory_get_dirty_tracking(void)
2064{
2065 return in_migration;
2066}
2067#endif
2068
2069#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2070DECLINLINE(void) tlb_update_dirty(CPUTLBEntry *tlb_entry, target_phys_addr_t phys_addend)
2071#else
2072static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2073#endif
2074{
2075 ram_addr_t ram_addr;
2076
2077 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2078 /* RAM case */
2079#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2080 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2081#elif !defined(VBOX)
2082 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
2083 tlb_entry->addend - (unsigned long)phys_ram_base;
2084#else
2085 Assert(phys_addend != -1);
2086 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + phys_addend;
2087
2088 /** @todo: nike: will remove this assert along with remR3HCVirt2GCPhys() soon */
2089 Assert(ram_addr == remR3HCVirt2GCPhys(first_cpu, (void*)((tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend)));
2090#endif
2091 if (!cpu_physical_memory_is_dirty(ram_addr)) {
2092 tlb_entry->addr_write |= TLB_NOTDIRTY;
2093 }
2094 }
2095}
2096
2097/* update the TLB according to the current state of the dirty bits */
2098void cpu_tlb_update_dirty(CPUState *env)
2099{
2100 int i;
2101#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2102 for(i = 0; i < CPU_TLB_SIZE; i++)
2103 tlb_update_dirty(&env->tlb_table[0][i], env->phys_addends[0][i]);
2104 for(i = 0; i < CPU_TLB_SIZE; i++)
2105 tlb_update_dirty(&env->tlb_table[1][i], env->phys_addends[1][i]);
2106#if (NB_MMU_MODES >= 3)
2107 for(i = 0; i < CPU_TLB_SIZE; i++)
2108 tlb_update_dirty(&env->tlb_table[2][i], env->phys_addends[2][i]);
2109#if (NB_MMU_MODES == 4)
2110 for(i = 0; i < CPU_TLB_SIZE; i++)
2111 tlb_update_dirty(&env->tlb_table[3][i], env->phys_addends[3][i]);
2112#endif
2113#endif
2114#else /* VBOX */
2115 for(i = 0; i < CPU_TLB_SIZE; i++)
2116 tlb_update_dirty(&env->tlb_table[0][i]);
2117 for(i = 0; i < CPU_TLB_SIZE; i++)
2118 tlb_update_dirty(&env->tlb_table[1][i]);
2119#if (NB_MMU_MODES >= 3)
2120 for(i = 0; i < CPU_TLB_SIZE; i++)
2121 tlb_update_dirty(&env->tlb_table[2][i]);
2122#if (NB_MMU_MODES == 4)
2123 for(i = 0; i < CPU_TLB_SIZE; i++)
2124 tlb_update_dirty(&env->tlb_table[3][i]);
2125#endif
2126#endif
2127#endif /* VBOX */
2128}
2129
2130#ifndef VBOX
2131static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2132#else
2133DECLINLINE(void) tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2134#endif
2135{
2136 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2137 tlb_entry->addr_write = vaddr;
2138}
2139
2140
2141/* update the TLB corresponding to virtual page vaddr and phys addr
2142 addr so that it is no longer dirty */
2143#ifndef VBOX
2144static inline void tlb_set_dirty(CPUState *env,
2145 unsigned long addr, target_ulong vaddr)
2146#else
2147DECLINLINE(void) tlb_set_dirty(CPUState *env,
2148 unsigned long addr, target_ulong vaddr)
2149#endif
2150{
2151 int i;
2152
2153 addr &= TARGET_PAGE_MASK;
2154 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2155 tlb_set_dirty1(&env->tlb_table[0][i], addr);
2156 tlb_set_dirty1(&env->tlb_table[1][i], addr);
2157#if (NB_MMU_MODES >= 3)
2158 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
2159#if (NB_MMU_MODES == 4)
2160 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
2161#endif
2162#endif
2163}
2164
2165/* add a new TLB entry. At most one entry for a given virtual address
2166 is permitted. Return 0 if OK or 2 if the page could not be mapped
2167 (can only happen in non SOFTMMU mode for I/O pages or pages
2168 conflicting with the host address space). */
2169int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2170 target_phys_addr_t paddr, int prot,
2171 int mmu_idx, int is_softmmu)
2172{
2173 PhysPageDesc *p;
2174 unsigned long pd;
2175 unsigned int index;
2176 target_ulong address;
2177 target_ulong code_address;
2178 target_phys_addr_t addend;
2179 int ret;
2180 CPUTLBEntry *te;
2181 int i;
2182 target_phys_addr_t iotlb;
2183#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2184 int read_mods = 0, write_mods = 0, code_mods = 0;
2185#endif
2186
2187 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2188 if (!p) {
2189 pd = IO_MEM_UNASSIGNED;
2190 } else {
2191 pd = p->phys_offset;
2192 }
2193#if defined(DEBUG_TLB)
2194 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2195 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2196#endif
2197
2198 ret = 0;
2199 address = vaddr;
2200 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2201 /* IO memory case (romd handled later) */
2202 address |= TLB_MMIO;
2203 }
2204#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2205 addend = pd & TARGET_PAGE_MASK;
2206#elif !defined(VBOX)
2207 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
2208#else
2209 /** @todo this is racing the phys_page_find call above since it may register
2210 * a new chunk of memory... */
2211 addend = (unsigned long)remR3TlbGCPhys2Ptr(env,
2212 pd & TARGET_PAGE_MASK,
2213 !!(prot & PAGE_WRITE));
2214#endif
2215
2216 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2217 /* Normal RAM. */
2218 iotlb = pd & TARGET_PAGE_MASK;
2219 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2220 iotlb |= IO_MEM_NOTDIRTY;
2221 else
2222 iotlb |= IO_MEM_ROM;
2223 } else {
2224 /* IO handlers are currently passed a physical address.
2225 It would be nice to pass an offset from the base address
2226 of that region. This would avoid having to special case RAM,
2227 and avoid full address decoding in every device.
2228 We can't use the high bits of pd for this because
2229 IO_MEM_ROMD uses these as a ram address. */
2230 iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
2231 }
2232
2233 code_address = address;
2234
2235#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2236 if (addend & 0x3)
2237 {
2238 if (addend & 0x2)
2239 {
2240 /* catch write */
2241 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
2242 write_mods |= TLB_MMIO;
2243 }
2244 else if (addend & 0x1)
2245 {
2246 /* catch all */
2247 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
2248 {
2249 read_mods |= TLB_MMIO;
2250 write_mods |= TLB_MMIO;
2251 code_mods |= TLB_MMIO;
2252 }
2253 }
2254 if ((iotlb & ~TARGET_PAGE_MASK) == 0)
2255 iotlb = env->pVM->rem.s.iHandlerMemType + paddr;
2256 addend &= ~(target_ulong)0x3;
2257 }
2258#endif
2259
2260 /* Make accesses to pages with watchpoints go via the
2261 watchpoint trap routines. */
2262 for (i = 0; i < env->nb_watchpoints; i++) {
2263 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
2264 iotlb = io_mem_watch + paddr;
2265 /* TODO: The memory case can be optimized by not trapping
2266 reads of pages with a write breakpoint. */
2267 address |= TLB_MMIO;
2268 }
2269 }
2270
2271 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2272 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2273 te = &env->tlb_table[mmu_idx][index];
2274 te->addend = addend - vaddr;
2275 if (prot & PAGE_READ) {
2276 te->addr_read = address;
2277 } else {
2278 te->addr_read = -1;
2279 }
2280
2281 if (prot & PAGE_EXEC) {
2282 te->addr_code = code_address;
2283 } else {
2284 te->addr_code = -1;
2285 }
2286 if (prot & PAGE_WRITE) {
2287 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2288 (pd & IO_MEM_ROMD)) {
2289 /* Write access calls the I/O callback. */
2290 te->addr_write = address | TLB_MMIO;
2291 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2292 !cpu_physical_memory_is_dirty(pd)) {
2293 te->addr_write = address | TLB_NOTDIRTY;
2294 } else {
2295 te->addr_write = address;
2296 }
2297 } else {
2298 te->addr_write = -1;
2299 }
2300
2301#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2302 if (prot & PAGE_READ)
2303 te->addr_read |= read_mods;
2304 if (prot & PAGE_EXEC)
2305 te->addr_code |= code_mods;
2306 if (prot & PAGE_WRITE)
2307 te->addr_write |= write_mods;
2308
2309 env->phys_addends[mmu_idx][index] = (pd & TARGET_PAGE_MASK) - vaddr;
2310#endif
2311
2312#ifdef VBOX
2313 /* inform raw mode about TLB page change */
2314 remR3FlushPage(env, vaddr);
2315#endif
2316 return ret;
2317}
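/* Illustrative usage sketch (documentation annotation, not part of the build):
   tlb_set_page_exec() is normally reached from a target MMU fault handler
   after the guest page tables have been walked.  Roughly, with the results of
   the walk as placeholders:

       vaddr = addr & TARGET_PAGE_MASK;         // faulting page
       paddr = ...;                             // physical page from the walk
       prot  = PAGE_READ | PAGE_WRITE;          // permissions from the walk
       return tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
*/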
2318#if 0
2319/* called from signal handler: invalidate the code and unprotect the
2320 page. Return TRUE if the fault was successfully handled. */
2321int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
2322{
2323#if !defined(CONFIG_SOFTMMU)
2324 VirtPageDesc *vp;
2325
2326#if defined(DEBUG_TLB)
2327 printf("page_unprotect: addr=0x%08x\n", addr);
2328#endif
2329 addr &= TARGET_PAGE_MASK;
2330
2331 /* if it is not mapped, no need to worry here */
2332 if (addr >= MMAP_AREA_END)
2333 return 0;
2334 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
2335 if (!vp)
2336 return 0;
2337 /* NOTE: in this case, validate_tag is _not_ tested as it
2338 validates only the code TLB */
2339 if (vp->valid_tag != virt_valid_tag)
2340 return 0;
2341 if (!(vp->prot & PAGE_WRITE))
2342 return 0;
2343#if defined(DEBUG_TLB)
2344 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
2345 addr, vp->phys_addr, vp->prot);
2346#endif
2347 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
2348 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
2349 (unsigned long)addr, vp->prot);
2350 /* set the dirty bit */
2351 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
2352 /* flush the code inside */
2353 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
2354 return 1;
2355#elif defined(VBOX)
2356 addr &= TARGET_PAGE_MASK;
2357
2358 /* if it is not mapped, no need to worry here */
2359 if (addr >= MMAP_AREA_END)
2360 return 0;
2361 return 1;
2362#else
2363 return 0;
2364#endif
2365}
2366#endif /* 0 */
2367
2368#else
2369
2370void tlb_flush(CPUState *env, int flush_global)
2371{
2372}
2373
2374void tlb_flush_page(CPUState *env, target_ulong addr)
2375{
2376}
2377
2378int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2379 target_phys_addr_t paddr, int prot,
2380 int mmu_idx, int is_softmmu)
2381{
2382 return 0;
2383}
2384
2385#ifndef VBOX
2386/* dump memory mappings */
2387void page_dump(FILE *f)
2388{
2389 unsigned long start, end;
2390 int i, j, prot, prot1;
2391 PageDesc *p;
2392
2393 fprintf(f, "%-8s %-8s %-8s %s\n",
2394 "start", "end", "size", "prot");
2395 start = -1;
2396 end = -1;
2397 prot = 0;
2398 for(i = 0; i <= L1_SIZE; i++) {
2399 if (i < L1_SIZE)
2400 p = l1_map[i];
2401 else
2402 p = NULL;
2403 for(j = 0;j < L2_SIZE; j++) {
2404 if (!p)
2405 prot1 = 0;
2406 else
2407 prot1 = p[j].flags;
2408 if (prot1 != prot) {
2409 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2410 if (start != -1) {
2411 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2412 start, end, end - start,
2413 prot & PAGE_READ ? 'r' : '-',
2414 prot & PAGE_WRITE ? 'w' : '-',
2415 prot & PAGE_EXEC ? 'x' : '-');
2416 }
2417 if (prot1 != 0)
2418 start = end;
2419 else
2420 start = -1;
2421 prot = prot1;
2422 }
2423 if (!p)
2424 break;
2425 }
2426 }
2427}
2428#endif /* !VBOX */
2429
2430int page_get_flags(target_ulong address)
2431{
2432 PageDesc *p;
2433
2434 p = page_find(address >> TARGET_PAGE_BITS);
2435 if (!p)
2436 return 0;
2437 return p->flags;
2438}
2439
2440/* modify the flags of a page and invalidate the code if
2441 necessary. The flag PAGE_WRITE_ORG is positioned automatically
2442 depending on PAGE_WRITE */
2443void page_set_flags(target_ulong start, target_ulong end, int flags)
2444{
2445 PageDesc *p;
2446 target_ulong addr;
2447
2448 start = start & TARGET_PAGE_MASK;
2449 end = TARGET_PAGE_ALIGN(end);
2450 if (flags & PAGE_WRITE)
2451 flags |= PAGE_WRITE_ORG;
2452#ifdef VBOX
2453 AssertMsgFailed(("We shouldn't be here, and if we should, we must have an env to do the proper locking!\n"));
2454#endif
2455 spin_lock(&tb_lock);
2456 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2457 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2458 /* if the write protection is set, then we invalidate the code
2459 inside */
2460 if (!(p->flags & PAGE_WRITE) &&
2461 (flags & PAGE_WRITE) &&
2462 p->first_tb) {
2463 tb_invalidate_phys_page(addr, 0, NULL);
2464 }
2465 p->flags = flags;
2466 }
2467 spin_unlock(&tb_lock);
2468}
2469
2470int page_check_range(target_ulong start, target_ulong len, int flags)
2471{
2472 PageDesc *p;
2473 target_ulong end;
2474 target_ulong addr;
2475
2476 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2477 start = start & TARGET_PAGE_MASK;
2478
2479 if( end < start )
2480 /* we've wrapped around */
2481 return -1;
2482 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2483 p = page_find(addr >> TARGET_PAGE_BITS);
2484 if( !p )
2485 return -1;
2486 if( !(p->flags & PAGE_VALID) )
2487 return -1;
2488
2489 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2490 return -1;
2491 if (flags & PAGE_WRITE) {
2492 if (!(p->flags & PAGE_WRITE_ORG))
2493 return -1;
2494 /* unprotect the page if it was put read-only because it
2495 contains translated code */
2496 if (!(p->flags & PAGE_WRITE)) {
2497 if (!page_unprotect(addr, 0, NULL))
2498 return -1;
2499 }
2500 return 0;
2501 }
2502 }
2503 return 0;
2504}
2505
2506/* called from signal handler: invalidate the code and unprotect the
2507 page. Return TRUE if the fault was successfully handled. */
2508int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2509{
2510 unsigned int page_index, prot, pindex;
2511 PageDesc *p, *p1;
2512 target_ulong host_start, host_end, addr;
2513
2514 /* Technically this isn't safe inside a signal handler. However we
2515 know this only ever happens in a synchronous SEGV handler, so in
2516 practice it seems to be ok. */
2517 mmap_lock();
2518
2519 host_start = address & qemu_host_page_mask;
2520 page_index = host_start >> TARGET_PAGE_BITS;
2521 p1 = page_find(page_index);
2522 if (!p1) {
2523 mmap_unlock();
2524 return 0;
2525 }
2526 host_end = host_start + qemu_host_page_size;
2527 p = p1;
2528 prot = 0;
2529 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2530 prot |= p->flags;
2531 p++;
2532 }
2533 /* if the page was really writable, then we change its
2534 protection back to writable */
2535 if (prot & PAGE_WRITE_ORG) {
2536 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2537 if (!(p1[pindex].flags & PAGE_WRITE)) {
2538 mprotect((void *)g2h(host_start), qemu_host_page_size,
2539 (prot & PAGE_BITS) | PAGE_WRITE);
2540 p1[pindex].flags |= PAGE_WRITE;
2541 /* and since the content will be modified, we must invalidate
2542 the corresponding translated code. */
2543 tb_invalidate_phys_page(address, pc, puc);
2544#ifdef DEBUG_TB_CHECK
2545 tb_invalidate_check(address);
2546#endif
2547 mmap_unlock();
2548 return 1;
2549 }
2550 }
2551 mmap_unlock();
2552 return 0;
2553}
2554
2555static inline void tlb_set_dirty(CPUState *env,
2556 unsigned long addr, target_ulong vaddr)
2557{
2558}
2559#endif /* defined(CONFIG_USER_ONLY) */
2560
2561#if !defined(CONFIG_USER_ONLY)
2562static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2563 ram_addr_t memory);
2564static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2565 ram_addr_t orig_memory);
2566#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2567 need_subpage) \
2568 do { \
2569 if (addr > start_addr) \
2570 start_addr2 = 0; \
2571 else { \
2572 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2573 if (start_addr2 > 0) \
2574 need_subpage = 1; \
2575 } \
2576 \
2577 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2578 end_addr2 = TARGET_PAGE_SIZE - 1; \
2579 else { \
2580 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2581 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2582 need_subpage = 1; \
2583 } \
2584 } while (0)
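/* Worked example (documentation annotation): assume 4 KB target pages and a
   region registered at start_addr = 0x10000400 with orig_size = 0x200.  For
   the page containing it (addr == start_addr on the only loop iteration) the
   macro computes start_addr2 = 0x400 and end_addr2 = 0x5ff; since the region
   covers neither the start nor the end of the page, need_subpage is set and
   the caller routes the page through a subpage_t instead of mapping it
   whole. */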
2585
2586
2587/* register physical memory. 'size' must be a multiple of the target
2588 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2589 io memory page */
2590void cpu_register_physical_memory(target_phys_addr_t start_addr,
2591 unsigned long size,
2592 unsigned long phys_offset)
2593{
2594 target_phys_addr_t addr, end_addr;
2595 PhysPageDesc *p;
2596 CPUState *env;
2597 ram_addr_t orig_size = size;
2598 void *subpage;
2599
2600#ifdef USE_KQEMU
2601 /* XXX: should not depend on cpu context */
2602 env = first_cpu;
2603 if (env->kqemu_enabled) {
2604 kqemu_set_phys_mem(start_addr, size, phys_offset);
2605 }
2606#endif
2607 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2608 end_addr = start_addr + (target_phys_addr_t)size;
2609 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2610 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2611 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2612 ram_addr_t orig_memory = p->phys_offset;
2613 target_phys_addr_t start_addr2, end_addr2;
2614 int need_subpage = 0;
2615
2616 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2617 need_subpage);
2618 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2619 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2620 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2621 &p->phys_offset, orig_memory);
2622 } else {
2623 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2624 >> IO_MEM_SHIFT];
2625 }
2626 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2627 } else {
2628 p->phys_offset = phys_offset;
2629#if !defined(VBOX) || defined(VBOX_WITH_NEW_PHYS_CODE)
2630 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2631 (phys_offset & IO_MEM_ROMD))
2632#else
2633 if ( (phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM
2634 || (phys_offset & IO_MEM_ROMD)
2635 || (phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING)
2636#endif
2637 phys_offset += TARGET_PAGE_SIZE;
2638 }
2639 } else {
2640 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2641 p->phys_offset = phys_offset;
2642#if !defined(VBOX) || defined(VBOX_WITH_NEW_PHYS_CODE)
2643 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2644 (phys_offset & IO_MEM_ROMD))
2645#else
2646 if ( (phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM
2647 || (phys_offset & IO_MEM_ROMD)
2648 || (phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING)
2649#endif
2650 phys_offset += TARGET_PAGE_SIZE;
2651 else {
2652 target_phys_addr_t start_addr2, end_addr2;
2653 int need_subpage = 0;
2654
2655 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2656 end_addr2, need_subpage);
2657
2658 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2659 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2660 &p->phys_offset, IO_MEM_UNASSIGNED);
2661 subpage_register(subpage, start_addr2, end_addr2,
2662 phys_offset);
2663 }
2664 }
2665 }
2666 }
2667 /* since each CPU stores ram addresses in its TLB cache, we must
2668 reset the modified entries */
2669 /* XXX: slow ! */
2670 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2671 tlb_flush(env, 1);
2672 }
2673}
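/* Annotation (documentation note): phys_offset encodes both the backing and
   the kind of memory.  For RAM and ROM it is a page-aligned offset into the
   guest RAM arena, so (phys_offset & ~TARGET_PAGE_MASK) stays <= IO_MEM_ROM
   and is advanced by TARGET_PAGE_SIZE per page above; for MMIO the low bits
   carry the io-memory index returned by cpu_register_io_memory(), so every
   access to the page is routed to the registered callbacks. */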
2674
2675/* XXX: temporary until new memory mapping API */
2676uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2677{
2678 PhysPageDesc *p;
2679
2680 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2681 if (!p)
2682 return IO_MEM_UNASSIGNED;
2683 return p->phys_offset;
2684}
2685
2686#ifndef VBOX
2687/* XXX: better than nothing */
2688ram_addr_t qemu_ram_alloc(ram_addr_t size)
2689{
2690 ram_addr_t addr;
2691 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2692 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2693 (uint64_t)size, (uint64_t)phys_ram_size);
2694 abort();
2695 }
2696 addr = phys_ram_alloc_offset;
2697 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2698 return addr;
2699}
2700
2701void qemu_ram_free(ram_addr_t addr)
2702{
2703}
2704#endif
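/* Illustrative usage sketch (documentation annotation, not part of the build):
   in the non-VBOX configuration a machine init function would carve its RAM
   out of the arena with qemu_ram_alloc() and then make it guest-visible,
   along these lines (ram_size is a placeholder):

       ram_addr_t ram_offset = qemu_ram_alloc(ram_size);
       cpu_register_physical_memory(0x00000000, ram_size,
                                    ram_offset | IO_MEM_RAM);

   IO_MEM_RAM is the plain-RAM designation (zero in this code base), so the
   OR mainly documents intent. */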
2705
2706
2707static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2708{
2709#ifdef DEBUG_UNASSIGNED
2710 printf("Unassigned mem read 0x%08x\n", (int)addr);
2711#endif
2712#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2713 do_unassigned_access(addr, 0, 0, 0, 1);
2714#endif
2715 return 0;
2716}
2717
2718static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2719{
2720#ifdef DEBUG_UNASSIGNED
2721 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2722#endif
2723#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2724 do_unassigned_access(addr, 0, 0, 0, 2);
2725#endif
2726 return 0;
2727}
2728
2729static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2730{
2731#ifdef DEBUG_UNASSIGNED
2732 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2733#endif
2734#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2735 do_unassigned_access(addr, 0, 0, 0, 4);
2736#endif
2737 return 0;
2738}
2739
2740static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2741{
2742#ifdef DEBUG_UNASSIGNED
2743 printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
2744#endif
2745}
2746
2747static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2748{
2749#ifdef DEBUG_UNASSIGNED
2750 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2751#endif
2752#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2753 do_unassigned_access(addr, 1, 0, 0, 2);
2754#endif
2755}
2756
2757static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2758{
2759#ifdef DEBUG_UNASSIGNED
2760 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2761#endif
2762#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2763 do_unassigned_access(addr, 1, 0, 0, 4);
2764#endif
2765}
2766static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2767 unassigned_mem_readb,
2768 unassigned_mem_readw,
2769 unassigned_mem_readl,
2770};
2771
2772static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2773 unassigned_mem_writeb,
2774 unassigned_mem_writew,
2775 unassigned_mem_writel,
2776};
2777
2778static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2779{
2780 unsigned long ram_addr;
2781 int dirty_flags;
2782#if defined(VBOX)
2783 ram_addr = addr;
2784#else
2785 ram_addr = addr - (unsigned long)phys_ram_base;
2786#endif
2787#ifdef VBOX
2788 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2789 dirty_flags = 0xff;
2790 else
2791#endif /* VBOX */
2792 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2793 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2794#if !defined(CONFIG_USER_ONLY)
2795 tb_invalidate_phys_page_fast(ram_addr, 1);
2796# ifdef VBOX
2797 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2798 dirty_flags = 0xff;
2799 else
2800# endif /* VBOX */
2801 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2802#endif
2803 }
2804#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2805 remR3PhysWriteU8(addr, val);
2806#else
2807 stb_p((uint8_t *)(long)addr, val);
2808#endif
2809#ifdef USE_KQEMU
2810 if (cpu_single_env->kqemu_enabled &&
2811 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2812 kqemu_modify_page(cpu_single_env, ram_addr);
2813#endif
2814 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2815#ifdef VBOX
2816 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2817#endif /* VBOX */
2818 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2819 /* we remove the notdirty callback only if the code has been
2820 flushed */
2821 if (dirty_flags == 0xff)
2822 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
2823}
2824
2825static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2826{
2827 unsigned long ram_addr;
2828 int dirty_flags;
2829#if defined(VBOX)
2830 ram_addr = addr;
2831#else
2832 ram_addr = addr - (unsigned long)phys_ram_base;
2833#endif
2834#ifdef VBOX
2835 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2836 dirty_flags = 0xff;
2837 else
2838#endif /* VBOX */
2839 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2840 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2841#if !defined(CONFIG_USER_ONLY)
2842 tb_invalidate_phys_page_fast(ram_addr, 2);
2843# ifdef VBOX
2844 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2845 dirty_flags = 0xff;
2846 else
2847# endif /* VBOX */
2848 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2849#endif
2850 }
2851#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2852 remR3PhysWriteU16(addr, val);
2853#else
2854 stw_p((uint8_t *)(long)addr, val);
2855#endif
2856
2857#ifdef USE_KQEMU
2858 if (cpu_single_env->kqemu_enabled &&
2859 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2860 kqemu_modify_page(cpu_single_env, ram_addr);
2861#endif
2862 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2863#ifdef VBOX
2864 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2865#endif
2866 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2867 /* we remove the notdirty callback only if the code has been
2868 flushed */
2869 if (dirty_flags == 0xff)
2870 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
2871}
2872
2873static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2874{
2875 unsigned long ram_addr;
2876 int dirty_flags;
2877#if defined(VBOX)
2878 ram_addr = addr;
2879#else
2880 ram_addr = addr - (unsigned long)phys_ram_base;
2881#endif
2882#ifdef VBOX
2883 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2884 dirty_flags = 0xff;
2885 else
2886#endif /* VBOX */
2887 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2888 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2889#if !defined(CONFIG_USER_ONLY)
2890 tb_invalidate_phys_page_fast(ram_addr, 4);
2891# ifdef VBOX
2892 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2893 dirty_flags = 0xff;
2894 else
2895# endif /* VBOX */
2896 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2897#endif
2898 }
2899#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2900 remR3PhysWriteU32(addr, val);
2901#else
2902 stl_p((uint8_t *)(long)addr, val);
2903#endif
2904#ifdef USE_KQEMU
2905 if (cpu_single_env->kqemu_enabled &&
2906 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2907 kqemu_modify_page(cpu_single_env, ram_addr);
2908#endif
2909 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2910#ifdef VBOX
2911 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2912#endif
2913 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2914 /* we remove the notdirty callback only if the code has been
2915 flushed */
2916 if (dirty_flags == 0xff)
2917 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
2918}
2919
2920static CPUReadMemoryFunc *error_mem_read[3] = {
2921 NULL, /* never used */
2922 NULL, /* never used */
2923 NULL, /* never used */
2924};
2925
2926static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2927 notdirty_mem_writeb,
2928 notdirty_mem_writew,
2929 notdirty_mem_writel,
2930};
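/* Annotation (documentation note): the notdirty handlers above are the
   self-modifying-code trap.  tlb_set_page_exec() installs them (via
   IO_MEM_NOTDIRTY / TLB_NOTDIRTY) for RAM pages whose CODE_DIRTY_FLAG has
   been cleared by tlb_protect_code(); the first write to such a page lands
   here, tb_invalidate_phys_page_fast() throws away any translated blocks
   covering it, the dirty bits are set again, and once the page is fully
   dirty (0xff) the slow path removes itself with tlb_set_dirty(). */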
2931
2932
2933/* Generate a debug exception if a watchpoint has been hit. */
2934static void check_watchpoint(int offset, int flags)
2935{
2936 CPUState *env = cpu_single_env;
2937 target_ulong vaddr;
2938 int i;
2939
2940 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2941 for (i = 0; i < env->nb_watchpoints; i++) {
2942 if (vaddr == env->watchpoint[i].vaddr
2943 && (env->watchpoint[i].type & flags)) {
2944 env->watchpoint_hit = i + 1;
2945 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2946 break;
2947 }
2948 }
2949}
2950
2951/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2952 so these check for a hit then pass through to the normal out-of-line
2953 phys routines. */
2954static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2955{
2956 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2957 return ldub_phys(addr);
2958}
2959
2960static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2961{
2962 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2963 return lduw_phys(addr);
2964}
2965
2966static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2967{
2968 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2969 return ldl_phys(addr);
2970}
2971
2972static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2973 uint32_t val)
2974{
2975 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2976 stb_phys(addr, val);
2977}
2978
2979static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2980 uint32_t val)
2981{
2982 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2983 stw_phys(addr, val);
2984}
2985
2986static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2987 uint32_t val)
2988{
2989 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2990 stl_phys(addr, val);
2991}
2992
2993static CPUReadMemoryFunc *watch_mem_read[3] = {
2994 watch_mem_readb,
2995 watch_mem_readw,
2996 watch_mem_readl,
2997};
2998
2999static CPUWriteMemoryFunc *watch_mem_write[3] = {
3000 watch_mem_writeb,
3001 watch_mem_writew,
3002 watch_mem_writel,
3003};
3004
3005static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
3006 unsigned int len)
3007{
3008 uint32_t ret;
3009 unsigned int idx;
3010
3011 idx = SUBPAGE_IDX(addr - mmio->base);
3012#if defined(DEBUG_SUBPAGE)
3013 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3014 mmio, len, addr, idx);
3015#endif
3016 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
3017
3018 return ret;
3019}
3020
3021static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
3022 uint32_t value, unsigned int len)
3023{
3024 unsigned int idx;
3025
3026 idx = SUBPAGE_IDX(addr - mmio->base);
3027#if defined(DEBUG_SUBPAGE)
3028 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
3029 mmio, len, addr, idx, value);
3030#endif
3031 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
3032}
3033
3034static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
3035{
3036#if defined(DEBUG_SUBPAGE)
3037 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3038#endif
3039
3040 return subpage_readlen(opaque, addr, 0);
3041}
3042
3043static void subpage_writeb (void *opaque, target_phys_addr_t addr,
3044 uint32_t value)
3045{
3046#if defined(DEBUG_SUBPAGE)
3047 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3048#endif
3049 subpage_writelen(opaque, addr, value, 0);
3050}
3051
3052static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
3053{
3054#if defined(DEBUG_SUBPAGE)
3055 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3056#endif
3057
3058 return subpage_readlen(opaque, addr, 1);
3059}
3060
3061static void subpage_writew (void *opaque, target_phys_addr_t addr,
3062 uint32_t value)
3063{
3064#if defined(DEBUG_SUBPAGE)
3065 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3066#endif
3067 subpage_writelen(opaque, addr, value, 1);
3068}
3069
3070static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
3071{
3072#if defined(DEBUG_SUBPAGE)
3073 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3074#endif
3075
3076 return subpage_readlen(opaque, addr, 2);
3077}
3078
3079static void subpage_writel (void *opaque,
3080 target_phys_addr_t addr, uint32_t value)
3081{
3082#if defined(DEBUG_SUBPAGE)
3083 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3084#endif
3085 subpage_writelen(opaque, addr, value, 2);
3086}
3087
3088static CPUReadMemoryFunc *subpage_read[] = {
3089 &subpage_readb,
3090 &subpage_readw,
3091 &subpage_readl,
3092};
3093
3094static CPUWriteMemoryFunc *subpage_write[] = {
3095 &subpage_writeb,
3096 &subpage_writew,
3097 &subpage_writel,
3098};
3099
3100static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3101 ram_addr_t memory)
3102{
3103 int idx, eidx;
3104 unsigned int i;
3105
3106 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3107 return -1;
3108 idx = SUBPAGE_IDX(start);
3109 eidx = SUBPAGE_IDX(end);
3110#if defined(DEBUG_SUBPAGE)
3111 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
3112 mmio, start, end, idx, eidx, memory);
3113#endif
3114 memory >>= IO_MEM_SHIFT;
3115 for (; idx <= eidx; idx++) {
3116 for (i = 0; i < 4; i++) {
3117 if (io_mem_read[memory][i]) {
3118 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
3119 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
3120 }
3121 if (io_mem_write[memory][i]) {
3122 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
3123 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
3124 }
3125 }
3126 }
3127
3128 return 0;
3129}
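/* Illustrative usage sketch (documentation annotation, not part of the build):
   a subpage lets different handlers share a single target page.  For example,
   to expose a 0x200-byte MMIO window inside an otherwise unassigned page
   (mmio_index and p are placeholders for an io-memory index and the page's
   PhysPageDesc):

       subpage_t *sp = subpage_init(page_base, &p->phys_offset,
                                    IO_MEM_UNASSIGNED);
       subpage_register(sp, 0x400, 0x5ff, mmio_index);

   Accesses to offsets 0x400..0x5ff then hit the MMIO callbacks while the rest
   of the page keeps the unassigned handlers. */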
3130
3131static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3132 ram_addr_t orig_memory)
3133{
3134 subpage_t *mmio;
3135 int subpage_memory;
3136
3137 mmio = qemu_mallocz(sizeof(subpage_t));
3138 if (mmio != NULL) {
3139 mmio->base = base;
3140 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
3141#if defined(DEBUG_SUBPAGE)
3142 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3143 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3144#endif
3145 *phys = subpage_memory | IO_MEM_SUBPAGE;
3146 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
3147 }
3148
3149 return mmio;
3150}
3151
3152static void io_mem_init(void)
3153{
3154 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
3155 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3156 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
3157#if defined(VBOX) && !defined(VBOX_WITH_NEW_PHYS_CODE)
3158 cpu_register_io_memory(IO_MEM_RAM_MISSING >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3159 io_mem_nb = 6;
3160#else
3161 io_mem_nb = 5;
3162#endif
3163
3164 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
3165 watch_mem_write, NULL);
3166
3167#ifndef VBOX /* VBOX: we do this later when the RAM is allocated. */
3168 /* alloc dirty bits array */
3169 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3170 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
3171#endif /* !VBOX */
3172}
3173
3174/* mem_read and mem_write are arrays of functions containing the
3175 function to access byte (index 0), word (index 1) and dword (index
3176 2). Functions can be omitted with a NULL function pointer. The
3177 registered functions may be modified dynamically later.
3178 If io_index is non-zero, the corresponding io zone is
3179 modified. If it is zero, a new io zone is allocated. The return
3180 value can be used with cpu_register_physical_memory(). (-1) is
3181 returned on error. */
3182int cpu_register_io_memory(int io_index,
3183 CPUReadMemoryFunc **mem_read,
3184 CPUWriteMemoryFunc **mem_write,
3185 void *opaque)
3186{
3187 int i, subwidth = 0;
3188
3189 if (io_index <= 0) {
3190 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
3191 return -1;
3192 io_index = io_mem_nb++;
3193 } else {
3194 if (io_index >= IO_MEM_NB_ENTRIES)
3195 return -1;
3196 }
3197
3198 for(i = 0;i < 3; i++) {
3199 if (!mem_read[i] || !mem_write[i])
3200 subwidth = IO_MEM_SUBWIDTH;
3201 io_mem_read[io_index][i] = mem_read[i];
3202 io_mem_write[io_index][i] = mem_write[i];
3203 }
3204 io_mem_opaque[io_index] = opaque;
3205 return (io_index << IO_MEM_SHIFT) | subwidth;
3206}
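/* Illustrative usage sketch (documentation annotation, not part of the build):
   a device registers its byte/word/long callbacks and then maps the returned
   token over a physical range; my_mmio_* and MY_MMIO_BASE are placeholders:

       static CPUReadMemoryFunc *my_mmio_read[3] = {
           my_mmio_readb, my_mmio_readw, my_mmio_readl,
       };
       static CPUWriteMemoryFunc *my_mmio_write[3] = {
           my_mmio_writeb, my_mmio_writew, my_mmio_writel,
       };

       int idx = cpu_register_io_memory(0, my_mmio_read, my_mmio_write, opaque);
       cpu_register_physical_memory(MY_MMIO_BASE, TARGET_PAGE_SIZE, idx);
*/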
3207
3208CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
3209{
3210 return io_mem_write[io_index >> IO_MEM_SHIFT];
3211}
3212
3213CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
3214{
3215 return io_mem_read[io_index >> IO_MEM_SHIFT];
3216}
3217#endif /* !defined(CONFIG_USER_ONLY) */
3218
3219/* physical memory access (slow version, mainly for debug) */
3220#if defined(CONFIG_USER_ONLY)
3221void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3222 int len, int is_write)
3223{
3224 int l, flags;
3225 target_ulong page;
3226 void * p;
3227
3228 while (len > 0) {
3229 page = addr & TARGET_PAGE_MASK;
3230 l = (page + TARGET_PAGE_SIZE) - addr;
3231 if (l > len)
3232 l = len;
3233 flags = page_get_flags(page);
3234 if (!(flags & PAGE_VALID))
3235 return;
3236 if (is_write) {
3237 if (!(flags & PAGE_WRITE))
3238 return;
3239 /* XXX: this code should not depend on lock_user */
3240 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3241 /* FIXME - should this return an error rather than just fail? */
3242 return;
3243 memcpy(p, buf, l);
3244 unlock_user(p, addr, l);
3245 } else {
3246 if (!(flags & PAGE_READ))
3247 return;
3248 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3249 /* FIXME - should this return an error rather than just fail? */
3250 return;
3251 memcpy(buf, p, l);
3252 unlock_user(p, addr, 0);
3253 }
3254 len -= l;
3255 buf += l;
3256 addr += l;
3257 }
3258}
3259
3260#else
3261void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3262 int len, int is_write)
3263{
3264 int l, io_index;
3265 uint8_t *ptr;
3266 uint32_t val;
3267 target_phys_addr_t page;
3268 unsigned long pd;
3269 PhysPageDesc *p;
3270
3271 while (len > 0) {
3272 page = addr & TARGET_PAGE_MASK;
3273 l = (page + TARGET_PAGE_SIZE) - addr;
3274 if (l > len)
3275 l = len;
3276 p = phys_page_find(page >> TARGET_PAGE_BITS);
3277 if (!p) {
3278 pd = IO_MEM_UNASSIGNED;
3279 } else {
3280 pd = p->phys_offset;
3281 }
3282
3283 if (is_write) {
3284 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3285 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3286 /* XXX: could force cpu_single_env to NULL to avoid
3287 potential bugs */
3288 if (l >= 4 && ((addr & 3) == 0)) {
3289 /* 32 bit write access */
3290#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3291 val = ldl_p(buf);
3292#else
3293 val = *(const uint32_t *)buf;
3294#endif
3295 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3296 l = 4;
3297 } else if (l >= 2 && ((addr & 1) == 0)) {
3298 /* 16 bit write access */
3299#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3300 val = lduw_p(buf);
3301#else
3302 val = *(const uint16_t *)buf;
3303#endif
3304 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
3305 l = 2;
3306 } else {
3307 /* 8 bit write access */
3308#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3309 val = ldub_p(buf);
3310#else
3311 val = *(const uint8_t *)buf;
3312#endif
3313 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
3314 l = 1;
3315 }
3316 } else {
3317 unsigned long addr1;
3318 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3319 /* RAM case */
3320#ifdef VBOX
3321 remR3PhysWrite(addr1, buf, l); NOREF(ptr);
3322#else
3323 ptr = phys_ram_base + addr1;
3324 memcpy(ptr, buf, l);
3325#endif
3326 if (!cpu_physical_memory_is_dirty(addr1)) {
3327 /* invalidate code */
3328 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3329 /* set dirty bit */
3330#ifdef VBOX
3331 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
3332#endif
3333 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3334 (0xff & ~CODE_DIRTY_FLAG);
3335 }
3336 }
3337 } else {
3338 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3339 !(pd & IO_MEM_ROMD)) {
3340 /* I/O case */
3341 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3342 if (l >= 4 && ((addr & 3) == 0)) {
3343 /* 32 bit read access */
3344 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3345#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3346 stl_p(buf, val);
3347#else
3348 *(uint32_t *)buf = val;
3349#endif
3350 l = 4;
3351 } else if (l >= 2 && ((addr & 1) == 0)) {
3352 /* 16 bit read access */
3353 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
3354#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3355 stw_p(buf, val);
3356#else
3357 *(uint16_t *)buf = val;
3358#endif
3359 l = 2;
3360 } else {
3361 /* 8 bit read access */
3362 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
3363#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3364 stb_p(buf, val);
3365#else
3366 *(uint8_t *)buf = val;
3367#endif
3368 l = 1;
3369 }
3370 } else {
3371 /* RAM case */
3372#ifdef VBOX
3373 remR3PhysRead((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), buf, l); NOREF(ptr);
3374#else
3375 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3376 (addr & ~TARGET_PAGE_MASK);
3377 memcpy(buf, ptr, l);
3378#endif
3379 }
3380 }
3381 len -= l;
3382 buf += l;
3383 addr += l;
3384 }
3385}
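/* Illustrative usage sketch (documentation annotation, not part of the build):
   most callers go through the cpu_physical_memory_read()/write() wrappers,
   which end up in cpu_physical_memory_rw() with is_write 0 or 1 (gpa is a
   placeholder guest-physical address):

       uint8_t buf[4];
       cpu_physical_memory_read(gpa, buf, sizeof(buf));
       cpu_physical_memory_write(gpa, buf, sizeof(buf));
*/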
3386
3387#ifndef VBOX
3388/* used for ROM loading : can write in RAM and ROM */
3389void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3390 const uint8_t *buf, int len)
3391{
3392 int l;
3393 uint8_t *ptr;
3394 target_phys_addr_t page;
3395 unsigned long pd;
3396 PhysPageDesc *p;
3397
3398 while (len > 0) {
3399 page = addr & TARGET_PAGE_MASK;
3400 l = (page + TARGET_PAGE_SIZE) - addr;
3401 if (l > len)
3402 l = len;
3403 p = phys_page_find(page >> TARGET_PAGE_BITS);
3404 if (!p) {
3405 pd = IO_MEM_UNASSIGNED;
3406 } else {
3407 pd = p->phys_offset;
3408 }
3409
3410 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3411 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3412 !(pd & IO_MEM_ROMD)) {
3413 /* do nothing */
3414 } else {
3415 unsigned long addr1;
3416 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3417 /* ROM/RAM case */
3418 ptr = phys_ram_base + addr1;
3419 memcpy(ptr, buf, l);
3420 }
3421 len -= l;
3422 buf += l;
3423 addr += l;
3424 }
3425}
3426#endif /* !VBOX */
3427
3428
3429/* warning: addr must be aligned */
3430uint32_t ldl_phys(target_phys_addr_t addr)
3431{
3432 int io_index;
3433 uint8_t *ptr;
3434 uint32_t val;
3435 unsigned long pd;
3436 PhysPageDesc *p;
3437
3438 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3439 if (!p) {
3440 pd = IO_MEM_UNASSIGNED;
3441 } else {
3442 pd = p->phys_offset;
3443 }
3444
3445 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3446 !(pd & IO_MEM_ROMD)) {
3447 /* I/O case */
3448 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3449 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3450 } else {
3451 /* RAM case */
3452#ifndef VBOX
3453 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3454 (addr & ~TARGET_PAGE_MASK);
3455 val = ldl_p(ptr);
3456#else
3457 val = remR3PhysReadU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
3458#endif
3459 }
3460 return val;
3461}
3462
3463/* warning: addr must be aligned */
3464uint64_t ldq_phys(target_phys_addr_t addr)
3465{
3466 int io_index;
3467 uint8_t *ptr;
3468 uint64_t val;
3469 unsigned long pd;
3470 PhysPageDesc *p;
3471
3472 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3473 if (!p) {
3474 pd = IO_MEM_UNASSIGNED;
3475 } else {
3476 pd = p->phys_offset;
3477 }
3478
3479 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3480 !(pd & IO_MEM_ROMD)) {
3481 /* I/O case */
3482 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3483#ifdef TARGET_WORDS_BIGENDIAN
3484 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3485 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3486#else
3487 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3488 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3489#endif
3490 } else {
3491 /* RAM case */
3492#ifndef VBOX
3493 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3494 (addr & ~TARGET_PAGE_MASK);
3495 val = ldq_p(ptr);
3496#else
3497 val = remR3PhysReadU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
3498#endif
3499 }
3500 return val;
3501}
3502
3503/* XXX: optimize */
3504uint32_t ldub_phys(target_phys_addr_t addr)
3505{
3506 uint8_t val;
3507 cpu_physical_memory_read(addr, &val, 1);
3508 return val;
3509}
3510
3511/* XXX: optimize */
3512uint32_t lduw_phys(target_phys_addr_t addr)
3513{
3514 uint16_t val;
3515 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3516 return tswap16(val);
3517}
3518
3519/* warning: addr must be aligned. The ram page is not marked as dirty
3520 and the code inside is not invalidated. It is useful if the dirty
3521 bits are used to track modified PTEs */
3522void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3523{
3524 int io_index;
3525 uint8_t *ptr;
3526 unsigned long pd;
3527 PhysPageDesc *p;
3528
3529 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3530 if (!p) {
3531 pd = IO_MEM_UNASSIGNED;
3532 } else {
3533 pd = p->phys_offset;
3534 }
3535
3536 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3537 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3538 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3539 } else {
3540#ifndef VBOX
3541 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK); /* ram offset, also used by the dirty check below */
3542 ptr = phys_ram_base + addr1;
3543 stl_p(ptr, val);
3544#else
3545 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3546#endif
3547#ifndef VBOX
3548 if (unlikely(in_migration)) {
3549 if (!cpu_physical_memory_is_dirty(addr1)) {
3550 /* invalidate code */
3551 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3552 /* set dirty bit */
3553 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3554 (0xff & ~CODE_DIRTY_FLAG);
3555 }
3556 }
3557#endif
3558 }
3559}
3560
3561void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3562{
3563 int io_index;
3564 uint8_t *ptr;
3565 unsigned long pd;
3566 PhysPageDesc *p;
3567
3568 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3569 if (!p) {
3570 pd = IO_MEM_UNASSIGNED;
3571 } else {
3572 pd = p->phys_offset;
3573 }
3574
3575 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3576 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3577#ifdef TARGET_WORDS_BIGENDIAN
3578 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3579 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3580#else
3581 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3582 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3583#endif
3584 } else {
3585#ifndef VBOX
3586 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3587 (addr & ~TARGET_PAGE_MASK);
3588 stq_p(ptr, val);
3589#else
3590 remR3PhysWriteU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3591#endif
3592 }
3593}
3594
3595
3596/* warning: addr must be aligned */
3597void stl_phys(target_phys_addr_t addr, uint32_t val)
3598{
3599 int io_index;
3600 uint8_t *ptr;
3601 unsigned long pd;
3602 PhysPageDesc *p;
3603
3604 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3605 if (!p) {
3606 pd = IO_MEM_UNASSIGNED;
3607 } else {
3608 pd = p->phys_offset;
3609 }
3610
3611 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3612 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3613 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3614 } else {
3615 unsigned long addr1;
3616 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3617 /* RAM case */
3618#ifndef VBOX
3619 ptr = phys_ram_base + addr1;
3620 stl_p(ptr, val);
3621#else
3622 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3623#endif
3624 if (!cpu_physical_memory_is_dirty(addr1)) {
3625 /* invalidate code */
3626 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3627 /* set dirty bit */
3628#ifdef VBOX
3629 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
3630#endif
3631 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3632 (0xff & ~CODE_DIRTY_FLAG);
3633 }
3634 }
3635}
3636
3637/* XXX: optimize */
3638void stb_phys(target_phys_addr_t addr, uint32_t val)
3639{
3640 uint8_t v = val;
3641 cpu_physical_memory_write(addr, &v, 1);
3642}
3643
3644/* XXX: optimize */
3645void stw_phys(target_phys_addr_t addr, uint32_t val)
3646{
3647 uint16_t v = tswap16(val);
3648 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3649}
3650
3651/* XXX: optimize */
3652void stq_phys(target_phys_addr_t addr, uint64_t val)
3653{
3654 val = tswap64(val);
3655 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3656}
3657
3658#endif
3659
3660/* virtual memory access for debug */
3661int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3662 uint8_t *buf, int len, int is_write)
3663{
3664 int l;
3665 target_ulong page, phys_addr;
3666
3667 while (len > 0) {
3668 page = addr & TARGET_PAGE_MASK;
3669 phys_addr = cpu_get_phys_page_debug(env, page);
3670 /* if no physical page mapped, return an error */
3671 if (phys_addr == -1)
3672 return -1;
3673 l = (page + TARGET_PAGE_SIZE) - addr;
3674 if (l > len)
3675 l = len;
3676 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3677 buf, l, is_write);
3678 len -= l;
3679 buf += l;
3680 addr += l;
3681 }
3682 return 0;
3683}
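/* Illustrative usage sketch (documentation annotation, not part of the build):
   this is the path a debugger stub takes to read guest memory through the
   current virtual mappings, e.g. fetching the bytes at the guest's program
   counter (env->eip assumes an x86 CPUState; other targets use their own PC
   field):

       uint8_t insn[16];
       if (cpu_memory_rw_debug(env, env->eip, insn, sizeof(insn), 0) == 0) {
           // insn now holds the code bytes at the guest PC
       }
*/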
3684
3685/* in deterministic execution mode, instructions doing device I/Os
3686 must be at the end of the TB */
3687void cpu_io_recompile(CPUState *env, void *retaddr)
3688{
3689 TranslationBlock *tb;
3690 uint32_t n, cflags;
3691 target_ulong pc, cs_base;
3692 uint64_t flags;
3693
3694 tb = tb_find_pc((unsigned long)retaddr);
3695 if (!tb) {
3696 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3697 retaddr);
3698 }
3699 n = env->icount_decr.u16.low + tb->icount;
3700 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3701 /* Calculate how many instructions had been executed before the fault
3702 occurred. */
3703 n = n - env->icount_decr.u16.low;
3704 /* Generate a new TB ending on the I/O insn. */
3705 n++;
3706 /* On MIPS and SH, delay slot instructions can only be restarted if
3707 they were already the first instruction in the TB. If this is not
3708 the first instruction in a TB then re-execute the preceding
3709 branch. */
3710#if defined(TARGET_MIPS)
3711 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3712 env->active_tc.PC -= 4;
3713 env->icount_decr.u16.low++;
3714 env->hflags &= ~MIPS_HFLAG_BMASK;
3715 }
3716#elif defined(TARGET_SH4)
3717 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3718 && n > 1) {
3719 env->pc -= 2;
3720 env->icount_decr.u16.low++;
3721 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3722 }
3723#endif
3724 /* This should never happen. */
3725 if (n > CF_COUNT_MASK)
3726 cpu_abort(env, "TB too big during recompile");
3727
3728 cflags = n | CF_LAST_IO;
3729 pc = tb->pc;
3730 cs_base = tb->cs_base;
3731 flags = tb->flags;
3732 tb_phys_invalidate(tb, -1);
3733 /* FIXME: In theory this could raise an exception. In practice
3734 we have already translated the block once so it's probably ok. */
3735 tb_gen_code(env, pc, cs_base, flags, cflags);
3736 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3737 the first in the TB) then we end up generating a whole new TB and
3738 repeating the fault, which is horribly inefficient.
3739 Better would be to execute just this insn uncached, or generate a
3740 second new TB. */
3741 cpu_resume_from_signal(env, NULL);
3742}
3743
3744#ifndef VBOX
3745void dump_exec_info(FILE *f,
3746 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3747{
3748 int i, target_code_size, max_target_code_size;
3749 int direct_jmp_count, direct_jmp2_count, cross_page;
3750 TranslationBlock *tb;
3751
3752 target_code_size = 0;
3753 max_target_code_size = 0;
3754 cross_page = 0;
3755 direct_jmp_count = 0;
3756 direct_jmp2_count = 0;
3757 for(i = 0; i < nb_tbs; i++) {
3758 tb = &tbs[i];
3759 target_code_size += tb->size;
3760 if (tb->size > max_target_code_size)
3761 max_target_code_size = tb->size;
3762 if (tb->page_addr[1] != -1)
3763 cross_page++;
3764 if (tb->tb_next_offset[0] != 0xffff) {
3765 direct_jmp_count++;
3766 if (tb->tb_next_offset[1] != 0xffff) {
3767 direct_jmp2_count++;
3768 }
3769 }
3770 }
3771 /* XXX: avoid using doubles ? */
3772 cpu_fprintf(f, "Translation buffer state:\n");
3773 cpu_fprintf(f, "gen code size %ld/%ld\n",
3774 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3775 cpu_fprintf(f, "TB count %d/%d\n",
3776 nb_tbs, code_gen_max_blocks);
3777 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3778 nb_tbs ? target_code_size / nb_tbs : 0,
3779 max_target_code_size);
3780 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3781 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3782 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3783 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3784 cross_page,
3785 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3786 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3787 direct_jmp_count,
3788 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3789 direct_jmp2_count,
3790 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3791 cpu_fprintf(f, "\nStatistics:\n");
3792 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3793 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3794 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3795 tcg_dump_info(f, cpu_fprintf);
3796}
3797#endif /* !VBOX */
3798
3799#if !defined(CONFIG_USER_ONLY)
3800
3801#define MMUSUFFIX _cmmu
3802#define GETPC() NULL
3803#define env cpu_single_env
3804#define SOFTMMU_CODE_ACCESS
3805
3806#define SHIFT 0
3807#include "softmmu_template.h"
3808
3809#define SHIFT 1
3810#include "softmmu_template.h"
3811
3812#define SHIFT 2
3813#include "softmmu_template.h"
3814
3815#define SHIFT 3
3816#include "softmmu_template.h"
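/* Annotation (documentation note): each inclusion of softmmu_template.h above
   instantiates the code-fetch slow path of the software MMU for one access
   size (SHIFT 0..3 = byte..quad).  With MMUSUFFIX set to _cmmu this is
   expected to generate helpers along the lines of ldb_cmmu, ldw_cmmu,
   ldl_cmmu and ldq_cmmu, which the translator uses to read target
   instructions through the TLB. */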
3817
3818#undef env
3819
3820#endif