VirtualBox

source: vbox/trunk/src/recompiler_new/exec.c@ 17659

Last change on this file since 17659 was 17420, checked in by vboxsync, 16 years ago

REM: fix problem in REM codegen buffer size initialization, leading to very small (1M) code buffer. Now use 8M always.

1/*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29#include "config.h"
30#ifndef VBOX
31#ifdef _WIN32
32#include <windows.h>
33#else
34#include <sys/types.h>
35#include <sys/mman.h>
36#endif
37#include <stdlib.h>
38#include <stdio.h>
39#include <stdarg.h>
40#include <string.h>
41#include <errno.h>
42#include <unistd.h>
43#include <inttypes.h>
44#else /* VBOX */
45# include <stdlib.h>
46# include <stdio.h>
47# include <iprt/alloc.h>
48# include <iprt/string.h>
49# include <iprt/param.h>
50# include <VBox/pgm.h> /* PGM_DYNAMIC_RAM_ALLOC */
51#endif /* VBOX */
52
53#include "cpu.h"
54#include "exec-all.h"
55#if defined(CONFIG_USER_ONLY)
56#include <qemu.h>
57#endif
58
59//#define DEBUG_TB_INVALIDATE
60//#define DEBUG_FLUSH
61//#define DEBUG_TLB
62//#define DEBUG_UNASSIGNED
63
64/* make various TB consistency checks */
65//#define DEBUG_TB_CHECK
66//#define DEBUG_TLB_CHECK
67
68#if !defined(CONFIG_USER_ONLY)
69/* TB consistency checks only implemented for usermode emulation. */
70#undef DEBUG_TB_CHECK
71#endif
72
73#define SMC_BITMAP_USE_THRESHOLD 10
74
75#define MMAP_AREA_START 0x00000000
76#define MMAP_AREA_END 0xa8000000
77
78#if defined(TARGET_SPARC64)
79#define TARGET_PHYS_ADDR_SPACE_BITS 41
80#elif defined(TARGET_SPARC)
81#define TARGET_PHYS_ADDR_SPACE_BITS 36
82#elif defined(TARGET_ALPHA)
83#define TARGET_PHYS_ADDR_SPACE_BITS 42
84#define TARGET_VIRT_ADDR_SPACE_BITS 42
85#elif defined(TARGET_PPC64)
86#define TARGET_PHYS_ADDR_SPACE_BITS 42
87#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
88#define TARGET_PHYS_ADDR_SPACE_BITS 42
89#elif defined(TARGET_I386) && !defined(USE_KQEMU)
90#define TARGET_PHYS_ADDR_SPACE_BITS 36
91#else
92/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
93#define TARGET_PHYS_ADDR_SPACE_BITS 32
94#endif
95
96static TranslationBlock *tbs;
97int code_gen_max_blocks;
98TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
99static int nb_tbs;
100/* any access to the tbs or the page table must use this lock */
101spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
102
103#ifndef VBOX
104#if defined(__arm__) || defined(__sparc_v9__)
105/* The prologue must be reachable with a direct jump. ARM and Sparc64
106 have limited branch ranges (possibly also PPC) so place it in a
107 section close to the code segment. */
108#define code_gen_section \
109 __attribute__((__section__(".gen_code"))) \
110 __attribute__((aligned (32)))
111#else
112#define code_gen_section \
113 __attribute__((aligned (32)))
114#endif
115uint8_t code_gen_prologue[1024] code_gen_section;
116
117#else /* VBOX */
118extern uint8_t* code_gen_prologue;
119#endif /* VBOX */
120
121static uint8_t *code_gen_buffer;
122static unsigned long code_gen_buffer_size;
123/* threshold to flush the translated code buffer */
124static unsigned long code_gen_buffer_max_size;
125uint8_t *code_gen_ptr;
126
127#ifndef VBOX
128#if !defined(CONFIG_USER_ONLY)
129ram_addr_t phys_ram_size;
130int phys_ram_fd;
131uint8_t *phys_ram_base;
132uint8_t *phys_ram_dirty;
133static int in_migration;
134static ram_addr_t phys_ram_alloc_offset = 0;
135#endif
136#else /* VBOX */
137RTGCPHYS phys_ram_size;
138/* we have memory ranges (the high PC-BIOS mapping) which
139 cause some pages to fall outside the dirty map here. */
140RTGCPHYS phys_ram_dirty_size;
141#endif /* VBOX */
142#if !defined(VBOX)
143uint8_t *phys_ram_base;
144#endif
145uint8_t *phys_ram_dirty;
146
147CPUState *first_cpu;
148/* current CPU in the current thread. It is only valid inside
149 cpu_exec() */
150CPUState *cpu_single_env;
151/* 0 = Do not count executed instructions.
152 1 = Precise instruction counting.
153 2 = Adaptive rate instruction counting. */
154int use_icount = 0;
155/* Current instruction counter. While executing translated code this may
156 include some instructions that have not yet been executed. */
157int64_t qemu_icount;
158
159typedef struct PageDesc {
160 /* list of TBs intersecting this ram page */
161 TranslationBlock *first_tb;
162 /* in order to optimize self-modifying code handling, we count the number
163 of write accesses to a given page before switching to a bitmap */
164 unsigned int code_write_count;
165 uint8_t *code_bitmap;
166#if defined(CONFIG_USER_ONLY)
167 unsigned long flags;
168#endif
169} PageDesc;
170
171typedef struct PhysPageDesc {
172 /* offset in host memory of the page + io_index in the low 12 bits */
173 ram_addr_t phys_offset;
174} PhysPageDesc;
175
176#define L2_BITS 10
177#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
178/* XXX: this is a temporary hack for alpha target.
179 * In the future, this is to be replaced by a multi-level table
180 * to actually be able to handle the complete 64-bit address space.
181 */
182#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
183#else
184#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
185#endif
186
187#define L1_SIZE (1 << L1_BITS)
188#define L2_SIZE (1 << L2_BITS)
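/* Note: with the defaults above, a page index is resolved through a two-level
   table -- the top L1_BITS bits select an entry of l1_map[] and the low
   L2_BITS bits select the PageDesc within that chunk, i.e. page_find()
   effectively does
       l1_map[index >> L2_BITS][index & (L2_SIZE - 1)]
   For a 32-bit target with 4K pages this is a 1024-entry L1 table pointing
   to lazily allocated 1024-entry arrays of PageDesc. */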
189
190static void io_mem_init(void);
191
192unsigned long qemu_real_host_page_size;
193unsigned long qemu_host_page_bits;
194unsigned long qemu_host_page_size;
195unsigned long qemu_host_page_mask;
196
197/* XXX: for system emulation, it could just be an array */
198static PageDesc *l1_map[L1_SIZE];
199static PhysPageDesc **l1_phys_map;
200
201#if !defined(CONFIG_USER_ONLY)
202static void io_mem_init(void);
203
204/* io memory support */
205CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
206CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
207void *io_mem_opaque[IO_MEM_NB_ENTRIES];
208static int io_mem_nb;
209static int io_mem_watch;
210#endif
211
212#ifndef VBOX
213/* log support */
214static const char *logfilename = "/tmp/qemu.log";
215#endif /* !VBOX */
216FILE *logfile;
217int loglevel;
218#ifndef VBOX
219static int log_append = 0;
220#endif
221
222/* statistics */
223static int tlb_flush_count;
224static int tb_flush_count;
225#ifndef VBOX
226static int tb_phys_invalidate_count;
227#endif /* !VBOX */
228
229#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
230typedef struct subpage_t {
231 target_phys_addr_t base;
232 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
233 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
234 void *opaque[TARGET_PAGE_SIZE][2][4];
235} subpage_t;
236
237
238#ifndef VBOX
239#ifdef _WIN32
240static void map_exec(void *addr, long size)
241{
242 DWORD old_protect;
243 VirtualProtect(addr, size,
244 PAGE_EXECUTE_READWRITE, &old_protect);
245
246}
247#else
248static void map_exec(void *addr, long size)
249{
250 unsigned long start, end, page_size;
251
252 page_size = getpagesize();
253 start = (unsigned long)addr;
254 start &= ~(page_size - 1);
255
256 end = (unsigned long)addr + size;
257 end += page_size - 1;
258 end &= ~(page_size - 1);
259
260 mprotect((void *)start, end - start,
261 PROT_READ | PROT_WRITE | PROT_EXEC);
262}
263#endif
264#else // VBOX
265static void map_exec(void *addr, long size)
266{
267 RTMemProtect(addr, size,
268 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
269}
270#endif
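/* Note: all three map_exec() variants above do the same job -- mark the given
   host memory range readable, writable and executable so generated code can
   be run from it: VirtualProtect() on Windows, mprotect() on other hosts, and
   RTMemProtect() from IPRT in the VBox build. */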
271
272static void page_init(void)
273{
274 /* NOTE: we can always suppose that qemu_host_page_size >=
275 TARGET_PAGE_SIZE */
276#ifdef VBOX
277 RTMemProtect(code_gen_buffer, sizeof(code_gen_buffer),
278 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
279 qemu_real_host_page_size = PAGE_SIZE;
280#else /* !VBOX */
281#ifdef _WIN32
282 {
283 SYSTEM_INFO system_info;
284 DWORD old_protect;
285
286 GetSystemInfo(&system_info);
287 qemu_real_host_page_size = system_info.dwPageSize;
288 }
289#else
290 qemu_real_host_page_size = getpagesize();
291#endif
292#endif /* !VBOX */
293
294 if (qemu_host_page_size == 0)
295 qemu_host_page_size = qemu_real_host_page_size;
296 if (qemu_host_page_size < TARGET_PAGE_SIZE)
297 qemu_host_page_size = TARGET_PAGE_SIZE;
298 qemu_host_page_bits = 0;
299#ifndef VBOX
300 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
301#else
302 while ((1 << qemu_host_page_bits) < (int)qemu_host_page_size)
303#endif
304 qemu_host_page_bits++;
305 qemu_host_page_mask = ~(qemu_host_page_size - 1);
306 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
307 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
308#ifdef VBOX
309 /* We use other means to set the reserved bit on our pages */
310#else
311#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
312 {
313 long long startaddr, endaddr;
314 FILE *f;
315 int n;
316
317 mmap_lock();
318 last_brk = (unsigned long)sbrk(0);
319 f = fopen("/proc/self/maps", "r");
320 if (f) {
321 do {
322 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
323 if (n == 2) {
324 startaddr = MIN(startaddr,
325 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
326 endaddr = MIN(endaddr,
327 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
328 page_set_flags(startaddr & TARGET_PAGE_MASK,
329 TARGET_PAGE_ALIGN(endaddr),
330 PAGE_RESERVED);
331 }
332 } while (!feof(f));
333 fclose(f);
334 }
335 mmap_unlock();
336 }
337#endif
338#endif
339}
340
341#ifndef VBOX
342static inline PageDesc **page_l1_map(target_ulong index)
343#else
344DECLINLINE(PageDesc **) page_l1_map(target_ulong index)
345#endif
346{
347#if TARGET_LONG_BITS > 32
348 /* Host memory outside guest VM. For 32-bit targets we have already
349 excluded high addresses. */
350 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
351 return NULL;
352#endif
353 return &l1_map[index >> L2_BITS];
354}
355
356#ifndef VBOX
357static inline PageDesc *page_find_alloc(target_ulong index)
358#else
359DECLINLINE(PageDesc *) page_find_alloc(target_ulong index)
360#endif
361{
362 PageDesc **lp, *p;
363 lp = page_l1_map(index);
364 if (!lp)
365 return NULL;
366
367 p = *lp;
368 if (!p) {
369 /* allocate if not found */
370#if defined(CONFIG_USER_ONLY)
371 unsigned long addr;
372 size_t len = sizeof(PageDesc) * L2_SIZE;
373 /* Don't use qemu_malloc because it may recurse. */
374 p = mmap(0, len, PROT_READ | PROT_WRITE,
375 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
376 *lp = p;
377 addr = h2g(p);
378 if (addr == (target_ulong)addr) {
379 page_set_flags(addr & TARGET_PAGE_MASK,
380 TARGET_PAGE_ALIGN(addr + len),
381 PAGE_RESERVED);
382 }
383#else
384 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
385 *lp = p;
386#endif
387 }
388 return p + (index & (L2_SIZE - 1));
389}
390
391#ifndef VBOX
392static inline PageDesc *page_find(target_ulong index)
393#else
394DECLINLINE(PageDesc *) page_find(target_ulong index)
395#endif
396{
397 PageDesc **lp, *p;
398 lp = page_l1_map(index);
399 if (!lp)
400 return NULL;
401
402 p = *lp;
403 if (!p)
404 return 0;
405 return p + (index & (L2_SIZE - 1));
406}
407
408static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
409{
410 void **lp, **p;
411 PhysPageDesc *pd;
412
413 p = (void **)l1_phys_map;
414#if TARGET_PHYS_ADDR_SPACE_BITS > 32
415
416#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
417#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
418#endif
419 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
420 p = *lp;
421 if (!p) {
422 /* allocate if not found */
423 if (!alloc)
424 return NULL;
425 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
426 memset(p, 0, sizeof(void *) * L1_SIZE);
427 *lp = p;
428 }
429#endif
430 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
431 pd = *lp;
432 if (!pd) {
433 int i;
434 /* allocate if not found */
435 if (!alloc)
436 return NULL;
437 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
438 *lp = pd;
439 for (i = 0; i < L2_SIZE; i++)
440 pd[i].phys_offset = IO_MEM_UNASSIGNED;
441 }
442#if defined(VBOX) && !defined(VBOX_WITH_NEW_PHYS_CODE)
443 pd = ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
444 if (RT_UNLIKELY((pd->phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING))
445 remR3GrowDynRange(pd->phys_offset & TARGET_PAGE_MASK);
446 return pd;
447#else
448 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
449#endif
450}
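/* Note: the physical page table mirrors the virtual one, with one extra
   indirection level when TARGET_PHYS_ADDR_SPACE_BITS > 32. Freshly allocated
   entries start out as IO_MEM_UNASSIGNED; the VBox-only branch additionally
   asks PGM (via remR3GrowDynRange) to back a page that is still marked
   IO_MEM_RAM_MISSING before handing the descriptor back. */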
451
452#ifndef VBOX
453static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
454#else
455DECLINLINE(PhysPageDesc *) phys_page_find(target_phys_addr_t index)
456#endif
457{
458 return phys_page_find_alloc(index, 0);
459}
460
461#if !defined(CONFIG_USER_ONLY)
462static void tlb_protect_code(ram_addr_t ram_addr);
463static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
464 target_ulong vaddr);
465#define mmap_lock() do { } while(0)
466#define mmap_unlock() do { } while(0)
467#endif
468
469#ifdef VBOX
470/*
471 * We don't need such a huge codegen buffer, as we execute most of the code
472 * in raw or hwacc mode.
473 */
474#define DEFAULT_CODE_GEN_BUFFER_SIZE (8 * 1024 * 1024)
475#else
476#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
477#endif
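/* Note: this is the constant referred to in the checkin comment at the top of
   the page -- the VBox build always uses a fixed 8 MB code generation buffer
   (guest code mostly runs raw or under hardware virtualization, so only a
   small fraction is ever recompiled), while upstream QEMU defaults to 32 MB
   or a fraction of the guest RAM size (see code_gen_alloc() below). */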
478
479#if defined(CONFIG_USER_ONLY)
480/* Currently it is not recommended to allocate big chunks of data in
481 user mode. This will change when a dedicated libc is used. */
482#define USE_STATIC_CODE_GEN_BUFFER
483#endif
484
485/* VBox allocates the codegen buffer dynamically */
486#ifndef VBOX
487#ifdef USE_STATIC_CODE_GEN_BUFFER
488static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
489#endif
490#endif
491
492static void code_gen_alloc(unsigned long tb_size)
493{
494#ifdef USE_STATIC_CODE_GEN_BUFFER
495 code_gen_buffer = static_code_gen_buffer;
496 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
497 map_exec(code_gen_buffer, code_gen_buffer_size);
498#else
499#ifdef VBOX
500 /* We cannot use phys_ram_size here, as it is still 0 at this point;
501 * it only gets initialized once the RAM registration callback
502 * (REMR3NotifyPhysRamRegister()) has been called.
503 */
504 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
505#else
506 code_gen_buffer_size = tb_size;
507 if (code_gen_buffer_size == 0) {
508#if defined(CONFIG_USER_ONLY)
509 /* in user mode, phys_ram_size is not meaningful */
510 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
511#else
512 /* XXX: needs adjustments */
513 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
514#endif
515
516 }
517 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
518 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
519#endif /* VBOX */
520
521 /* The code gen buffer location may have constraints depending on
522 the host cpu and OS */
523#ifdef VBOX
524 code_gen_buffer = RTMemExecAlloc(code_gen_buffer_size);
525
526 if (!code_gen_buffer) {
527 LogRel(("REM: failed allocate codegen buffer %lld\n",
528 code_gen_buffer_size));
529 return;
530 }
531#else //!VBOX
532#if defined(__linux__)
533 {
534 int flags;
535 void *start = NULL;
536
537 flags = MAP_PRIVATE | MAP_ANONYMOUS;
538#if defined(__x86_64__)
539 flags |= MAP_32BIT;
540 /* Cannot map more than that */
541 if (code_gen_buffer_size > (800 * 1024 * 1024))
542 code_gen_buffer_size = (800 * 1024 * 1024);
543#elif defined(__sparc_v9__)
544 // Map the buffer below 2G, so we can use direct calls and branches
545 flags |= MAP_FIXED;
546 start = (void *) 0x60000000UL;
547 if (code_gen_buffer_size > (512 * 1024 * 1024))
548 code_gen_buffer_size = (512 * 1024 * 1024);
549#endif
550 code_gen_buffer = mmap(start, code_gen_buffer_size,
551 PROT_WRITE | PROT_READ | PROT_EXEC,
552 flags, -1, 0);
553 if (code_gen_buffer == MAP_FAILED) {
554 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
555 exit(1);
556 }
557 }
558#elif defined(__FreeBSD__)
559 {
560 int flags;
561 void *addr = NULL;
562 flags = MAP_PRIVATE | MAP_ANONYMOUS;
563#if defined(__x86_64__)
564 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
565 * 0x40000000 is free */
566 flags |= MAP_FIXED;
567 addr = (void *)0x40000000;
568 /* Cannot map more than that */
569 if (code_gen_buffer_size > (800 * 1024 * 1024))
570 code_gen_buffer_size = (800 * 1024 * 1024);
571#endif
572 code_gen_buffer = mmap(addr, code_gen_buffer_size,
573 PROT_WRITE | PROT_READ | PROT_EXEC,
574 flags, -1, 0);
575 if (code_gen_buffer == MAP_FAILED) {
576 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
577 exit(1);
578 }
579 }
580#else
581 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
582 if (!code_gen_buffer) {
583 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
584 exit(1);
585 }
586 map_exec(code_gen_buffer, code_gen_buffer_size);
587#endif
588 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
589#endif /* !VBOX */
590#endif /* !USE_STATIC_CODE_GEN_BUFFER */
591#ifndef VBOX
592 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
593#else
594 map_exec(code_gen_prologue, _1K);
595#endif
596
597 code_gen_buffer_max_size = code_gen_buffer_size -
598 code_gen_max_block_size();
599 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
600 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
601}
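/* Note on the sizing at the end of code_gen_alloc(): code_gen_buffer_max_size
   leaves room for one maximum-sized translation block, so tb_gen_code() can
   always finish the block it has started before a flush is forced, and
   code_gen_max_blocks (buffer size / CODE_GEN_AVG_BLOCK_SIZE) dimensions the
   tbs[] descriptor array allocated on the last line. */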
602
603/* Must be called before using the QEMU cpus. 'tb_size' is the size
604 (in bytes) allocated to the translation buffer. Zero means default
605 size. */
606void cpu_exec_init_all(unsigned long tb_size)
607{
608 cpu_gen_init();
609 code_gen_alloc(tb_size);
610 code_gen_ptr = code_gen_buffer;
611 page_init();
612#if !defined(CONFIG_USER_ONLY)
613 io_mem_init();
614#endif
615}
616
617#ifndef VBOX
618#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
619
620#define CPU_COMMON_SAVE_VERSION 1
621
622static void cpu_common_save(QEMUFile *f, void *opaque)
623{
624 CPUState *env = opaque;
625
626 qemu_put_be32s(f, &env->halted);
627 qemu_put_be32s(f, &env->interrupt_request);
628}
629
630static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
631{
632 CPUState *env = opaque;
633
634 if (version_id != CPU_COMMON_SAVE_VERSION)
635 return -EINVAL;
636
637 qemu_get_be32s(f, &env->halted);
638 qemu_get_be32s(f, &env->interrupt_request);
639 tlb_flush(env, 1);
640
641 return 0;
642}
643#endif
644#endif //!VBOX
645
646void cpu_exec_init(CPUState *env)
647{
648 CPUState **penv;
649 int cpu_index;
650
651 env->next_cpu = NULL;
652 penv = &first_cpu;
653 cpu_index = 0;
654 while (*penv != NULL) {
655 penv = (CPUState **)&(*penv)->next_cpu;
656 cpu_index++;
657 }
658 env->cpu_index = cpu_index;
659 env->nb_watchpoints = 0;
660 *penv = env;
661#ifndef VBOX
662#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
663 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
664 cpu_common_save, cpu_common_load, env);
665 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
666 cpu_save, cpu_load, env);
667#endif
668#endif // !VBOX
669}
670
671#ifndef VBOX
672static inline void invalidate_page_bitmap(PageDesc *p)
673#else
674DECLINLINE(void) invalidate_page_bitmap(PageDesc *p)
675#endif
676{
677 if (p->code_bitmap) {
678 qemu_free(p->code_bitmap);
679 p->code_bitmap = NULL;
680 }
681 p->code_write_count = 0;
682}
683
684/* set to NULL all the 'first_tb' fields in all PageDescs */
685static void page_flush_tb(void)
686{
687 int i, j;
688 PageDesc *p;
689
690 for(i = 0; i < L1_SIZE; i++) {
691 p = l1_map[i];
692 if (p) {
693 for(j = 0; j < L2_SIZE; j++) {
694 p->first_tb = NULL;
695 invalidate_page_bitmap(p);
696 p++;
697 }
698 }
699 }
700}
701
702/* flush all the translation blocks */
703/* XXX: tb_flush is currently not thread safe */
704void tb_flush(CPUState *env1)
705{
706 CPUState *env;
707#if defined(DEBUG_FLUSH)
708 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
709 (unsigned long)(code_gen_ptr - code_gen_buffer),
710 nb_tbs, nb_tbs > 0 ?
711 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
712#endif
713 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
714 cpu_abort(env1, "Internal error: code buffer overflow\n");
715
716 nb_tbs = 0;
717
718 for(env = first_cpu; env != NULL; env = env->next_cpu) {
719 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
720 }
721
722 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
723 page_flush_tb();
724
725 code_gen_ptr = code_gen_buffer;
726 /* XXX: flush processor icache at this point if cache flush is
727 expensive */
728 tb_flush_count++;
729}
730
731#ifdef DEBUG_TB_CHECK
732static void tb_invalidate_check(target_ulong address)
733{
734 TranslationBlock *tb;
735 int i;
736 address &= TARGET_PAGE_MASK;
737 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
738 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
739 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
740 address >= tb->pc + tb->size)) {
741 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
742 address, (long)tb->pc, tb->size);
743 }
744 }
745 }
746}
747
748/* verify that all the pages have correct rights for code */
749static void tb_page_check(void)
750{
751 TranslationBlock *tb;
752 int i, flags1, flags2;
753
754 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
755 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
756 flags1 = page_get_flags(tb->pc);
757 flags2 = page_get_flags(tb->pc + tb->size - 1);
758 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
759 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
760 (long)tb->pc, tb->size, flags1, flags2);
761 }
762 }
763 }
764}
765
766static void tb_jmp_check(TranslationBlock *tb)
767{
768 TranslationBlock *tb1;
769 unsigned int n1;
770
771 /* suppress any remaining jumps to this TB */
772 tb1 = tb->jmp_first;
773 for(;;) {
774 n1 = (long)tb1 & 3;
775 tb1 = (TranslationBlock *)((long)tb1 & ~3);
776 if (n1 == 2)
777 break;
778 tb1 = tb1->jmp_next[n1];
779 }
780 /* check end of list */
781 if (tb1 != tb) {
782 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
783 }
784}
785#endif // DEBUG_TB_CHECK
786
787/* invalidate one TB */
788#ifndef VBOX
789static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
790 int next_offset)
791#else
792DECLINLINE(void) tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
793 int next_offset)
794#endif
795{
796 TranslationBlock *tb1;
797 for(;;) {
798 tb1 = *ptb;
799 if (tb1 == tb) {
800 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
801 break;
802 }
803 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
804 }
805}
806
807#ifndef VBOX
808static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
809#else
810DECLINLINE(void) tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
811#endif
812{
813 TranslationBlock *tb1;
814 unsigned int n1;
815
816 for(;;) {
817 tb1 = *ptb;
818 n1 = (long)tb1 & 3;
819 tb1 = (TranslationBlock *)((long)tb1 & ~3);
820 if (tb1 == tb) {
821 *ptb = tb1->page_next[n1];
822 break;
823 }
824 ptb = &tb1->page_next[n1];
825 }
826}
827
828#ifndef VBOX
829static inline void tb_jmp_remove(TranslationBlock *tb, int n)
830#else
831DECLINLINE(void) tb_jmp_remove(TranslationBlock *tb, int n)
832#endif
833{
834 TranslationBlock *tb1, **ptb;
835 unsigned int n1;
836
837 ptb = &tb->jmp_next[n];
838 tb1 = *ptb;
839 if (tb1) {
840 /* find tb(n) in circular list */
841 for(;;) {
842 tb1 = *ptb;
843 n1 = (long)tb1 & 3;
844 tb1 = (TranslationBlock *)((long)tb1 & ~3);
845 if (n1 == n && tb1 == tb)
846 break;
847 if (n1 == 2) {
848 ptb = &tb1->jmp_first;
849 } else {
850 ptb = &tb1->jmp_next[n1];
851 }
852 }
853 /* now we can suppress tb(n) from the list */
854 *ptb = tb->jmp_next[n];
855
856 tb->jmp_next[n] = NULL;
857 }
858}
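/* Note: the jmp_first/jmp_next pointers walked above are tagged pointers --
   the low two bits hold the jump slot (0 or 1) used by the referencing TB,
   and the value 2 marks the list head (the owning TB itself). That is why
   the list-walking code keeps doing
       n1 = (long)tb1 & 3;
       tb1 = (TranslationBlock *)((long)tb1 & ~3);
   and stops when n1 == 2. */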
859
860/* reset the jump entry 'n' of a TB so that it is not chained to
861 another TB */
862#ifndef VBOX
863static inline void tb_reset_jump(TranslationBlock *tb, int n)
864#else
865DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
866#endif
867{
868 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
869}
870
871void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
872{
873 CPUState *env;
874 PageDesc *p;
875 unsigned int h, n1;
876 target_phys_addr_t phys_pc;
877 TranslationBlock *tb1, *tb2;
878
879 /* remove the TB from the hash list */
880 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
881 h = tb_phys_hash_func(phys_pc);
882 tb_remove(&tb_phys_hash[h], tb,
883 offsetof(TranslationBlock, phys_hash_next));
884
885 /* remove the TB from the page list */
886 if (tb->page_addr[0] != page_addr) {
887 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
888 tb_page_remove(&p->first_tb, tb);
889 invalidate_page_bitmap(p);
890 }
891 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
892 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
893 tb_page_remove(&p->first_tb, tb);
894 invalidate_page_bitmap(p);
895 }
896
897 tb_invalidated_flag = 1;
898
899 /* remove the TB from the hash list */
900 h = tb_jmp_cache_hash_func(tb->pc);
901 for(env = first_cpu; env != NULL; env = env->next_cpu) {
902 if (env->tb_jmp_cache[h] == tb)
903 env->tb_jmp_cache[h] = NULL;
904 }
905
906 /* suppress this TB from the two jump lists */
907 tb_jmp_remove(tb, 0);
908 tb_jmp_remove(tb, 1);
909
910 /* suppress any remaining jumps to this TB */
911 tb1 = tb->jmp_first;
912 for(;;) {
913 n1 = (long)tb1 & 3;
914 if (n1 == 2)
915 break;
916 tb1 = (TranslationBlock *)((long)tb1 & ~3);
917 tb2 = tb1->jmp_next[n1];
918 tb_reset_jump(tb1, n1);
919 tb1->jmp_next[n1] = NULL;
920 tb1 = tb2;
921 }
922 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
923
924#ifndef VBOX
925 tb_phys_invalidate_count++;
926#endif
927}
928
929
930#ifdef VBOX
931void tb_invalidate_virt(CPUState *env, uint32_t eip)
932{
933# if 1
934 tb_flush(env);
935# else
936 uint8_t *cs_base, *pc;
937 unsigned int flags, h, phys_pc;
938 TranslationBlock *tb, **ptb;
939
940 flags = env->hflags;
941 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
942 cs_base = env->segs[R_CS].base;
943 pc = cs_base + eip;
944
945 tb = tb_find(&ptb, (unsigned long)pc, (unsigned long)cs_base,
946 flags);
947
948 if(tb)
949 {
950# ifdef DEBUG
951 printf("invalidating TB (%08X) at %08X\n", tb, eip);
952# endif
953 tb_invalidate(tb);
954 //Note: this will leak TBs, but the whole cache will be flushed
955 // when it happens too often
956 tb->pc = 0;
957 tb->cs_base = 0;
958 tb->flags = 0;
959 }
960# endif
961}
962
963# ifdef VBOX_STRICT
964/**
965 * Gets the page offset.
966 */
967unsigned long get_phys_page_offset(target_ulong addr)
968{
969 PhysPageDesc *p = phys_page_find(addr >> TARGET_PAGE_BITS);
970 return p ? p->phys_offset : 0;
971}
972# endif /* VBOX_STRICT */
973#endif /* VBOX */
974
975#ifndef VBOX
976static inline void set_bits(uint8_t *tab, int start, int len)
977#else
978DECLINLINE(void) set_bits(uint8_t *tab, int start, int len)
979#endif
980{
981 int end, mask, end1;
982
983 end = start + len;
984 tab += start >> 3;
985 mask = 0xff << (start & 7);
986 if ((start & ~7) == (end & ~7)) {
987 if (start < end) {
988 mask &= ~(0xff << (end & 7));
989 *tab |= mask;
990 }
991 } else {
992 *tab++ |= mask;
993 start = (start + 8) & ~7;
994 end1 = end & ~7;
995 while (start < end1) {
996 *tab++ = 0xff;
997 start += 8;
998 }
999 if (start < end) {
1000 mask = ~(0xff << (end & 7));
1001 *tab |= mask;
1002 }
1003 }
1004}
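/* Example: set_bits(bitmap, 12, 3) marks bits 12..14, i.e. it ORs
   bitmap[1] with 0x70 -- the bitmap is stored LSB-first within each byte,
   which matches the "b >> (offset & 7)" test in
   tb_invalidate_phys_page_fast() below. */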
1005
1006static void build_page_bitmap(PageDesc *p)
1007{
1008 int n, tb_start, tb_end;
1009 TranslationBlock *tb;
1010
1011 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
1012 if (!p->code_bitmap)
1013 return;
1014 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
1015
1016 tb = p->first_tb;
1017 while (tb != NULL) {
1018 n = (long)tb & 3;
1019 tb = (TranslationBlock *)((long)tb & ~3);
1020 /* NOTE: this is subtle as a TB may span two physical pages */
1021 if (n == 0) {
1022 /* NOTE: tb_end may be after the end of the page, but
1023 it is not a problem */
1024 tb_start = tb->pc & ~TARGET_PAGE_MASK;
1025 tb_end = tb_start + tb->size;
1026 if (tb_end > TARGET_PAGE_SIZE)
1027 tb_end = TARGET_PAGE_SIZE;
1028 } else {
1029 tb_start = 0;
1030 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1031 }
1032 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
1033 tb = tb->page_next[n];
1034 }
1035}
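/* Note: the code bitmap built above has one bit per byte of the target page
   (TARGET_PAGE_SIZE / 8 bytes) and is only created once a page has taken
   SMC_BITMAP_USE_THRESHOLD write faults; it lets the fast write path below
   skip the expensive TB invalidation when a store does not touch any byte
   covered by translated code. */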
1036
1037TranslationBlock *tb_gen_code(CPUState *env,
1038 target_ulong pc, target_ulong cs_base,
1039 int flags, int cflags)
1040{
1041 TranslationBlock *tb;
1042 uint8_t *tc_ptr;
1043 target_ulong phys_pc, phys_page2, virt_page2;
1044 int code_gen_size;
1045
1046 phys_pc = get_phys_addr_code(env, pc);
1047 tb = tb_alloc(pc);
1048 if (!tb) {
1049 /* flush must be done */
1050 tb_flush(env);
1051 /* cannot fail at this point */
1052 tb = tb_alloc(pc);
1053 /* Don't forget to invalidate previous TB info. */
1054 tb_invalidated_flag = 1;
1055 }
1056 tc_ptr = code_gen_ptr;
1057 tb->tc_ptr = tc_ptr;
1058 tb->cs_base = cs_base;
1059 tb->flags = flags;
1060 tb->cflags = cflags;
1061 cpu_gen_code(env, tb, &code_gen_size);
1062 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
1063
1064 /* check next page if needed */
1065 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1066 phys_page2 = -1;
1067 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1068 phys_page2 = get_phys_addr_code(env, virt_page2);
1069 }
1070 tb_link_phys(tb, phys_pc, phys_page2);
1071 return tb;
1072}
1073
1074/* invalidate all TBs which intersect with the target physical address
1075 range [start; end[. NOTE: start and end must refer to
1076 the same physical page. 'is_cpu_write_access' should be true if called
1077 from a real cpu write access: the virtual CPU will exit the current
1078 TB if code is modified inside this TB. */
1079void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
1080 int is_cpu_write_access)
1081{
1082 int n, current_tb_modified, current_tb_not_found, current_flags;
1083 CPUState *env = cpu_single_env;
1084 PageDesc *p;
1085 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
1086 target_ulong tb_start, tb_end;
1087 target_ulong current_pc, current_cs_base;
1088
1089 p = page_find(start >> TARGET_PAGE_BITS);
1090 if (!p)
1091 return;
1092 if (!p->code_bitmap &&
1093 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1094 is_cpu_write_access) {
1095 /* build code bitmap */
1096 build_page_bitmap(p);
1097 }
1098
1099 /* we remove all the TBs in the range [start, end[ */
1100 /* XXX: see if in some cases it could be faster to invalidate all the code */
1101 current_tb_not_found = is_cpu_write_access;
1102 current_tb_modified = 0;
1103 current_tb = NULL; /* avoid warning */
1104 current_pc = 0; /* avoid warning */
1105 current_cs_base = 0; /* avoid warning */
1106 current_flags = 0; /* avoid warning */
1107 tb = p->first_tb;
1108 while (tb != NULL) {
1109 n = (long)tb & 3;
1110 tb = (TranslationBlock *)((long)tb & ~3);
1111 tb_next = tb->page_next[n];
1112 /* NOTE: this is subtle as a TB may span two physical pages */
1113 if (n == 0) {
1114 /* NOTE: tb_end may be after the end of the page, but
1115 it is not a problem */
1116 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1117 tb_end = tb_start + tb->size;
1118 } else {
1119 tb_start = tb->page_addr[1];
1120 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1121 }
1122 if (!(tb_end <= start || tb_start >= end)) {
1123#ifdef TARGET_HAS_PRECISE_SMC
1124 if (current_tb_not_found) {
1125 current_tb_not_found = 0;
1126 current_tb = NULL;
1127 if (env->mem_io_pc) {
1128 /* now we have a real cpu fault */
1129 current_tb = tb_find_pc(env->mem_io_pc);
1130 }
1131 }
1132 if (current_tb == tb &&
1133 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1134 /* If we are modifying the current TB, we must stop
1135 its execution. We could be more precise by checking
1136 that the modification is after the current PC, but it
1137 would require a specialized function to partially
1138 restore the CPU state */
1139
1140 current_tb_modified = 1;
1141 cpu_restore_state(current_tb, env,
1142 env->mem_io_pc, NULL);
1143#if defined(TARGET_I386)
1144 current_flags = env->hflags;
1145 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
1146 current_cs_base = (target_ulong)env->segs[R_CS].base;
1147 current_pc = current_cs_base + env->eip;
1148#else
1149#error unsupported CPU
1150#endif
1151 }
1152#endif /* TARGET_HAS_PRECISE_SMC */
1153 /* we need to do that to handle the case where a signal
1154 occurs while doing tb_phys_invalidate() */
1155 saved_tb = NULL;
1156 if (env) {
1157 saved_tb = env->current_tb;
1158 env->current_tb = NULL;
1159 }
1160 tb_phys_invalidate(tb, -1);
1161 if (env) {
1162 env->current_tb = saved_tb;
1163 if (env->interrupt_request && env->current_tb)
1164 cpu_interrupt(env, env->interrupt_request);
1165 }
1166 }
1167 tb = tb_next;
1168 }
1169#if !defined(CONFIG_USER_ONLY)
1170 /* if no code remaining, no need to continue to use slow writes */
1171 if (!p->first_tb) {
1172 invalidate_page_bitmap(p);
1173 if (is_cpu_write_access) {
1174 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1175 }
1176 }
1177#endif
1178#ifdef TARGET_HAS_PRECISE_SMC
1179 if (current_tb_modified) {
1180 /* we generate a block containing just the instruction
1181 modifying the memory. It will ensure that it cannot modify
1182 itself */
1183 env->current_tb = NULL;
1184 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1185 cpu_resume_from_signal(env, NULL);
1186 }
1187#endif
1188}
1189
1190
1191/* len must be <= 8 and start must be a multiple of len */
1192#ifndef VBOX
1193static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1194#else
1195DECLINLINE(void) tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1196#endif
1197{
1198 PageDesc *p;
1199 int offset, b;
1200#if 0
1201 if (1) {
1202 if (loglevel) {
1203 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1204 cpu_single_env->mem_io_vaddr, len,
1205 cpu_single_env->eip,
1206 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1207 }
1208 }
1209#endif
1210 p = page_find(start >> TARGET_PAGE_BITS);
1211 if (!p)
1212 return;
1213 if (p->code_bitmap) {
1214 offset = start & ~TARGET_PAGE_MASK;
1215 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1216 if (b & ((1 << len) - 1))
1217 goto do_invalidate;
1218 } else {
1219 do_invalidate:
1220 tb_invalidate_phys_page_range(start, start + len, 1);
1221 }
1222}
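/* Note: the fast path above extracts the 'len' bitmap bits starting at the
   write offset and only falls through to the full
   tb_invalidate_phys_page_range() when at least one of them is set (or when
   no bitmap has been built yet for this page). */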
1223
1224
1225#if !defined(CONFIG_SOFTMMU)
1226static void tb_invalidate_phys_page(target_phys_addr_t addr,
1227 unsigned long pc, void *puc)
1228{
1229 int n, current_flags, current_tb_modified;
1230 target_ulong current_pc, current_cs_base;
1231 PageDesc *p;
1232 TranslationBlock *tb, *current_tb;
1233#ifdef TARGET_HAS_PRECISE_SMC
1234 CPUState *env = cpu_single_env;
1235#endif
1236
1237 addr &= TARGET_PAGE_MASK;
1238 p = page_find(addr >> TARGET_PAGE_BITS);
1239 if (!p)
1240 return;
1241 tb = p->first_tb;
1242 current_tb_modified = 0;
1243 current_tb = NULL;
1244 current_pc = 0; /* avoid warning */
1245 current_cs_base = 0; /* avoid warning */
1246 current_flags = 0; /* avoid warning */
1247#ifdef TARGET_HAS_PRECISE_SMC
1248 if (tb && pc != 0) {
1249 current_tb = tb_find_pc(pc);
1250 }
1251#endif
1252 while (tb != NULL) {
1253 n = (long)tb & 3;
1254 tb = (TranslationBlock *)((long)tb & ~3);
1255#ifdef TARGET_HAS_PRECISE_SMC
1256 if (current_tb == tb &&
1257 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1258 /* If we are modifying the current TB, we must stop
1259 its execution. We could be more precise by checking
1260 that the modification is after the current PC, but it
1261 would require a specialized function to partially
1262 restore the CPU state */
1263
1264 current_tb_modified = 1;
1265 cpu_restore_state(current_tb, env, pc, puc);
1266#if defined(TARGET_I386)
1267 current_flags = env->hflags;
1268 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
1269 current_cs_base = (target_ulong)env->segs[R_CS].base;
1270 current_pc = current_cs_base + env->eip;
1271#else
1272#error unsupported CPU
1273#endif
1274 }
1275#endif /* TARGET_HAS_PRECISE_SMC */
1276 tb_phys_invalidate(tb, addr);
1277 tb = tb->page_next[n];
1278 }
1279 p->first_tb = NULL;
1280#ifdef TARGET_HAS_PRECISE_SMC
1281 if (current_tb_modified) {
1282 /* we generate a block containing just the instruction
1283 modifying the memory. It will ensure that it cannot modify
1284 itself */
1285 env->current_tb = NULL;
1286 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1287 cpu_resume_from_signal(env, puc);
1288 }
1289#endif
1290}
1291#endif
1292
1293/* add the tb to the target page and protect it if necessary */
1294#ifndef VBOX
1295static inline void tb_alloc_page(TranslationBlock *tb,
1296 unsigned int n, target_ulong page_addr)
1297#else
1298DECLINLINE(void) tb_alloc_page(TranslationBlock *tb,
1299 unsigned int n, target_ulong page_addr)
1300#endif
1301{
1302 PageDesc *p;
1303 TranslationBlock *last_first_tb;
1304
1305 tb->page_addr[n] = page_addr;
1306 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1307 tb->page_next[n] = p->first_tb;
1308 last_first_tb = p->first_tb;
1309 p->first_tb = (TranslationBlock *)((long)tb | n);
1310 invalidate_page_bitmap(p);
1311
1312#if defined(TARGET_HAS_SMC) || 1
1313
1314#if defined(CONFIG_USER_ONLY)
1315 if (p->flags & PAGE_WRITE) {
1316 target_ulong addr;
1317 PageDesc *p2;
1318 int prot;
1319
1320 /* force the host page to be non-writable (writes will have a
1321 page fault + mprotect overhead) */
1322 page_addr &= qemu_host_page_mask;
1323 prot = 0;
1324 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1325 addr += TARGET_PAGE_SIZE) {
1326
1327 p2 = page_find (addr >> TARGET_PAGE_BITS);
1328 if (!p2)
1329 continue;
1330 prot |= p2->flags;
1331 p2->flags &= ~PAGE_WRITE;
1332 page_get_flags(addr);
1333 }
1334 mprotect(g2h(page_addr), qemu_host_page_size,
1335 (prot & PAGE_BITS) & ~PAGE_WRITE);
1336#ifdef DEBUG_TB_INVALIDATE
1337 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1338 page_addr);
1339#endif
1340 }
1341#else
1342 /* if some code is already present, then the pages are already
1343 protected. So we handle the case where only the first TB is
1344 allocated in a physical page */
1345 if (!last_first_tb) {
1346 tlb_protect_code(page_addr);
1347 }
1348#endif
1349
1350#endif /* TARGET_HAS_SMC */
1351}
1352
1353/* Allocate a new translation block. Flush the translation buffer if
1354 too many translation blocks or too much generated code. */
1355TranslationBlock *tb_alloc(target_ulong pc)
1356{
1357 TranslationBlock *tb;
1358
1359 if (nb_tbs >= code_gen_max_blocks ||
1360#ifndef VBOX
1361 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1362#else
1363 (code_gen_ptr - code_gen_buffer) >= (int)code_gen_buffer_max_size)
1364#endif
1365 return NULL;
1366 tb = &tbs[nb_tbs++];
1367 tb->pc = pc;
1368 tb->cflags = 0;
1369 return tb;
1370}
1371
1372void tb_free(TranslationBlock *tb)
1373{
1374 /* In practice this is mostly used for single-use temporary TBs.
1375 Ignore the hard cases and just back up if this TB happens to
1376 be the last one generated. */
1377 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1378 code_gen_ptr = tb->tc_ptr;
1379 nb_tbs--;
1380 }
1381}
1382
1383/* add a new TB and link it to the physical page tables. phys_page2 is
1384 (-1) to indicate that only one page contains the TB. */
1385void tb_link_phys(TranslationBlock *tb,
1386 target_ulong phys_pc, target_ulong phys_page2)
1387{
1388 unsigned int h;
1389 TranslationBlock **ptb;
1390
1391 /* Grab the mmap lock to stop another thread invalidating this TB
1392 before we are done. */
1393 mmap_lock();
1394 /* add in the physical hash table */
1395 h = tb_phys_hash_func(phys_pc);
1396 ptb = &tb_phys_hash[h];
1397 tb->phys_hash_next = *ptb;
1398 *ptb = tb;
1399
1400 /* add in the page list */
1401 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1402 if (phys_page2 != -1)
1403 tb_alloc_page(tb, 1, phys_page2);
1404 else
1405 tb->page_addr[1] = -1;
1406
1407 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1408 tb->jmp_next[0] = NULL;
1409 tb->jmp_next[1] = NULL;
1410
1411 /* init original jump addresses */
1412 if (tb->tb_next_offset[0] != 0xffff)
1413 tb_reset_jump(tb, 0);
1414 if (tb->tb_next_offset[1] != 0xffff)
1415 tb_reset_jump(tb, 1);
1416
1417#ifdef DEBUG_TB_CHECK
1418 tb_page_check();
1419#endif
1420 mmap_unlock();
1421}
1422
1423/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1424 tb[1].tc_ptr. Return NULL if not found */
1425TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1426{
1427 int m_min, m_max, m;
1428 unsigned long v;
1429 TranslationBlock *tb;
1430
1431 if (nb_tbs <= 0)
1432 return NULL;
1433 if (tc_ptr < (unsigned long)code_gen_buffer ||
1434 tc_ptr >= (unsigned long)code_gen_ptr)
1435 return NULL;
1436 /* binary search (cf Knuth) */
1437 m_min = 0;
1438 m_max = nb_tbs - 1;
1439 while (m_min <= m_max) {
1440 m = (m_min + m_max) >> 1;
1441 tb = &tbs[m];
1442 v = (unsigned long)tb->tc_ptr;
1443 if (v == tc_ptr)
1444 return tb;
1445 else if (tc_ptr < v) {
1446 m_max = m - 1;
1447 } else {
1448 m_min = m + 1;
1449 }
1450 }
1451 return &tbs[m_max];
1452}
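/* Note: the binary search above relies on tbs[] being ordered by tc_ptr,
   which holds because translated code is handed out sequentially from
   code_gen_buffer; the result is the TB whose generated code region
   contains tc_ptr (used e.g. to map a host fault PC back to a TB). */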
1453
1454static void tb_reset_jump_recursive(TranslationBlock *tb);
1455
1456#ifndef VBOX
1457static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1458#else
1459DECLINLINE(void) tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1460#endif
1461{
1462 TranslationBlock *tb1, *tb_next, **ptb;
1463 unsigned int n1;
1464
1465 tb1 = tb->jmp_next[n];
1466 if (tb1 != NULL) {
1467 /* find head of list */
1468 for(;;) {
1469 n1 = (long)tb1 & 3;
1470 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1471 if (n1 == 2)
1472 break;
1473 tb1 = tb1->jmp_next[n1];
1474 }
1475 /* we are now sure that tb jumps to tb1 */
1476 tb_next = tb1;
1477
1478 /* remove tb from the jmp_first list */
1479 ptb = &tb_next->jmp_first;
1480 for(;;) {
1481 tb1 = *ptb;
1482 n1 = (long)tb1 & 3;
1483 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1484 if (n1 == n && tb1 == tb)
1485 break;
1486 ptb = &tb1->jmp_next[n1];
1487 }
1488 *ptb = tb->jmp_next[n];
1489 tb->jmp_next[n] = NULL;
1490
1491 /* suppress the jump to next tb in generated code */
1492 tb_reset_jump(tb, n);
1493
1494 /* suppress jumps in the tb on which we could have jumped */
1495 tb_reset_jump_recursive(tb_next);
1496 }
1497}
1498
1499static void tb_reset_jump_recursive(TranslationBlock *tb)
1500{
1501 tb_reset_jump_recursive2(tb, 0);
1502 tb_reset_jump_recursive2(tb, 1);
1503}
1504
1505#if defined(TARGET_HAS_ICE)
1506static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1507{
1508 target_ulong addr, pd;
1509 ram_addr_t ram_addr;
1510 PhysPageDesc *p;
1511
1512 addr = cpu_get_phys_page_debug(env, pc);
1513 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1514 if (!p) {
1515 pd = IO_MEM_UNASSIGNED;
1516 } else {
1517 pd = p->phys_offset;
1518 }
1519 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1520 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1521}
1522#endif
1523
1524/* Add a watchpoint. */
1525int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
1526{
1527 int i;
1528
1529 for (i = 0; i < env->nb_watchpoints; i++) {
1530 if (addr == env->watchpoint[i].vaddr)
1531 return 0;
1532 }
1533 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1534 return -1;
1535
1536 i = env->nb_watchpoints++;
1537 env->watchpoint[i].vaddr = addr;
1538 env->watchpoint[i].type = type;
1539 tlb_flush_page(env, addr);
1540 /* FIXME: This flush is needed because of the hack to make memory ops
1541 terminate the TB. It can be removed once the proper IO trap and
1542 re-execute bits are in. */
1543 tb_flush(env);
1544 return i;
1545}
1546
1547/* Remove a watchpoint. */
1548int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1549{
1550 int i;
1551
1552 for (i = 0; i < env->nb_watchpoints; i++) {
1553 if (addr == env->watchpoint[i].vaddr) {
1554 env->nb_watchpoints--;
1555 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1556 tlb_flush_page(env, addr);
1557 return 0;
1558 }
1559 }
1560 return -1;
1561}
1562
1563/* Remove all watchpoints. */
1564void cpu_watchpoint_remove_all(CPUState *env) {
1565 int i;
1566
1567 for (i = 0; i < env->nb_watchpoints; i++) {
1568 tlb_flush_page(env, env->watchpoint[i].vaddr);
1569 }
1570 env->nb_watchpoints = 0;
1571}
1572
1573/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1574 breakpoint is reached */
1575int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1576{
1577#if defined(TARGET_HAS_ICE)
1578 int i;
1579
1580 for(i = 0; i < env->nb_breakpoints; i++) {
1581 if (env->breakpoints[i] == pc)
1582 return 0;
1583 }
1584
1585 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1586 return -1;
1587 env->breakpoints[env->nb_breakpoints++] = pc;
1588
1589 breakpoint_invalidate(env, pc);
1590 return 0;
1591#else
1592 return -1;
1593#endif
1594}
1595
1596/* remove all breakpoints */
1597void cpu_breakpoint_remove_all(CPUState *env) {
1598#if defined(TARGET_HAS_ICE)
1599 int i;
1600 for(i = 0; i < env->nb_breakpoints; i++) {
1601 breakpoint_invalidate(env, env->breakpoints[i]);
1602 }
1603 env->nb_breakpoints = 0;
1604#endif
1605}
1606
1607/* remove a breakpoint */
1608int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1609{
1610#if defined(TARGET_HAS_ICE)
1611 int i;
1612 for(i = 0; i < env->nb_breakpoints; i++) {
1613 if (env->breakpoints[i] == pc)
1614 goto found;
1615 }
1616 return -1;
1617 found:
1618 env->nb_breakpoints--;
1619 if (i < env->nb_breakpoints)
1620 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1621
1622 breakpoint_invalidate(env, pc);
1623 return 0;
1624#else
1625 return -1;
1626#endif
1627}
1628
1629/* enable or disable single step mode. EXCP_DEBUG is returned by the
1630 CPU loop after each instruction */
1631void cpu_single_step(CPUState *env, int enabled)
1632{
1633#if defined(TARGET_HAS_ICE)
1634 if (env->singlestep_enabled != enabled) {
1635 env->singlestep_enabled = enabled;
1636 /* must flush all the translated code to avoid inconsistencies */
1637 /* XXX: only flush what is necessary */
1638 tb_flush(env);
1639 }
1640#endif
1641}
1642
1643#ifndef VBOX
1644/* enable or disable low-level logging */
1645void cpu_set_log(int log_flags)
1646{
1647 loglevel = log_flags;
1648 if (loglevel && !logfile) {
1649 logfile = fopen(logfilename, "w");
1650 if (!logfile) {
1651 perror(logfilename);
1652 _exit(1);
1653 }
1654#if !defined(CONFIG_SOFTMMU)
1655 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1656 {
1657 static uint8_t logfile_buf[4096];
1658 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1659 }
1660#else
1661 setvbuf(logfile, NULL, _IOLBF, 0);
1662#endif
1663 }
1664}
1665
1666void cpu_set_log_filename(const char *filename)
1667{
1668 logfilename = strdup(filename);
1669}
1670#endif /* !VBOX */
1671
1672/* mask must never be zero, except for A20 change call */
1673void cpu_interrupt(CPUState *env, int mask)
1674{
1675#if !defined(USE_NPTL)
1676 TranslationBlock *tb;
1677 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1678#endif
1679 int old_mask;
1680
1681 old_mask = env->interrupt_request;
1682#ifdef VBOX
1683 VM_ASSERT_EMT(env->pVM);
1684 ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, mask);
1685#else /* !VBOX */
1686 /* FIXME: This is probably not threadsafe. A different thread could
1687 be in the middle of a read-modify-write operation. */
1688 env->interrupt_request |= mask;
1689#endif /* !VBOX */
1690#if defined(USE_NPTL)
1691 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1692 problem and hope the cpu will stop of its own accord. For userspace
1693 emulation this often isn't actually as bad as it sounds. Often
1694 signals are used primarily to interrupt blocking syscalls. */
1695#else
1696 if (use_icount) {
1697 env->icount_decr.u16.high = 0xffff;
1698#ifndef CONFIG_USER_ONLY
1699 /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
1700 an async event happened and we need to process it. */
1701 if (!can_do_io(env)
1702 && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1703 cpu_abort(env, "Raised interrupt while not in I/O function");
1704 }
1705#endif
1706 } else {
1707 tb = env->current_tb;
1708 /* if the cpu is currently executing code, we must unlink it and
1709 all the potentially executing TBs */
1710 if (tb && !testandset(&interrupt_lock)) {
1711 env->current_tb = NULL;
1712 tb_reset_jump_recursive(tb);
1713 resetlock(&interrupt_lock);
1714 }
1715 }
1716#endif
1717}
1718
1719void cpu_reset_interrupt(CPUState *env, int mask)
1720{
1721#ifdef VBOX
1722 /*
1723 * Note: the current implementation can be executed by another thread without problems; make sure this remains true
1724 * for future changes!
1725 */
1726 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~mask);
1727#else /* !VBOX */
1728 env->interrupt_request &= ~mask;
1729#endif /* !VBOX */
1730}
1731
1732#ifndef VBOX
1733CPULogItem cpu_log_items[] = {
1734 { CPU_LOG_TB_OUT_ASM, "out_asm",
1735 "show generated host assembly code for each compiled TB" },
1736 { CPU_LOG_TB_IN_ASM, "in_asm",
1737 "show target assembly code for each compiled TB" },
1738 { CPU_LOG_TB_OP, "op",
1739 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1740#ifdef TARGET_I386
1741 { CPU_LOG_TB_OP_OPT, "op_opt",
1742 "show micro ops after optimization for each compiled TB" },
1743#endif
1744 { CPU_LOG_INT, "int",
1745 "show interrupts/exceptions in short format" },
1746 { CPU_LOG_EXEC, "exec",
1747 "show trace before each executed TB (lots of logs)" },
1748 { CPU_LOG_TB_CPU, "cpu",
1749 "show CPU state before bloc translation" },
1750#ifdef TARGET_I386
1751 { CPU_LOG_PCALL, "pcall",
1752 "show protected mode far calls/returns/exceptions" },
1753#endif
1754#ifdef DEBUG_IOPORT
1755 { CPU_LOG_IOPORT, "ioport",
1756 "show all i/o ports accesses" },
1757#endif
1758 { 0, NULL, NULL },
1759};
1760
1761static int cmp1(const char *s1, int n, const char *s2)
1762{
1763 if (strlen(s2) != n)
1764 return 0;
1765 return memcmp(s1, s2, n) == 0;
1766}
1767
1768/* takes a comma-separated list of log masks. Returns 0 on error. */
1769int cpu_str_to_log_mask(const char *str)
1770{
1771 CPULogItem *item;
1772 int mask;
1773 const char *p, *p1;
1774
1775 p = str;
1776 mask = 0;
1777 for(;;) {
1778 p1 = strchr(p, ',');
1779 if (!p1)
1780 p1 = p + strlen(p);
1781 if(cmp1(p,p1-p,"all")) {
1782 for(item = cpu_log_items; item->mask != 0; item++) {
1783 mask |= item->mask;
1784 }
1785 } else {
1786 for(item = cpu_log_items; item->mask != 0; item++) {
1787 if (cmp1(p, p1 - p, item->name))
1788 goto found;
1789 }
1790 return 0;
1791 }
1792 found:
1793 mask |= item->mask;
1794 if (*p1 != ',')
1795 break;
1796 p = p1 + 1;
1797 }
1798 return mask;
1799}
1800#endif /* !VBOX */
1801
1802#ifndef VBOX /* VBOX: we have our own routine. */
1803void cpu_abort(CPUState *env, const char *fmt, ...)
1804{
1805 va_list ap;
1806
1807 va_start(ap, fmt);
1808 fprintf(stderr, "qemu: fatal: ");
1809 vfprintf(stderr, fmt, ap);
1810 fprintf(stderr, "\n");
1811#ifdef TARGET_I386
1812 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1813#else
1814 cpu_dump_state(env, stderr, fprintf, 0);
1815#endif
1816 va_end(ap);
1817 abort();
1818}
1819#endif /* !VBOX */
1820
1821#ifndef VBOX
1822CPUState *cpu_copy(CPUState *env)
1823{
1824 CPUState *new_env = cpu_init(env->cpu_model_str);
1825 /* preserve chaining and index */
1826 CPUState *next_cpu = new_env->next_cpu;
1827 int cpu_index = new_env->cpu_index;
1828 memcpy(new_env, env, sizeof(CPUState));
1829 new_env->next_cpu = next_cpu;
1830 new_env->cpu_index = cpu_index;
1831 return new_env;
1832}
1833#endif
1834
1835#if !defined(CONFIG_USER_ONLY)
1836
1837#ifndef VBOX
1838static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1839#else
1840DECLINLINE(void) tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1841#endif
1842{
1843 unsigned int i;
1844
1845 /* Discard jump cache entries for any tb which might potentially
1846 overlap the flushed page. */
1847 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1848 memset (&env->tb_jmp_cache[i], 0,
1849 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1850
1851 i = tb_jmp_cache_hash_page(addr);
1852 memset (&env->tb_jmp_cache[i], 0,
1853 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1854
1855#ifdef VBOX
1856 /* inform raw mode about TLB page flush */
1857 remR3FlushPage(env, addr);
1858#endif /* VBOX */
1859}
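/* Note: two jump-cache sets are cleared above -- the one hashed from 'addr'
   and the one hashed from the preceding page -- because a TB that starts
   near the end of the previous page may extend into, and thus overlap, the
   page being flushed. */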
1860
1861/* NOTE: if flush_global is true, also flush global entries (not
1862 implemented yet) */
1863void tlb_flush(CPUState *env, int flush_global)
1864{
1865 int i;
1866#if defined(DEBUG_TLB)
1867 printf("tlb_flush:\n");
1868#endif
1869 /* must reset current TB so that interrupts cannot modify the
1870 links while we are modifying them */
1871 env->current_tb = NULL;
1872
1873 for(i = 0; i < CPU_TLB_SIZE; i++) {
1874 env->tlb_table[0][i].addr_read = -1;
1875 env->tlb_table[0][i].addr_write = -1;
1876 env->tlb_table[0][i].addr_code = -1;
1877 env->tlb_table[1][i].addr_read = -1;
1878 env->tlb_table[1][i].addr_write = -1;
1879 env->tlb_table[1][i].addr_code = -1;
1880#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
1881 env->phys_addends[0][i] = -1;
1882 env->phys_addends[1][i] = -1;
1883#endif
1884#if (NB_MMU_MODES >= 3)
1885 env->tlb_table[2][i].addr_read = -1;
1886 env->tlb_table[2][i].addr_write = -1;
1887 env->tlb_table[2][i].addr_code = -1;
1888#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
1889 env->phys_addends[2][i] = -1;
1890#endif
1891#if (NB_MMU_MODES == 4)
1892 env->tlb_table[3][i].addr_read = -1;
1893 env->tlb_table[3][i].addr_write = -1;
1894 env->tlb_table[3][i].addr_code = -1;
1895#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
1896 env->phys_addends[3][i] = -1;
1897#endif
1898#endif
1899#endif
1900 }
1901
1902 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1903
1904#ifdef VBOX
1905 /* inform raw mode about TLB flush */
1906 remR3FlushTLB(env, flush_global);
1907#endif
1908#ifdef USE_KQEMU
1909 if (env->kqemu_enabled) {
1910 kqemu_flush(env, flush_global);
1911 }
1912#endif
1913 tlb_flush_count++;
1914}
1915
1916#ifndef VBOX
1917static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1918#else
1919DECLINLINE(void) tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1920#endif
1921{
1922 if (addr == (tlb_entry->addr_read &
1923 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1924 addr == (tlb_entry->addr_write &
1925 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1926 addr == (tlb_entry->addr_code &
1927 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1928 tlb_entry->addr_read = -1;
1929 tlb_entry->addr_write = -1;
1930 tlb_entry->addr_code = -1;
1931 }
1932}
1933
1934void tlb_flush_page(CPUState *env, target_ulong addr)
1935{
1936 int i;
1937
1938#if defined(DEBUG_TLB)
1939 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1940#endif
1941 /* must reset current TB so that interrupts cannot modify the
1942 links while we are modifying them */
1943 env->current_tb = NULL;
1944
1945 addr &= TARGET_PAGE_MASK;
1946 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1947 tlb_flush_entry(&env->tlb_table[0][i], addr);
1948 tlb_flush_entry(&env->tlb_table[1][i], addr);
1949#if (NB_MMU_MODES >= 3)
1950 tlb_flush_entry(&env->tlb_table[2][i], addr);
1951#if (NB_MMU_MODES == 4)
1952 tlb_flush_entry(&env->tlb_table[3][i], addr);
1953#endif
1954#endif
1955
1956 tlb_flush_jmp_cache(env, addr);
1957
1958#ifdef USE_KQEMU
1959 if (env->kqemu_enabled) {
1960 kqemu_flush_page(env, addr);
1961 }
1962#endif
1963}
1964
1965/* update the TLBs so that writes to code in the virtual page 'addr'
1966 can be detected */
1967static void tlb_protect_code(ram_addr_t ram_addr)
1968{
1969 cpu_physical_memory_reset_dirty(ram_addr,
1970 ram_addr + TARGET_PAGE_SIZE,
1971 CODE_DIRTY_FLAG);
1972#if defined(VBOX) && defined(REM_MONITOR_CODE_PAGES)
1973 /** @todo Retest this? This function has changed... */
1974 remR3ProtectCode(cpu_single_env, ram_addr);
1975#endif
1976}
1977
1978/* update the TLB so that writes in physical page 'phys_addr' are no longer
1979 tested for self modifying code */
1980static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1981 target_ulong vaddr)
1982{
1983#ifdef VBOX
1984 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
1985#endif
1986 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1987}
1988
1989#ifndef VBOX
1990static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1991 unsigned long start, unsigned long length)
1992#else
1993DECLINLINE(void) tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1994 unsigned long start, unsigned long length)
1995#endif
1996{
1997 unsigned long addr;
1998
1999#ifdef VBOX
2000 if (start & 3)
2001 return;
2002#endif
2003 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2004 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2005 if ((addr - start) < length) {
2006 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
2007 }
2008 }
2009}
2010
2011void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
2012 int dirty_flags)
2013{
2014 CPUState *env;
2015 unsigned long length, start1;
2016 int i, mask, len;
2017 uint8_t *p;
2018
2019 start &= TARGET_PAGE_MASK;
2020 end = TARGET_PAGE_ALIGN(end);
2021
2022 length = end - start;
2023 if (length == 0)
2024 return;
2025 len = length >> TARGET_PAGE_BITS;
2026#ifdef USE_KQEMU
2027 /* XXX: should not depend on cpu context */
2028 env = first_cpu;
2029 if (env->kqemu_enabled) {
2030 ram_addr_t addr;
2031 addr = start;
2032 for(i = 0; i < len; i++) {
2033 kqemu_set_notdirty(env, addr);
2034 addr += TARGET_PAGE_SIZE;
2035 }
2036 }
2037#endif
2038 mask = ~dirty_flags;
2039 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
2040#ifdef VBOX
2041 if (RT_LIKELY((start >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2042#endif
2043 for(i = 0; i < len; i++)
2044 p[i] &= mask;
2045
2046 /* we modify the TLB cache so that the dirty bit will be set again
2047 when accessing the range */
2048#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2049 start1 = start;
2050#elif !defined(VBOX)
2051 start1 = start + (unsigned long)phys_ram_base;
2052#else
2053 start1 = (unsigned long)remR3TlbGCPhys2Ptr(first_cpu, start, 1 /*fWritable*/); /** @todo this can be harmful with VBOX_WITH_NEW_PHYS_CODE, fix interface/whatever. */
2054#endif
2055 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2056 for(i = 0; i < CPU_TLB_SIZE; i++)
2057 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
2058 for(i = 0; i < CPU_TLB_SIZE; i++)
2059 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
2060#if (NB_MMU_MODES >= 3)
2061 for(i = 0; i < CPU_TLB_SIZE; i++)
2062 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
2063#if (NB_MMU_MODES == 4)
2064 for(i = 0; i < CPU_TLB_SIZE; i++)
2065 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
2066#endif
2067#endif
2068 }
2069}
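/* Illustrative sketch added for this listing (not from the original source):
 * a typical consumer of the dirty bitmap scans a physical range page by page
 * and then clears the bits it has consumed with
 * cpu_physical_memory_reset_dirty(), which is what re-arms the TLB write
 * traps above.  CODE_DIRTY_FLAG is reused here purely as an example flag;
 * real callers (e.g. display refresh) would pass their own flag from
 * cpu-all.h.  The function name is invented. */
#if 0
static void example_scan_dirty(ram_addr_t base, ram_addr_t size)
{
    ram_addr_t a;
    for (a = base; a < base + size; a += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_is_dirty(a)) {
            /* ... process the modified page at 'a' ... */
        }
    }
    /* clear the consumed bits so future writes trap into notdirty_mem_write */
    cpu_physical_memory_reset_dirty(base, base + size, CODE_DIRTY_FLAG);
}
#endif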
2070
2071#ifndef VBOX
2072int cpu_physical_memory_set_dirty_tracking(int enable)
2073{
2074 in_migration = enable;
2075 return 0;
2076}
2077
2078int cpu_physical_memory_get_dirty_tracking(void)
2079{
2080 return in_migration;
2081}
2082#endif
2083
2084#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2085DECLINLINE(void) tlb_update_dirty(CPUTLBEntry *tlb_entry, target_phys_addr_t phys_addend)
2086#else
2087static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2088#endif
2089{
2090 ram_addr_t ram_addr;
2091
2092 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2093 /* RAM case */
2094#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2095 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2096#elif !defined(VBOX)
2097 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
2098 tlb_entry->addend - (unsigned long)phys_ram_base;
2099#else
2100 Assert(phys_addend != -1);
2101 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + phys_addend;
2102#endif
2103 if (!cpu_physical_memory_is_dirty(ram_addr)) {
2104 tlb_entry->addr_write |= TLB_NOTDIRTY;
2105 }
2106 }
2107}
2108
2109/* update the TLB according to the current state of the dirty bits */
2110void cpu_tlb_update_dirty(CPUState *env)
2111{
2112 int i;
2113#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2114 for(i = 0; i < CPU_TLB_SIZE; i++)
2115 tlb_update_dirty(&env->tlb_table[0][i], env->phys_addends[0][i]);
2116 for(i = 0; i < CPU_TLB_SIZE; i++)
2117 tlb_update_dirty(&env->tlb_table[1][i], env->phys_addends[1][i]);
2118#if (NB_MMU_MODES >= 3)
2119 for(i = 0; i < CPU_TLB_SIZE; i++)
2120 tlb_update_dirty(&env->tlb_table[2][i], env->phys_addends[2][i]);
2121#if (NB_MMU_MODES == 4)
2122 for(i = 0; i < CPU_TLB_SIZE; i++)
2123 tlb_update_dirty(&env->tlb_table[3][i], env->phys_addends[3][i]);
2124#endif
2125#endif
2126#else /* VBOX */
2127 for(i = 0; i < CPU_TLB_SIZE; i++)
2128 tlb_update_dirty(&env->tlb_table[0][i]);
2129 for(i = 0; i < CPU_TLB_SIZE; i++)
2130 tlb_update_dirty(&env->tlb_table[1][i]);
2131#if (NB_MMU_MODES >= 3)
2132 for(i = 0; i < CPU_TLB_SIZE; i++)
2133 tlb_update_dirty(&env->tlb_table[2][i]);
2134#if (NB_MMU_MODES == 4)
2135 for(i = 0; i < CPU_TLB_SIZE; i++)
2136 tlb_update_dirty(&env->tlb_table[3][i]);
2137#endif
2138#endif
2139#endif /* VBOX */
2140}
2141
2142#ifndef VBOX
2143static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2144#else
2145DECLINLINE(void) tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2146#endif
2147{
2148 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2149 tlb_entry->addr_write = vaddr;
2150}
2151
2152
2153/* update the TLB corresponding to virtual page vaddr and phys addr
2154 addr so that it is no longer dirty */
2155#ifndef VBOX
2156static inline void tlb_set_dirty(CPUState *env,
2157 unsigned long addr, target_ulong vaddr)
2158#else
2159DECLINLINE(void) tlb_set_dirty(CPUState *env,
2160 unsigned long addr, target_ulong vaddr)
2161#endif
2162{
2163 int i;
2164
2165 addr &= TARGET_PAGE_MASK;
2166 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2167 tlb_set_dirty1(&env->tlb_table[0][i], addr);
2168 tlb_set_dirty1(&env->tlb_table[1][i], addr);
2169#if (NB_MMU_MODES >= 3)
2170 tlb_set_dirty1(&env->tlb_table[2][i], addr);
2171#if (NB_MMU_MODES == 4)
2172 tlb_set_dirty1(&env->tlb_table[3][i], addr);
2173#endif
2174#endif
2175}
2176
2177/* add a new TLB entry. At most one entry for a given virtual address
2178 is permitted. Return 0 if OK or 2 if the page could not be mapped
2179 (can only happen in non SOFTMMU mode for I/O pages or pages
2180 conflicting with the host address space). */
2181int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2182 target_phys_addr_t paddr, int prot,
2183 int mmu_idx, int is_softmmu)
2184{
2185 PhysPageDesc *p;
2186 unsigned long pd;
2187 unsigned int index;
2188 target_ulong address;
2189 target_ulong code_address;
2190 target_phys_addr_t addend;
2191 int ret;
2192 CPUTLBEntry *te;
2193 int i;
2194 target_phys_addr_t iotlb;
2195#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2196 int read_mods = 0, write_mods = 0, code_mods = 0;
2197#endif
2198
2199 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2200 if (!p) {
2201 pd = IO_MEM_UNASSIGNED;
2202 } else {
2203 pd = p->phys_offset;
2204 }
2205#if defined(DEBUG_TLB)
2206 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2207 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2208#endif
2209
2210 ret = 0;
2211 address = vaddr;
2212 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2213 /* IO memory case (romd handled later) */
2214 address |= TLB_MMIO;
2215 }
2216#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2217 addend = pd & TARGET_PAGE_MASK;
2218#elif !defined(VBOX)
2219 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
2220#else
2221 /** @todo this is racing the phys_page_find call above since it may register
2222 * a new chunk of memory... */
2223 addend = (unsigned long)remR3TlbGCPhys2Ptr(env,
2224 pd & TARGET_PAGE_MASK,
2225 !!(prot & PAGE_WRITE));
2226#endif
2227
2228 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2229 /* Normal RAM. */
2230 iotlb = pd & TARGET_PAGE_MASK;
2231 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2232 iotlb |= IO_MEM_NOTDIRTY;
2233 else
2234 iotlb |= IO_MEM_ROM;
2235 } else {
2236 /* IO handlers are currently passed a physical address.
2237 It would be nice to pass an offset from the base address
2238 of that region. This would avoid having to special case RAM,
2239 and avoid full address decoding in every device.
2240 We can't use the high bits of pd for this because
2241 IO_MEM_ROMD uses these as a ram address. */
2242 iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
2243 }
2244
2245 code_address = address;
2246
2247#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2248 if (addend & 0x3)
2249 {
2250 if (addend & 0x2)
2251 {
2252 /* catch write */
2253 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
2254 write_mods |= TLB_MMIO;
2255 }
2256 else if (addend & 0x1)
2257 {
2258 /* catch all */
2259 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
2260 {
2261 read_mods |= TLB_MMIO;
2262 write_mods |= TLB_MMIO;
2263 code_mods |= TLB_MMIO;
2264 }
2265 }
2266 if ((iotlb & ~TARGET_PAGE_MASK) == 0)
2267 iotlb = env->pVM->rem.s.iHandlerMemType + paddr;
2268 addend &= ~(target_ulong)0x3;
2269 }
2270#endif
2271
2272 /* Make accesses to pages with watchpoints go via the
2273 watchpoint trap routines. */
2274 for (i = 0; i < env->nb_watchpoints; i++) {
2275 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
2276 iotlb = io_mem_watch + paddr;
2277 /* TODO: The memory case can be optimized by not trapping
2278 reads of pages with a write breakpoint. */
2279 address |= TLB_MMIO;
2280 }
2281 }
2282
2283 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2284 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2285 te = &env->tlb_table[mmu_idx][index];
2286 te->addend = addend - vaddr;
2287 if (prot & PAGE_READ) {
2288 te->addr_read = address;
2289 } else {
2290 te->addr_read = -1;
2291 }
2292
2293 if (prot & PAGE_EXEC) {
2294 te->addr_code = code_address;
2295 } else {
2296 te->addr_code = -1;
2297 }
2298 if (prot & PAGE_WRITE) {
2299 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2300 (pd & IO_MEM_ROMD)) {
2301 /* Write access calls the I/O callback. */
2302 te->addr_write = address | TLB_MMIO;
2303 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2304 !cpu_physical_memory_is_dirty(pd)) {
2305 te->addr_write = address | TLB_NOTDIRTY;
2306 } else {
2307 te->addr_write = address;
2308 }
2309 } else {
2310 te->addr_write = -1;
2311 }
2312
2313#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2314 if (prot & PAGE_READ)
2315 te->addr_read |= read_mods;
2316 if (prot & PAGE_EXEC)
2317 te->addr_code |= code_mods;
2318 if (prot & PAGE_WRITE)
2319 te->addr_write |= write_mods;
2320
2321 env->phys_addends[mmu_idx][index] = (pd & TARGET_PAGE_MASK) - vaddr;
2322#endif
2323
2324#ifdef VBOX
2325 /* inform raw mode about TLB page change */
2326 remR3FlushPage(env, vaddr);
2327#endif
2328 return ret;
2329}
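/* Illustrative sketch added for this listing (not from the original file):
 * after a softmmu miss the target's MMU fault handler walks the guest page
 * tables and then installs the translation with tlb_set_page_exec().  The
 * function name and the 'guest_vaddr'/'guest_paddr' values stand in for the
 * results of such a page walk and are invented for the example. */
#if 0
static void example_install_mapping(CPUState *env,
                                    target_ulong guest_vaddr,
                                    target_phys_addr_t guest_paddr,
                                    int mmu_idx)
{
    /* read+write+exec page, softmmu mode */
    tlb_set_page_exec(env, guest_vaddr & TARGET_PAGE_MASK,
                      guest_paddr & TARGET_PAGE_MASK,
                      PAGE_READ | PAGE_WRITE | PAGE_EXEC,
                      mmu_idx, 1 /* is_softmmu */);
}
#endif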
2330#if 0
2331/* called from signal handler: invalidate the code and unprotect the
2332 page. Return TRUE if the fault was successfully handled. */
2333int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
2334{
2335#if !defined(CONFIG_SOFTMMU)
2336 VirtPageDesc *vp;
2337
2338#if defined(DEBUG_TLB)
2339 printf("page_unprotect: addr=0x%08x\n", addr);
2340#endif
2341 addr &= TARGET_PAGE_MASK;
2342
2343 /* if it is not mapped, no need to worry here */
2344 if (addr >= MMAP_AREA_END)
2345 return 0;
2346 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
2347 if (!vp)
2348 return 0;
2349 /* NOTE: in this case, validate_tag is _not_ tested as it
2350 validates only the code TLB */
2351 if (vp->valid_tag != virt_valid_tag)
2352 return 0;
2353 if (!(vp->prot & PAGE_WRITE))
2354 return 0;
2355#if defined(DEBUG_TLB)
2356 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
2357 addr, vp->phys_addr, vp->prot);
2358#endif
2359 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
2360 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
2361 (unsigned long)addr, vp->prot);
2362 /* set the dirty bit */
2363 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
2364 /* flush the code inside */
2365 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
2366 return 1;
2367#elif defined(VBOX)
2368 addr &= TARGET_PAGE_MASK;
2369
2370 /* if it is not mapped, no need to worry here */
2371 if (addr >= MMAP_AREA_END)
2372 return 0;
2373 return 1;
2374#else
2375 return 0;
2376#endif
2377}
2378#endif /* 0 */
2379
2380#else
2381
2382void tlb_flush(CPUState *env, int flush_global)
2383{
2384}
2385
2386void tlb_flush_page(CPUState *env, target_ulong addr)
2387{
2388}
2389
2390int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2391 target_phys_addr_t paddr, int prot,
2392 int mmu_idx, int is_softmmu)
2393{
2394 return 0;
2395}
2396
2397#ifndef VBOX
2398/* dump memory mappings */
2399void page_dump(FILE *f)
2400{
2401 unsigned long start, end;
2402 int i, j, prot, prot1;
2403 PageDesc *p;
2404
2405 fprintf(f, "%-8s %-8s %-8s %s\n",
2406 "start", "end", "size", "prot");
2407 start = -1;
2408 end = -1;
2409 prot = 0;
2410 for(i = 0; i <= L1_SIZE; i++) {
2411 if (i < L1_SIZE)
2412 p = l1_map[i];
2413 else
2414 p = NULL;
2415 for(j = 0;j < L2_SIZE; j++) {
2416 if (!p)
2417 prot1 = 0;
2418 else
2419 prot1 = p[j].flags;
2420 if (prot1 != prot) {
2421 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2422 if (start != -1) {
2423 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2424 start, end, end - start,
2425 prot & PAGE_READ ? 'r' : '-',
2426 prot & PAGE_WRITE ? 'w' : '-',
2427 prot & PAGE_EXEC ? 'x' : '-');
2428 }
2429 if (prot1 != 0)
2430 start = end;
2431 else
2432 start = -1;
2433 prot = prot1;
2434 }
2435 if (!p)
2436 break;
2437 }
2438 }
2439}
2440#endif /* !VBOX */
2441
2442int page_get_flags(target_ulong address)
2443{
2444 PageDesc *p;
2445
2446 p = page_find(address >> TARGET_PAGE_BITS);
2447 if (!p)
2448 return 0;
2449 return p->flags;
2450}
2451
2452/* modify the flags of a page and invalidate the code if
2453 necessary. The flag PAGE_WRITE_ORG is positioned automatically
2454 depending on PAGE_WRITE */
2455void page_set_flags(target_ulong start, target_ulong end, int flags)
2456{
2457 PageDesc *p;
2458 target_ulong addr;
2459
2460 start = start & TARGET_PAGE_MASK;
2461 end = TARGET_PAGE_ALIGN(end);
2462 if (flags & PAGE_WRITE)
2463 flags |= PAGE_WRITE_ORG;
2464#ifdef VBOX
2465 AssertMsgFailed(("We shouldn't be here, and if we should, we must have an env to do the proper locking!\n"));
2466#endif
2467 spin_lock(&tb_lock);
2468 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2469 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2470 /* if the write protection is set, then we invalidate the code
2471 inside */
2472 if (!(p->flags & PAGE_WRITE) &&
2473 (flags & PAGE_WRITE) &&
2474 p->first_tb) {
2475 tb_invalidate_phys_page(addr, 0, NULL);
2476 }
2477 p->flags = flags;
2478 }
2479 spin_unlock(&tb_lock);
2480}
2481
2482int page_check_range(target_ulong start, target_ulong len, int flags)
2483{
2484 PageDesc *p;
2485 target_ulong end;
2486 target_ulong addr;
2487
2488 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2489 start = start & TARGET_PAGE_MASK;
2490
2491 if( end < start )
2492 /* we've wrapped around */
2493 return -1;
2494 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2495 p = page_find(addr >> TARGET_PAGE_BITS);
2496 if( !p )
2497 return -1;
2498 if( !(p->flags & PAGE_VALID) )
2499 return -1;
2500
2501 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2502 return -1;
2503 if (flags & PAGE_WRITE) {
2504 if (!(p->flags & PAGE_WRITE_ORG))
2505 return -1;
2506 /* unprotect the page if it was put read-only because it
2507 contains translated code */
2508 if (!(p->flags & PAGE_WRITE)) {
2509 if (!page_unprotect(addr, 0, NULL))
2510 return -1;
2511 }
2512 return 0;
2513 }
2514 }
2515 return 0;
2516}
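/* Illustrative sketch added for this listing (not part of the original
 * source): in user-mode emulation page_check_range() is the building block
 * for "access_ok"-style checks before touching guest memory; the wrapper
 * name below is invented. */
#if 0
static int example_guest_range_writable(target_ulong guest_addr, target_ulong len)
{
    /* also unprotects pages that were made read-only to guard translated code */
    return page_check_range(guest_addr, len, PAGE_WRITE) == 0;
}
#endif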
2517
2518/* called from signal handler: invalidate the code and unprotect the
2519 page. Return TRUE if the fault was successfully handled. */
2520int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2521{
2522 unsigned int page_index, prot, pindex;
2523 PageDesc *p, *p1;
2524 target_ulong host_start, host_end, addr;
2525
2526 /* Technically this isn't safe inside a signal handler. However we
2527 know this only ever happens in a synchronous SEGV handler, so in
2528 practice it seems to be ok. */
2529 mmap_lock();
2530
2531 host_start = address & qemu_host_page_mask;
2532 page_index = host_start >> TARGET_PAGE_BITS;
2533 p1 = page_find(page_index);
2534 if (!p1) {
2535 mmap_unlock();
2536 return 0;
2537 }
2538 host_end = host_start + qemu_host_page_size;
2539 p = p1;
2540 prot = 0;
2541 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2542 prot |= p->flags;
2543 p++;
2544 }
2545 /* if the page was really writable, then we change its
2546 protection back to writable */
2547 if (prot & PAGE_WRITE_ORG) {
2548 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2549 if (!(p1[pindex].flags & PAGE_WRITE)) {
2550 mprotect((void *)g2h(host_start), qemu_host_page_size,
2551 (prot & PAGE_BITS) | PAGE_WRITE);
2552 p1[pindex].flags |= PAGE_WRITE;
2553 /* and since the content will be modified, we must invalidate
2554 the corresponding translated code. */
2555 tb_invalidate_phys_page(address, pc, puc);
2556#ifdef DEBUG_TB_CHECK
2557 tb_invalidate_check(address);
2558#endif
2559 mmap_unlock();
2560 return 1;
2561 }
2562 }
2563 mmap_unlock();
2564 return 0;
2565}
2566
2567static inline void tlb_set_dirty(CPUState *env,
2568 unsigned long addr, target_ulong vaddr)
2569{
2570}
2571#endif /* defined(CONFIG_USER_ONLY) */
2572
2573#if !defined(CONFIG_USER_ONLY)
2574static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2575 ram_addr_t memory);
2576static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2577 ram_addr_t orig_memory);
2578#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2579 need_subpage) \
2580 do { \
2581 if (addr > start_addr) \
2582 start_addr2 = 0; \
2583 else { \
2584 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2585 if (start_addr2 > 0) \
2586 need_subpage = 1; \
2587 } \
2588 \
2589 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2590 end_addr2 = TARGET_PAGE_SIZE - 1; \
2591 else { \
2592 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2593 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2594 need_subpage = 1; \
2595 } \
2596 } while (0)
2597
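/* Worked example added for this listing (illustration only, not in the
 * original file; assumes 4K pages, i.e. TARGET_PAGE_SIZE == 0x1000):
 * registering orig_size = 0x1000 bytes at start_addr = 0x12800 touches two
 * partially covered pages.  For addr = 0x12000 the macro yields
 * start_addr2 = 0x800 and end_addr2 = 0xFFF; for addr = 0x13000 it yields
 * start_addr2 = 0x000 and end_addr2 = 0x7FF.  Both cases set
 * need_subpage = 1, so cpu_register_physical_memory() below routes them
 * through subpage_init()/subpage_register(). */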
2598
2599/* register physical memory. 'size' must be a multiple of the target
2600 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2601 io memory page */
2602void cpu_register_physical_memory(target_phys_addr_t start_addr,
2603 unsigned long size,
2604 unsigned long phys_offset)
2605{
2606 target_phys_addr_t addr, end_addr;
2607 PhysPageDesc *p;
2608 CPUState *env;
2609 ram_addr_t orig_size = size;
2610 void *subpage;
2611
2612#ifdef USE_KQEMU
2613 /* XXX: should not depend on cpu context */
2614 env = first_cpu;
2615 if (env->kqemu_enabled) {
2616 kqemu_set_phys_mem(start_addr, size, phys_offset);
2617 }
2618#endif
2619 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2620 end_addr = start_addr + (target_phys_addr_t)size;
2621 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2622 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2623 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2624 ram_addr_t orig_memory = p->phys_offset;
2625 target_phys_addr_t start_addr2, end_addr2;
2626 int need_subpage = 0;
2627
2628 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2629 need_subpage);
2630 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2631 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2632 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2633 &p->phys_offset, orig_memory);
2634 } else {
2635 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2636 >> IO_MEM_SHIFT];
2637 }
2638 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2639 } else {
2640 p->phys_offset = phys_offset;
2641#if !defined(VBOX) || defined(VBOX_WITH_NEW_PHYS_CODE)
2642 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2643 (phys_offset & IO_MEM_ROMD))
2644#else
2645 if ( (phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM
2646 || (phys_offset & IO_MEM_ROMD)
2647 || (phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING)
2648#endif
2649 phys_offset += TARGET_PAGE_SIZE;
2650 }
2651 } else {
2652 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2653 p->phys_offset = phys_offset;
2654#if !defined(VBOX) || defined(VBOX_WITH_NEW_PHYS_CODE)
2655 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2656 (phys_offset & IO_MEM_ROMD))
2657#else
2658 if ( (phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM
2659 || (phys_offset & IO_MEM_ROMD)
2660 || (phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING)
2661#endif
2662 phys_offset += TARGET_PAGE_SIZE;
2663 else {
2664 target_phys_addr_t start_addr2, end_addr2;
2665 int need_subpage = 0;
2666
2667 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2668 end_addr2, need_subpage);
2669
2670 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2671 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2672 &p->phys_offset, IO_MEM_UNASSIGNED);
2673 subpage_register(subpage, start_addr2, end_addr2,
2674 phys_offset);
2675 }
2676 }
2677 }
2678 }
2679 /* since each CPU stores ram addresses in its TLB cache, we must
2680 reset the modified entries */
2681 /* XXX: slow ! */
2682 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2683 tlb_flush(env, 1);
2684 }
2685}
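/* Illustrative sketch added for this listing (not from the original source):
 * typical board/device setup registers RAM with phys_offset set to the RAM
 * offset ORed with IO_MEM_RAM, and MMIO with the value returned by
 * cpu_register_io_memory().  The sizes, addresses and the function name are
 * invented for the example. */
#if 0
static void example_register_regions(int mmio_index /* from cpu_register_io_memory() */)
{
    /* 16 MB of RAM at guest physical address 0 (non-VBOX allocator above) */
    ram_addr_t ram_off = qemu_ram_alloc(16 * 1024 * 1024);
    cpu_register_physical_memory(0x00000000, 16 * 1024 * 1024, ram_off | IO_MEM_RAM);
    /* one page of MMIO at an arbitrary example address */
    cpu_register_physical_memory(0xfe000000, TARGET_PAGE_SIZE, mmio_index);
}
#endif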
2686
2687/* XXX: temporary until new memory mapping API */
2688uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2689{
2690 PhysPageDesc *p;
2691
2692 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2693 if (!p)
2694 return IO_MEM_UNASSIGNED;
2695 return p->phys_offset;
2696}
2697
2698#ifndef VBOX
2699/* XXX: better than nothing */
2700ram_addr_t qemu_ram_alloc(ram_addr_t size)
2701{
2702 ram_addr_t addr;
2703 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2704 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2705 (uint64_t)size, (uint64_t)phys_ram_size);
2706 abort();
2707 }
2708 addr = phys_ram_alloc_offset;
2709 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2710 return addr;
2711}
2712
2713void qemu_ram_free(ram_addr_t addr)
2714{
2715}
2716#endif
2717
2718
2719static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2720{
2721#ifdef DEBUG_UNASSIGNED
2722 printf("Unassigned mem read 0x%08x\n", (int)addr);
2723#endif
2724#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2725 do_unassigned_access(addr, 0, 0, 0, 1);
2726#endif
2727 return 0;
2728}
2729
2730static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2731{
2732#ifdef DEBUG_UNASSIGNED
2733 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2734#endif
2735#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2736 do_unassigned_access(addr, 0, 0, 0, 2);
2737#endif
2738 return 0;
2739}
2740
2741static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2742{
2743#ifdef DEBUG_UNASSIGNED
2744 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2745#endif
2746#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2747 do_unassigned_access(addr, 0, 0, 0, 4);
2748#endif
2749 return 0;
2750}
2751
2752static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2753{
2754#ifdef DEBUG_UNASSIGNED
2755 printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
2756#endif
2757}
2758
2759static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2760{
2761#ifdef DEBUG_UNASSIGNED
2762 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2763#endif
2764#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2765 do_unassigned_access(addr, 1, 0, 0, 2);
2766#endif
2767}
2768
2769static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2770{
2771#ifdef DEBUG_UNASSIGNED
2772 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2773#endif
2774#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2775 do_unassigned_access(addr, 1, 0, 0, 4);
2776#endif
2777}
2778static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2779 unassigned_mem_readb,
2780 unassigned_mem_readw,
2781 unassigned_mem_readl,
2782};
2783
2784static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2785 unassigned_mem_writeb,
2786 unassigned_mem_writew,
2787 unassigned_mem_writel,
2788};
2789
2790static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2791{
2792 unsigned long ram_addr;
2793 int dirty_flags;
2794#if defined(VBOX)
2795 ram_addr = addr;
2796#else
2797 ram_addr = addr - (unsigned long)phys_ram_base;
2798#endif
2799#ifdef VBOX
2800 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2801 dirty_flags = 0xff;
2802 else
2803#endif /* VBOX */
2804 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2805 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2806#if !defined(CONFIG_USER_ONLY)
2807 tb_invalidate_phys_page_fast(ram_addr, 1);
2808# ifdef VBOX
2809 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2810 dirty_flags = 0xff;
2811 else
2812# endif /* VBOX */
2813 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2814#endif
2815 }
2816#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2817 remR3PhysWriteU8(addr, val);
2818#else
2819 stb_p((uint8_t *)(long)addr, val);
2820#endif
2821#ifdef USE_KQEMU
2822 if (cpu_single_env->kqemu_enabled &&
2823 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2824 kqemu_modify_page(cpu_single_env, ram_addr);
2825#endif
2826 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2827#ifdef VBOX
2828 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2829#endif /* VBOX */
2830 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2831 /* we remove the notdirty callback only if the code has been
2832 flushed */
2833 if (dirty_flags == 0xff)
2834 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
2835}
2836
2837static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2838{
2839 unsigned long ram_addr;
2840 int dirty_flags;
2841#if defined(VBOX)
2842 ram_addr = addr;
2843#else
2844 ram_addr = addr - (unsigned long)phys_ram_base;
2845#endif
2846#ifdef VBOX
2847 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2848 dirty_flags = 0xff;
2849 else
2850#endif /* VBOX */
2851 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2852 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2853#if !defined(CONFIG_USER_ONLY)
2854 tb_invalidate_phys_page_fast(ram_addr, 2);
2855# ifdef VBOX
2856 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2857 dirty_flags = 0xff;
2858 else
2859# endif /* VBOX */
2860 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2861#endif
2862 }
2863#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2864 remR3PhysWriteU16(addr, val);
2865#else
2866 stw_p((uint8_t *)(long)addr, val);
2867#endif
2868
2869#ifdef USE_KQEMU
2870 if (cpu_single_env->kqemu_enabled &&
2871 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2872 kqemu_modify_page(cpu_single_env, ram_addr);
2873#endif
2874 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2875#ifdef VBOX
2876 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2877#endif
2878 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2879 /* we remove the notdirty callback only if the code has been
2880 flushed */
2881 if (dirty_flags == 0xff)
2882 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
2883}
2884
2885static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2886{
2887 unsigned long ram_addr;
2888 int dirty_flags;
2889#if defined(VBOX)
2890 ram_addr = addr;
2891#else
2892 ram_addr = addr - (unsigned long)phys_ram_base;
2893#endif
2894#ifdef VBOX
2895 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2896 dirty_flags = 0xff;
2897 else
2898#endif /* VBOX */
2899 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2900 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2901#if !defined(CONFIG_USER_ONLY)
2902 tb_invalidate_phys_page_fast(ram_addr, 4);
2903# ifdef VBOX
2904 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2905 dirty_flags = 0xff;
2906 else
2907# endif /* VBOX */
2908 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2909#endif
2910 }
2911#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2912 remR3PhysWriteU32(addr, val);
2913#else
2914 stl_p((uint8_t *)(long)addr, val);
2915#endif
2916#ifdef USE_KQEMU
2917 if (cpu_single_env->kqemu_enabled &&
2918 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2919 kqemu_modify_page(cpu_single_env, ram_addr);
2920#endif
2921 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2922#ifdef VBOX
2923 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2924#endif
2925 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2926 /* we remove the notdirty callback only if the code has been
2927 flushed */
2928 if (dirty_flags == 0xff)
2929 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
2930}
2931
2932static CPUReadMemoryFunc *error_mem_read[3] = {
2933 NULL, /* never used */
2934 NULL, /* never used */
2935 NULL, /* never used */
2936};
2937
2938static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2939 notdirty_mem_writeb,
2940 notdirty_mem_writew,
2941 notdirty_mem_writel,
2942};
2943
2944
2945/* Generate a debug exception if a watchpoint has been hit. */
2946static void check_watchpoint(int offset, int flags)
2947{
2948 CPUState *env = cpu_single_env;
2949 target_ulong vaddr;
2950 int i;
2951
2952 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2953 for (i = 0; i < env->nb_watchpoints; i++) {
2954 if (vaddr == env->watchpoint[i].vaddr
2955 && (env->watchpoint[i].type & flags)) {
2956 env->watchpoint_hit = i + 1;
2957 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2958 break;
2959 }
2960 }
2961}
2962
2963/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2964 so these check for a hit then pass through to the normal out-of-line
2965 phys routines. */
2966static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2967{
2968 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2969 return ldub_phys(addr);
2970}
2971
2972static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2973{
2974 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2975 return lduw_phys(addr);
2976}
2977
2978static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2979{
2980 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2981 return ldl_phys(addr);
2982}
2983
2984static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2985 uint32_t val)
2986{
2987 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2988 stb_phys(addr, val);
2989}
2990
2991static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2992 uint32_t val)
2993{
2994 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2995 stw_phys(addr, val);
2996}
2997
2998static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2999 uint32_t val)
3000{
3001 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
3002 stl_phys(addr, val);
3003}
3004
3005static CPUReadMemoryFunc *watch_mem_read[3] = {
3006 watch_mem_readb,
3007 watch_mem_readw,
3008 watch_mem_readl,
3009};
3010
3011static CPUWriteMemoryFunc *watch_mem_write[3] = {
3012 watch_mem_writeb,
3013 watch_mem_writew,
3014 watch_mem_writel,
3015};
3016
3017static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
3018 unsigned int len)
3019{
3020 uint32_t ret;
3021 unsigned int idx;
3022
3023 idx = SUBPAGE_IDX(addr - mmio->base);
3024#if defined(DEBUG_SUBPAGE)
3025 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3026 mmio, len, addr, idx);
3027#endif
3028 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
3029
3030 return ret;
3031}
3032
3033static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
3034 uint32_t value, unsigned int len)
3035{
3036 unsigned int idx;
3037
3038 idx = SUBPAGE_IDX(addr - mmio->base);
3039#if defined(DEBUG_SUBPAGE)
3040 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
3041 mmio, len, addr, idx, value);
3042#endif
3043 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
3044}
3045
3046static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
3047{
3048#if defined(DEBUG_SUBPAGE)
3049 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3050#endif
3051
3052 return subpage_readlen(opaque, addr, 0);
3053}
3054
3055static void subpage_writeb (void *opaque, target_phys_addr_t addr,
3056 uint32_t value)
3057{
3058#if defined(DEBUG_SUBPAGE)
3059 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3060#endif
3061 subpage_writelen(opaque, addr, value, 0);
3062}
3063
3064static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
3065{
3066#if defined(DEBUG_SUBPAGE)
3067 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3068#endif
3069
3070 return subpage_readlen(opaque, addr, 1);
3071}
3072
3073static void subpage_writew (void *opaque, target_phys_addr_t addr,
3074 uint32_t value)
3075{
3076#if defined(DEBUG_SUBPAGE)
3077 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3078#endif
3079 subpage_writelen(opaque, addr, value, 1);
3080}
3081
3082static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
3083{
3084#if defined(DEBUG_SUBPAGE)
3085 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3086#endif
3087
3088 return subpage_readlen(opaque, addr, 2);
3089}
3090
3091static void subpage_writel (void *opaque,
3092 target_phys_addr_t addr, uint32_t value)
3093{
3094#if defined(DEBUG_SUBPAGE)
3095 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3096#endif
3097 subpage_writelen(opaque, addr, value, 2);
3098}
3099
3100static CPUReadMemoryFunc *subpage_read[] = {
3101 &subpage_readb,
3102 &subpage_readw,
3103 &subpage_readl,
3104};
3105
3106static CPUWriteMemoryFunc *subpage_write[] = {
3107 &subpage_writeb,
3108 &subpage_writew,
3109 &subpage_writel,
3110};
3111
3112static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3113 ram_addr_t memory)
3114{
3115 int idx, eidx;
3116 unsigned int i;
3117
3118 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3119 return -1;
3120 idx = SUBPAGE_IDX(start);
3121 eidx = SUBPAGE_IDX(end);
3122#if defined(DEBUG_SUBPAGE)
3123 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
3124 mmio, start, end, idx, eidx, memory);
3125#endif
3126 memory >>= IO_MEM_SHIFT;
3127 for (; idx <= eidx; idx++) {
3128 for (i = 0; i < 4; i++) {
3129 if (io_mem_read[memory][i]) {
3130 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
3131 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
3132 }
3133 if (io_mem_write[memory][i]) {
3134 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
3135 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
3136 }
3137 }
3138 }
3139
3140 return 0;
3141}
3142
3143static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3144 ram_addr_t orig_memory)
3145{
3146 subpage_t *mmio;
3147 int subpage_memory;
3148
3149 mmio = qemu_mallocz(sizeof(subpage_t));
3150 if (mmio != NULL) {
3151 mmio->base = base;
3152 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
3153#if defined(DEBUG_SUBPAGE)
3154 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3155 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3156#endif
3157 *phys = subpage_memory | IO_MEM_SUBPAGE;
3158 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
3159 }
3160
3161 return mmio;
3162}
3163
3164static void io_mem_init(void)
3165{
3166 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
3167 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3168 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
3169#if defined(VBOX) && !defined(VBOX_WITH_NEW_PHYS_CODE)
3170 cpu_register_io_memory(IO_MEM_RAM_MISSING >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3171 io_mem_nb = 6;
3172#else
3173 io_mem_nb = 5;
3174#endif
3175
3176 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
3177 watch_mem_write, NULL);
3178
3179#ifndef VBOX /* VBOX: we do this later when the RAM is allocated. */
3180 /* alloc dirty bits array */
3181 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3182 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
3183#endif /* !VBOX */
3184}
3185
3186/* mem_read and mem_write are arrays of functions containing the
3187 function to access byte (index 0), word (index 1) and dword (index
3188 2). Functions can be omitted with a NULL function pointer. The
3189 registered functions may be modified dynamically later.
3190 If io_index is non zero, the corresponding io zone is
3191 modified. If it is zero, a new io zone is allocated. The return
3192 value can be used with cpu_register_physical_memory(). (-1) is
3193 returned on error. */
3194int cpu_register_io_memory(int io_index,
3195 CPUReadMemoryFunc **mem_read,
3196 CPUWriteMemoryFunc **mem_write,
3197 void *opaque)
3198{
3199 int i, subwidth = 0;
3200
3201 if (io_index <= 0) {
3202 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
3203 return -1;
3204 io_index = io_mem_nb++;
3205 } else {
3206 if (io_index >= IO_MEM_NB_ENTRIES)
3207 return -1;
3208 }
3209
3210 for(i = 0;i < 3; i++) {
3211 if (!mem_read[i] || !mem_write[i])
3212 subwidth = IO_MEM_SUBWIDTH;
3213 io_mem_read[io_index][i] = mem_read[i];
3214 io_mem_write[io_index][i] = mem_write[i];
3215 }
3216 io_mem_opaque[io_index] = opaque;
3217 return (io_index << IO_MEM_SHIFT) | subwidth;
3218}
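/* Illustrative sketch added for this listing (not part of the original
 * file): a device registers its byte/word/dword callbacks and then maps the
 * returned index with cpu_register_physical_memory().  The handler names,
 * the device state pointer and the base address are invented; the NULL
 * byte/word entries simply mark missing widths (IO_MEM_SUBWIDTH) as
 * described in the comment above. */
#if 0
static uint32_t example_dev_readl(void *opaque, target_phys_addr_t addr)
{
    return 0; /* device register read */
}
static void example_dev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* device register write */
}
static CPUReadMemoryFunc *example_dev_read[3] = {
    NULL, NULL, example_dev_readl,   /* dword access only */
};
static CPUWriteMemoryFunc *example_dev_write[3] = {
    NULL, NULL, example_dev_writel,
};
static void example_dev_map(void *dev_state, target_phys_addr_t base)
{
    int idx = cpu_register_io_memory(0, example_dev_read, example_dev_write, dev_state);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, idx);
}
#endif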
3219
3220CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
3221{
3222 return io_mem_write[io_index >> IO_MEM_SHIFT];
3223}
3224
3225CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
3226{
3227 return io_mem_read[io_index >> IO_MEM_SHIFT];
3228}
3229#endif /* !defined(CONFIG_USER_ONLY) */
3230
3231/* physical memory access (slow version, mainly for debug) */
3232#if defined(CONFIG_USER_ONLY)
3233void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3234 int len, int is_write)
3235{
3236 int l, flags;
3237 target_ulong page;
3238 void * p;
3239
3240 while (len > 0) {
3241 page = addr & TARGET_PAGE_MASK;
3242 l = (page + TARGET_PAGE_SIZE) - addr;
3243 if (l > len)
3244 l = len;
3245 flags = page_get_flags(page);
3246 if (!(flags & PAGE_VALID))
3247 return;
3248 if (is_write) {
3249 if (!(flags & PAGE_WRITE))
3250 return;
3251 /* XXX: this code should not depend on lock_user */
3252 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3253 /* FIXME - should this return an error rather than just fail? */
3254 return;
3255 memcpy(p, buf, l);
3256 unlock_user(p, addr, l);
3257 } else {
3258 if (!(flags & PAGE_READ))
3259 return;
3260 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3261 /* FIXME - should this return an error rather than just fail? */
3262 return;
3263 memcpy(buf, p, l);
3264 unlock_user(p, addr, 0);
3265 }
3266 len -= l;
3267 buf += l;
3268 addr += l;
3269 }
3270}
3271
3272#else
3273void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3274 int len, int is_write)
3275{
3276 int l, io_index;
3277 uint8_t *ptr;
3278 uint32_t val;
3279 target_phys_addr_t page;
3280 unsigned long pd;
3281 PhysPageDesc *p;
3282
3283 while (len > 0) {
3284 page = addr & TARGET_PAGE_MASK;
3285 l = (page + TARGET_PAGE_SIZE) - addr;
3286 if (l > len)
3287 l = len;
3288 p = phys_page_find(page >> TARGET_PAGE_BITS);
3289 if (!p) {
3290 pd = IO_MEM_UNASSIGNED;
3291 } else {
3292 pd = p->phys_offset;
3293 }
3294
3295 if (is_write) {
3296 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3297 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3298 /* XXX: could force cpu_single_env to NULL to avoid
3299 potential bugs */
3300 if (l >= 4 && ((addr & 3) == 0)) {
3301 /* 32 bit write access */
3302#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3303 val = ldl_p(buf);
3304#else
3305 val = *(const uint32_t *)buf;
3306#endif
3307 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3308 l = 4;
3309 } else if (l >= 2 && ((addr & 1) == 0)) {
3310 /* 16 bit write access */
3311#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3312 val = lduw_p(buf);
3313#else
3314 val = *(const uint16_t *)buf;
3315#endif
3316 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
3317 l = 2;
3318 } else {
3319 /* 8 bit write access */
3320#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3321 val = ldub_p(buf);
3322#else
3323 val = *(const uint8_t *)buf;
3324#endif
3325 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
3326 l = 1;
3327 }
3328 } else {
3329 unsigned long addr1;
3330 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3331 /* RAM case */
3332#ifdef VBOX
3333 remR3PhysWrite(addr1, buf, l); NOREF(ptr);
3334#else
3335 ptr = phys_ram_base + addr1;
3336 memcpy(ptr, buf, l);
3337#endif
3338 if (!cpu_physical_memory_is_dirty(addr1)) {
3339 /* invalidate code */
3340 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3341 /* set dirty bit */
3342#ifdef VBOX
3343 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
3344#endif
3345 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3346 (0xff & ~CODE_DIRTY_FLAG);
3347 }
3348 }
3349 } else {
3350 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3351 !(pd & IO_MEM_ROMD)) {
3352 /* I/O case */
3353 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3354 if (l >= 4 && ((addr & 3) == 0)) {
3355 /* 32 bit read access */
3356 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3357#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3358 stl_p(buf, val);
3359#else
3360 *(uint32_t *)buf = val;
3361#endif
3362 l = 4;
3363 } else if (l >= 2 && ((addr & 1) == 0)) {
3364 /* 16 bit read access */
3365 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
3366#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3367 stw_p(buf, val);
3368#else
3369 *(uint16_t *)buf = val;
3370#endif
3371 l = 2;
3372 } else {
3373 /* 8 bit read access */
3374 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
3375#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3376 stb_p(buf, val);
3377#else
3378 *(uint8_t *)buf = val;
3379#endif
3380 l = 1;
3381 }
3382 } else {
3383 /* RAM case */
3384#ifdef VBOX
3385 remR3PhysRead((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), buf, l); NOREF(ptr);
3386#else
3387 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3388 (addr & ~TARGET_PAGE_MASK);
3389 memcpy(buf, ptr, l);
3390#endif
3391 }
3392 }
3393 len -= l;
3394 buf += l;
3395 addr += l;
3396 }
3397}
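/* Illustrative sketch added for this listing (not from the original source):
 * callers normally go through the cpu_physical_memory_read()/
 * cpu_physical_memory_write() wrappers (used by ldub_phys()/stb_phys()
 * below), which are thin shims around cpu_physical_memory_rw(); the copy
 * helper name below is invented. */
#if 0
static void example_copy_guest_phys(target_phys_addr_t dst,
                                    target_phys_addr_t src, int len)
{
    uint8_t tmp[64];
    while (len > 0) {
        int chunk = len < (int)sizeof(tmp) ? len : (int)sizeof(tmp);
        cpu_physical_memory_read(src, tmp, chunk);
        cpu_physical_memory_write(dst, tmp, chunk);
        src += chunk; dst += chunk; len -= chunk;
    }
}
#endif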
3398
3399#ifndef VBOX
3400/* used for ROM loading: can write in RAM and ROM */
3401void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3402 const uint8_t *buf, int len)
3403{
3404 int l;
3405 uint8_t *ptr;
3406 target_phys_addr_t page;
3407 unsigned long pd;
3408 PhysPageDesc *p;
3409
3410 while (len > 0) {
3411 page = addr & TARGET_PAGE_MASK;
3412 l = (page + TARGET_PAGE_SIZE) - addr;
3413 if (l > len)
3414 l = len;
3415 p = phys_page_find(page >> TARGET_PAGE_BITS);
3416 if (!p) {
3417 pd = IO_MEM_UNASSIGNED;
3418 } else {
3419 pd = p->phys_offset;
3420 }
3421
3422 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3423 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3424 !(pd & IO_MEM_ROMD)) {
3425 /* do nothing */
3426 } else {
3427 unsigned long addr1;
3428 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3429 /* ROM/RAM case */
3430 ptr = phys_ram_base + addr1;
3431 memcpy(ptr, buf, l);
3432 }
3433 len -= l;
3434 buf += l;
3435 addr += l;
3436 }
3437}
3438#endif /* !VBOX */
3439
3440
3441/* warning: addr must be aligned */
3442uint32_t ldl_phys(target_phys_addr_t addr)
3443{
3444 int io_index;
3445 uint8_t *ptr;
3446 uint32_t val;
3447 unsigned long pd;
3448 PhysPageDesc *p;
3449
3450 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3451 if (!p) {
3452 pd = IO_MEM_UNASSIGNED;
3453 } else {
3454 pd = p->phys_offset;
3455 }
3456
3457 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3458 !(pd & IO_MEM_ROMD)) {
3459 /* I/O case */
3460 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3461 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3462 } else {
3463 /* RAM case */
3464#ifndef VBOX
3465 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3466 (addr & ~TARGET_PAGE_MASK);
3467 val = ldl_p(ptr);
3468#else
3469 val = remR3PhysReadU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
3470#endif
3471 }
3472 return val;
3473}
3474
3475/* warning: addr must be aligned */
3476uint64_t ldq_phys(target_phys_addr_t addr)
3477{
3478 int io_index;
3479 uint8_t *ptr;
3480 uint64_t val;
3481 unsigned long pd;
3482 PhysPageDesc *p;
3483
3484 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3485 if (!p) {
3486 pd = IO_MEM_UNASSIGNED;
3487 } else {
3488 pd = p->phys_offset;
3489 }
3490
3491 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3492 !(pd & IO_MEM_ROMD)) {
3493 /* I/O case */
3494 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3495#ifdef TARGET_WORDS_BIGENDIAN
3496 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3497 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3498#else
3499 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3500 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3501#endif
3502 } else {
3503 /* RAM case */
3504#ifndef VBOX
3505 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3506 (addr & ~TARGET_PAGE_MASK);
3507 val = ldq_p(ptr);
3508#else
3509 val = remR3PhysReadU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
3510#endif
3511 }
3512 return val;
3513}
3514
3515/* XXX: optimize */
3516uint32_t ldub_phys(target_phys_addr_t addr)
3517{
3518 uint8_t val;
3519 cpu_physical_memory_read(addr, &val, 1);
3520 return val;
3521}
3522
3523/* XXX: optimize */
3524uint32_t lduw_phys(target_phys_addr_t addr)
3525{
3526 uint16_t val;
3527 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3528 return tswap16(val);
3529}
3530
3531/* warning: addr must be aligned. The ram page is not masked as dirty
3532 and the code inside is not invalidated. It is useful if the dirty
3533 bits are used to track modified PTEs */
3534void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3535{
3536 int io_index;
3537 uint8_t *ptr;
3538 unsigned long pd;
3539 PhysPageDesc *p;
3540
3541 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3542 if (!p) {
3543 pd = IO_MEM_UNASSIGNED;
3544 } else {
3545 pd = p->phys_offset;
3546 }
3547
3548 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3549 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3550 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3551 } else {
3552#ifndef VBOX
3553 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3554 ptr = phys_ram_base + addr1;
3555 stl_p(ptr, val);
3556#else
3557 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3558#endif
3559#ifndef VBOX
3560 if (unlikely(in_migration)) {
3561 if (!cpu_physical_memory_is_dirty(addr1)) {
3562 /* invalidate code */
3563 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3564 /* set dirty bit */
3565 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3566 (0xff & ~CODE_DIRTY_FLAG);
3567 }
3568 }
3569#endif
3570 }
3571}
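/* Illustrative sketch added for this listing (not in the original file): the
 * typical user of stl_phys_notdirty() is a target MMU page walker updating
 * accessed/dirty bits in a guest PTE, where invalidating translated code for
 * the page-table page would be wasted work.  The bit value 0x20 (x86 PTE
 * "accessed") and the function name are used purely as an example. */
#if 0
static void example_mark_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);
    if (!(pte & 0x20)) {
        pte |= 0x20;
        /* write back without touching the dirty bitmap or translated code */
        stl_phys_notdirty(pte_addr, pte);
    }
}
#endif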
3572
3573void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3574{
3575 int io_index;
3576 uint8_t *ptr;
3577 unsigned long pd;
3578 PhysPageDesc *p;
3579
3580 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3581 if (!p) {
3582 pd = IO_MEM_UNASSIGNED;
3583 } else {
3584 pd = p->phys_offset;
3585 }
3586
3587 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3588 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3589#ifdef TARGET_WORDS_BIGENDIAN
3590 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3591 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3592#else
3593 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3594 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3595#endif
3596 } else {
3597#ifndef VBOX
3598 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3599 (addr & ~TARGET_PAGE_MASK);
3600 stq_p(ptr, val);
3601#else
3602 remR3PhysWriteU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3603#endif
3604 }
3605}
3606
3607
3608/* warning: addr must be aligned */
3609void stl_phys(target_phys_addr_t addr, uint32_t val)
3610{
3611 int io_index;
3612 uint8_t *ptr;
3613 unsigned long pd;
3614 PhysPageDesc *p;
3615
3616 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3617 if (!p) {
3618 pd = IO_MEM_UNASSIGNED;
3619 } else {
3620 pd = p->phys_offset;
3621 }
3622
3623 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3624 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3625 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3626 } else {
3627 unsigned long addr1;
3628 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3629 /* RAM case */
3630#ifndef VBOX
3631 ptr = phys_ram_base + addr1;
3632 stl_p(ptr, val);
3633#else
3634 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3635#endif
3636 if (!cpu_physical_memory_is_dirty(addr1)) {
3637 /* invalidate code */
3638 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3639 /* set dirty bit */
3640#ifdef VBOX
3641 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
3642#endif
3643 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3644 (0xff & ~CODE_DIRTY_FLAG);
3645 }
3646 }
3647}
3648
3649/* XXX: optimize */
3650void stb_phys(target_phys_addr_t addr, uint32_t val)
3651{
3652 uint8_t v = val;
3653 cpu_physical_memory_write(addr, &v, 1);
3654}
3655
3656/* XXX: optimize */
3657void stw_phys(target_phys_addr_t addr, uint32_t val)
3658{
3659 uint16_t v = tswap16(val);
3660 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3661}
3662
3663/* XXX: optimize */
3664void stq_phys(target_phys_addr_t addr, uint64_t val)
3665{
3666 val = tswap64(val);
3667 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3668}
3669
3670#endif
3671
3672/* virtual memory access for debug */
3673int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3674 uint8_t *buf, int len, int is_write)
3675{
3676 int l;
3677 target_ulong page, phys_addr;
3678
3679 while (len > 0) {
3680 page = addr & TARGET_PAGE_MASK;
3681 phys_addr = cpu_get_phys_page_debug(env, page);
3682 /* if no physical page mapped, return an error */
3683 if (phys_addr == -1)
3684 return -1;
3685 l = (page + TARGET_PAGE_SIZE) - addr;
3686 if (l > len)
3687 l = len;
3688 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3689 buf, l, is_write);
3690 len -= l;
3691 buf += l;
3692 addr += l;
3693 }
3694 return 0;
3695}
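/* Illustrative sketch added for this listing (not part of the original
 * source): this is the entry point used by debugger-style accesses (e.g. a
 * gdb stub) that operate on guest *virtual* addresses; the wrapper name below
 * is invented. */
#if 0
static int example_read_guest_virt(CPUState *env, target_ulong vaddr,
                                   void *dst, int len)
{
    /* returns -1 if any page in the range is unmapped */
    return cpu_memory_rw_debug(env, vaddr, (uint8_t *)dst, len, 0 /* read */);
}
#endif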
3696
3697/* in deterministic execution mode, instructions doing device I/Os
3698 must be at the end of the TB */
3699void cpu_io_recompile(CPUState *env, void *retaddr)
3700{
3701 TranslationBlock *tb;
3702 uint32_t n, cflags;
3703 target_ulong pc, cs_base;
3704 uint64_t flags;
3705
3706 tb = tb_find_pc((unsigned long)retaddr);
3707 if (!tb) {
3708 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3709 retaddr);
3710 }
3711 n = env->icount_decr.u16.low + tb->icount;
3712 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3713 /* Calculate how many instructions had been executed before the fault
3714 occurred. */
3715 n = n - env->icount_decr.u16.low;
3716 /* Generate a new TB ending on the I/O insn. */
3717 n++;
3718 /* On MIPS and SH, delay slot instructions can only be restarted if
3719 they were already the first instruction in the TB. If this is not
3720 the first instruction in a TB then re-execute the preceding
3721 branch. */
3722#if defined(TARGET_MIPS)
3723 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3724 env->active_tc.PC -= 4;
3725 env->icount_decr.u16.low++;
3726 env->hflags &= ~MIPS_HFLAG_BMASK;
3727 }
3728#elif defined(TARGET_SH4)
3729 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3730 && n > 1) {
3731 env->pc -= 2;
3732 env->icount_decr.u16.low++;
3733 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3734 }
3735#endif
3736 /* This should never happen. */
3737 if (n > CF_COUNT_MASK)
3738 cpu_abort(env, "TB too big during recompile");
3739
3740 cflags = n | CF_LAST_IO;
3741 pc = tb->pc;
3742 cs_base = tb->cs_base;
3743 flags = tb->flags;
3744 tb_phys_invalidate(tb, -1);
3745 /* FIXME: In theory this could raise an exception. In practice
3746 we have already translated the block once so it's probably ok. */
3747 tb_gen_code(env, pc, cs_base, flags, cflags);
3748 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3749 the first in the TB) then we end up generating a whole new TB and
3750 repeating the fault, which is horribly inefficient.
3751 Better would be to execute just this insn uncached, or generate a
3752 second new TB. */
3753 cpu_resume_from_signal(env, NULL);
3754}
3755
3756#ifndef VBOX
3757void dump_exec_info(FILE *f,
3758 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3759{
3760 int i, target_code_size, max_target_code_size;
3761 int direct_jmp_count, direct_jmp2_count, cross_page;
3762 TranslationBlock *tb;
3763
3764 target_code_size = 0;
3765 max_target_code_size = 0;
3766 cross_page = 0;
3767 direct_jmp_count = 0;
3768 direct_jmp2_count = 0;
3769 for(i = 0; i < nb_tbs; i++) {
3770 tb = &tbs[i];
3771 target_code_size += tb->size;
3772 if (tb->size > max_target_code_size)
3773 max_target_code_size = tb->size;
3774 if (tb->page_addr[1] != -1)
3775 cross_page++;
3776 if (tb->tb_next_offset[0] != 0xffff) {
3777 direct_jmp_count++;
3778 if (tb->tb_next_offset[1] != 0xffff) {
3779 direct_jmp2_count++;
3780 }
3781 }
3782 }
3783 /* XXX: avoid using doubles ? */
3784 cpu_fprintf(f, "Translation buffer state:\n");
3785 cpu_fprintf(f, "gen code size %ld/%ld\n",
3786 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3787 cpu_fprintf(f, "TB count %d/%d\n",
3788 nb_tbs, code_gen_max_blocks);
3789 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3790 nb_tbs ? target_code_size / nb_tbs : 0,
3791 max_target_code_size);
3792 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3793 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3794 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3795 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3796 cross_page,
3797 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3798 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3799 direct_jmp_count,
3800 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3801 direct_jmp2_count,
3802 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3803 cpu_fprintf(f, "\nStatistics:\n");
3804 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3805 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3806 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3807 tcg_dump_info(f, cpu_fprintf);
3808}
3809#endif /* !VBOX */
3810
3811#if !defined(CONFIG_USER_ONLY)
3812
3813#define MMUSUFFIX _cmmu
3814#define GETPC() NULL
3815#define env cpu_single_env
3816#define SOFTMMU_CODE_ACCESS
3817
3818#define SHIFT 0
3819#include "softmmu_template.h"
3820
3821#define SHIFT 1
3822#include "softmmu_template.h"
3823
3824#define SHIFT 2
3825#include "softmmu_template.h"
3826
3827#define SHIFT 3
3828#include "softmmu_template.h"
3829
3830#undef env
3831
3832#endif