VirtualBox

source: vbox/trunk/src/recompiler_new/exec.c @ 18472

Last change on this file since 18472 was 18349, checked in by vboxsync, 16 years ago

REM: Corrected L1_BITS (crashed in tb_alloc_page during Ubuntu 8.10/64 boot with ~11GB).

  • Property svn:eol-style set to native
File size: 113.5 KB
1/*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29#include "config.h"
30#ifndef VBOX
31#ifdef _WIN32
32#include <windows.h>
33#else
34#include <sys/types.h>
35#include <sys/mman.h>
36#endif
37#include <stdlib.h>
38#include <stdio.h>
39#include <stdarg.h>
40#include <string.h>
41#include <errno.h>
42#include <unistd.h>
43#include <inttypes.h>
44#else /* VBOX */
45# include <stdlib.h>
46# include <stdio.h>
47# include <iprt/alloc.h>
48# include <iprt/string.h>
49# include <iprt/param.h>
50# include <VBox/pgm.h> /* PGM_DYNAMIC_RAM_ALLOC */
51#endif /* VBOX */
52
53#include "cpu.h"
54#include "exec-all.h"
55#if defined(CONFIG_USER_ONLY)
56#include <qemu.h>
57#endif
58
59//#define DEBUG_TB_INVALIDATE
60//#define DEBUG_FLUSH
61//#define DEBUG_TLB
62//#define DEBUG_UNASSIGNED
63
64/* make various TB consistency checks */
65//#define DEBUG_TB_CHECK
66//#define DEBUG_TLB_CHECK
67
68#if !defined(CONFIG_USER_ONLY)
69/* TB consistency checks only implemented for usermode emulation. */
70#undef DEBUG_TB_CHECK
71#endif
72
73#define SMC_BITMAP_USE_THRESHOLD 10
74
75#define MMAP_AREA_START 0x00000000
76#define MMAP_AREA_END 0xa8000000
77
78#if defined(TARGET_SPARC64)
79#define TARGET_PHYS_ADDR_SPACE_BITS 41
80#elif defined(TARGET_SPARC)
81#define TARGET_PHYS_ADDR_SPACE_BITS 36
82#elif defined(TARGET_ALPHA)
83#define TARGET_PHYS_ADDR_SPACE_BITS 42
84#define TARGET_VIRT_ADDR_SPACE_BITS 42
85#elif defined(TARGET_PPC64)
86#define TARGET_PHYS_ADDR_SPACE_BITS 42
87#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
88#define TARGET_PHYS_ADDR_SPACE_BITS 42
89#elif defined(TARGET_I386) && !defined(USE_KQEMU)
90#define TARGET_PHYS_ADDR_SPACE_BITS 36
91#else
92/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
93#define TARGET_PHYS_ADDR_SPACE_BITS 32
94#endif
95
96static TranslationBlock *tbs;
97int code_gen_max_blocks;
98TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
99static int nb_tbs;
100/* any access to the tbs or the page table must use this lock */
101spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
102
103#ifndef VBOX
104#if defined(__arm__) || defined(__sparc_v9__)
105/* The prologue must be reachable with a direct jump. ARM and Sparc64
106 have limited branch ranges (possibly also PPC) so place it in a
107 section close to the code segment. */
108#define code_gen_section \
109 __attribute__((__section__(".gen_code"))) \
110 __attribute__((aligned (32)))
111#else
112#define code_gen_section \
113 __attribute__((aligned (32)))
114#endif
115uint8_t code_gen_prologue[1024] code_gen_section;
116
117#else /* VBOX */
118extern uint8_t* code_gen_prologue;
119#endif /* VBOX */
120
121static uint8_t *code_gen_buffer;
122static unsigned long code_gen_buffer_size;
123/* threshold to flush the translated code buffer */
124static unsigned long code_gen_buffer_max_size;
125uint8_t *code_gen_ptr;
126
127#ifndef VBOX
128#if !defined(CONFIG_USER_ONLY)
129ram_addr_t phys_ram_size;
130int phys_ram_fd;
131uint8_t *phys_ram_base;
132uint8_t *phys_ram_dirty;
133static int in_migration;
134static ram_addr_t phys_ram_alloc_offset = 0;
135#endif
136#else /* VBOX */
137RTGCPHYS phys_ram_size;
138/* we have memory ranges (the high PC-BIOS mapping) which
139 cause some pages to fall outside the dirty map here. */
140RTGCPHYS phys_ram_dirty_size;
141#endif /* VBOX */
142#if !defined(VBOX)
143uint8_t *phys_ram_base;
144#endif
145uint8_t *phys_ram_dirty;
146
147CPUState *first_cpu;
148/* current CPU in the current thread. It is only valid inside
149 cpu_exec() */
150CPUState *cpu_single_env;
151/* 0 = Do not count executed instructions.
152 1 = Precise instruction counting.
153 2 = Adaptive rate instruction counting. */
154int use_icount = 0;
155/* Current instruction counter. While executing translated code this may
156 include some instructions that have not yet been executed. */
157int64_t qemu_icount;
158
159typedef struct PageDesc {
160 /* list of TBs intersecting this ram page */
161 TranslationBlock *first_tb;
162 /* in order to optimize self-modifying code, we count the number
163 of code writes to a given page so we know when to use a bitmap */
164 unsigned int code_write_count;
165 uint8_t *code_bitmap;
166#if defined(CONFIG_USER_ONLY)
167 unsigned long flags;
168#endif
169} PageDesc;
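/* Note on pointer tagging used throughout this file: the TB lists reached via
   first_tb/page_next and the jmp_first/jmp_next chains keep extra state in
   the low two bits of each TranslationBlock pointer; 0 or 1 selects which of
   the TB's two page slots the link belongs to, and the value 2 marks the end
   of a circular jump list (see "(long)tb | n" in tb_alloc_page() and
   "(long)tb | 2" in tb_link_phys()). */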
170
171typedef struct PhysPageDesc {
172 /* offset in host memory of the page + io_index in the low 12 bits */
173 ram_addr_t phys_offset;
174} PhysPageDesc;
175
176#define L2_BITS 10
177#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
178/* XXX: this is a temporary hack for the alpha target.
179 * In the future, this is to be replaced by a multi-level table
180 * to actually be able to handle the complete 64-bit address space.
181 */
182#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
183#else
184# ifdef VBOX /* > 4GB please. */
185#define L1_BITS (TARGET_PHYS_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
186# else
187#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
188# endif
189#endif
190
191#define L1_SIZE (1 << L1_BITS)
192#define L2_SIZE (1 << L2_BITS)
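/* Sketch of the lookup these constants imply (assuming the usual
   TARGET_PAGE_BITS of 12 for x86 guests): the page index is split into an
   upper L1 part and a lower L2 part:
       index    = addr >> TARGET_PAGE_BITS
       l1_index = index >> L2_BITS            (selects an L2 table)
       l2_index = index & (L2_SIZE - 1)       (selects the entry in it)
   This is what page_l1_map() and page_find_alloc() below do. */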
193
194static void io_mem_init(void);
195
196unsigned long qemu_real_host_page_size;
197unsigned long qemu_host_page_bits;
198unsigned long qemu_host_page_size;
199unsigned long qemu_host_page_mask;
200
201/* XXX: for system emulation, it could just be an array */
202static PageDesc *l1_map[L1_SIZE];
203static PhysPageDesc **l1_phys_map;
204
205#if !defined(CONFIG_USER_ONLY)
206static void io_mem_init(void);
207
208/* io memory support */
209CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
210CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
211void *io_mem_opaque[IO_MEM_NB_ENTRIES];
212static int io_mem_nb;
213static int io_mem_watch;
214#endif
215
216#ifndef VBOX
217/* log support */
218static const char *logfilename = "/tmp/qemu.log";
219#endif /* !VBOX */
220FILE *logfile;
221int loglevel;
222#ifndef VBOX
223static int log_append = 0;
224#endif
225
226/* statistics */
227static int tlb_flush_count;
228static int tb_flush_count;
229#ifndef VBOX
230static int tb_phys_invalidate_count;
231#endif /* !VBOX */
232
233#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
234typedef struct subpage_t {
235 target_phys_addr_t base;
236 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
237 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
238 void *opaque[TARGET_PAGE_SIZE][2][4];
239} subpage_t;
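/* Note: subpage_t appears to back I/O regions smaller than a page; the
   per-offset tables let each byte offset within a page dispatch to its own
   read/write handlers for the different access sizes (the offset within the
   page is recovered with SUBPAGE_IDX above). */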
240
241
242#ifndef VBOX
243#ifdef _WIN32
244static void map_exec(void *addr, long size)
245{
246 DWORD old_protect;
247 VirtualProtect(addr, size,
248 PAGE_EXECUTE_READWRITE, &old_protect);
249
250}
251#else
252static void map_exec(void *addr, long size)
253{
254 unsigned long start, end, page_size;
255
256 page_size = getpagesize();
257 start = (unsigned long)addr;
258 start &= ~(page_size - 1);
259
260 end = (unsigned long)addr + size;
261 end += page_size - 1;
262 end &= ~(page_size - 1);
263
264 mprotect((void *)start, end - start,
265 PROT_READ | PROT_WRITE | PROT_EXEC);
266}
267#endif
268#else // VBOX
269static void map_exec(void *addr, long size)
270{
271 RTMemProtect(addr, size,
272 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
273}
274#endif
275
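/* page_init() determines the host page size, derives
   qemu_host_page_size/bits/mask from it and allocates and zeroes l1_phys_map.
   In the non-VBox user-mode build it additionally parses /proc/self/maps and
   marks every existing host mapping PAGE_RESERVED so guest mappings cannot be
   placed on top of them; VBox uses its own mechanism for that, as noted
   below. */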
276static void page_init(void)
277{
278 /* NOTE: we can always suppose that qemu_host_page_size >=
279 TARGET_PAGE_SIZE */
280#ifdef VBOX
281 RTMemProtect(code_gen_buffer, sizeof(code_gen_buffer),
282 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
283 qemu_real_host_page_size = PAGE_SIZE;
284#else /* !VBOX */
285#ifdef _WIN32
286 {
287 SYSTEM_INFO system_info;
288 DWORD old_protect;
289
290 GetSystemInfo(&system_info);
291 qemu_real_host_page_size = system_info.dwPageSize;
292 }
293#else
294 qemu_real_host_page_size = getpagesize();
295#endif
296#endif /* !VBOX */
297
298 if (qemu_host_page_size == 0)
299 qemu_host_page_size = qemu_real_host_page_size;
300 if (qemu_host_page_size < TARGET_PAGE_SIZE)
301 qemu_host_page_size = TARGET_PAGE_SIZE;
302 qemu_host_page_bits = 0;
303#ifndef VBOX
304 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
305#else
306 while ((1 << qemu_host_page_bits) < (int)qemu_host_page_size)
307#endif
308 qemu_host_page_bits++;
309 qemu_host_page_mask = ~(qemu_host_page_size - 1);
310 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
311 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
312#ifdef VBOX
313 /* We use other means to set reserved bit on our pages */
314#else
315#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
316 {
317 long long startaddr, endaddr;
318 FILE *f;
319 int n;
320
321 mmap_lock();
322 last_brk = (unsigned long)sbrk(0);
323 f = fopen("/proc/self/maps", "r");
324 if (f) {
325 do {
326 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
327 if (n == 2) {
328 startaddr = MIN(startaddr,
329 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
330 endaddr = MIN(endaddr,
331 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
332 page_set_flags(startaddr & TARGET_PAGE_MASK,
333 TARGET_PAGE_ALIGN(endaddr),
334 PAGE_RESERVED);
335 }
336 } while (!feof(f));
337 fclose(f);
338 }
339 mmap_unlock();
340 }
341#endif
342#endif
343}
344
345#ifndef VBOX
346static inline PageDesc **page_l1_map(target_ulong index)
347#else
348DECLINLINE(PageDesc **) page_l1_map(target_ulong index)
349#endif
350{
351#if TARGET_LONG_BITS > 32
352 /* Host memory outside guest VM. For 32-bit targets we have already
353 excluded high addresses. */
354# ifndef VBOX
355 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
356 return NULL;
357# else /* VBOX */
358 AssertMsgReturn(index < (target_ulong)L2_SIZE * L1_SIZE,
359 ("index=%RGp >= %RGp; L1_SIZE=%#x L2_SIZE=%#x\n",
360 (RTGCPHYS)index, (RTGCPHYS)L2_SIZE * L1_SIZE, L1_SIZE, L2_SIZE),
361 NULL);
362# endif /* VBOX */
363
364#endif
365 return &l1_map[index >> L2_BITS];
366}
367
368#ifndef VBOX
369static inline PageDesc *page_find_alloc(target_ulong index)
370#else
371DECLINLINE(PageDesc *) page_find_alloc(target_ulong index)
372#endif
373{
374 PageDesc **lp, *p;
375 lp = page_l1_map(index);
376 if (!lp)
377 return NULL;
378
379 p = *lp;
380 if (!p) {
381 /* allocate if not found */
382#if defined(CONFIG_USER_ONLY)
383 unsigned long addr;
384 size_t len = sizeof(PageDesc) * L2_SIZE;
385 /* Don't use qemu_malloc because it may recurse. */
386 p = mmap(0, len, PROT_READ | PROT_WRITE,
387 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
388 *lp = p;
389 addr = h2g(p);
390 if (addr == (target_ulong)addr) {
391 page_set_flags(addr & TARGET_PAGE_MASK,
392 TARGET_PAGE_ALIGN(addr + len),
393 PAGE_RESERVED);
394 }
395#else
396 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
397 *lp = p;
398#endif
399 }
400 return p + (index & (L2_SIZE - 1));
401}
402
403#ifndef VBOX
404static inline PageDesc *page_find(target_ulong index)
405#else
406DECLINLINE(PageDesc *) page_find(target_ulong index)
407#endif
408{
409 PageDesc **lp, *p;
410 lp = page_l1_map(index);
411 if (!lp)
412 return NULL;
413
414 p = *lp;
415 if (!p)
416 return 0;
417 return p + (index & (L2_SIZE - 1));
418}
419
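/* phys_page_find_alloc() walks the physical page table rooted at l1_phys_map,
   allocating missing intermediate tables when 'alloc' is set (an extra
   indirection level exists when TARGET_PHYS_ADDR_SPACE_BITS > 32).  In the
   VBox build without the new physical memory code it also grows a dynamic RAM
   range on demand via remR3GrowDynRange() when the descriptor is still marked
   IO_MEM_RAM_MISSING. */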
420static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
421{
422 void **lp, **p;
423 PhysPageDesc *pd;
424
425 p = (void **)l1_phys_map;
426#if TARGET_PHYS_ADDR_SPACE_BITS > 32
427
428#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
429#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
430#endif
431 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
432 p = *lp;
433 if (!p) {
434 /* allocate if not found */
435 if (!alloc)
436 return NULL;
437 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
438 memset(p, 0, sizeof(void *) * L1_SIZE);
439 *lp = p;
440 }
441#endif
442 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
443 pd = *lp;
444 if (!pd) {
445 int i;
446 /* allocate if not found */
447 if (!alloc)
448 return NULL;
449 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
450 *lp = pd;
451 for (i = 0; i < L2_SIZE; i++)
452 pd[i].phys_offset = IO_MEM_UNASSIGNED;
453 }
454#if defined(VBOX) && !defined(VBOX_WITH_NEW_PHYS_CODE)
455 pd = ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
456 if (RT_UNLIKELY((pd->phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING))
457 remR3GrowDynRange(pd->phys_offset & TARGET_PAGE_MASK);
458 return pd;
459#else
460 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
461#endif
462}
463
464#ifndef VBOX
465static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
466#else
467DECLINLINE(PhysPageDesc *) phys_page_find(target_phys_addr_t index)
468#endif
469{
470 return phys_page_find_alloc(index, 0);
471}
472
473#if !defined(CONFIG_USER_ONLY)
474static void tlb_protect_code(ram_addr_t ram_addr);
475static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
476 target_ulong vaddr);
477#define mmap_lock() do { } while(0)
478#define mmap_unlock() do { } while(0)
479#endif
480
481#ifdef VBOX
482/*
483 * We don't need such a huge codegen buffer size, as we execute most of the code
484 * in raw or hwacc mode.
485 */
486#define DEFAULT_CODE_GEN_BUFFER_SIZE (8 * 1024 * 1024)
487#else
488#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
489#endif
490
491#if defined(CONFIG_USER_ONLY)
492/* Currently it is not recommended to allocate big chunks of data in
493 user mode. This will change when a dedicated libc is used */
494#define USE_STATIC_CODE_GEN_BUFFER
495#endif
496
497/* VBox allocates codegen buffer dynamically */
498#ifndef VBOX
499#ifdef USE_STATIC_CODE_GEN_BUFFER
500static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
501#endif
502#endif
503
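/* code_gen_alloc() picks the translation buffer size (the fixed 8 MB
   DEFAULT_CODE_GEN_BUFFER_SIZE for VBox, since phys_ram_size is not known
   yet), obtains an executable mapping for it (static buffer, RTMemExecAlloc
   or mmap depending on the configuration) and derives the limits used by
   tb_alloc():
       code_gen_buffer_max_size = code_gen_buffer_size - code_gen_max_block_size()
       code_gen_max_blocks      = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE
*/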
504static void code_gen_alloc(unsigned long tb_size)
505{
506#ifdef USE_STATIC_CODE_GEN_BUFFER
507 code_gen_buffer = static_code_gen_buffer;
508 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
509 map_exec(code_gen_buffer, code_gen_buffer_size);
510#else
511#ifdef VBOX
512 /* We cannot use phys_ram_size here, as it's 0 now;
513 * it only gets initialized once the RAM registration callback
514 * (REMR3NotifyPhysRamRegister()) has been called.
515 */
516 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
517#else
518 code_gen_buffer_size = tb_size;
519 if (code_gen_buffer_size == 0) {
520#if defined(CONFIG_USER_ONLY)
521 /* in user mode, phys_ram_size is not meaningful */
522 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
523#else
524 /* XXX: needs adjustments */
525 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
526#endif
527
528 }
529 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
530 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
531#endif /* VBOX */
532
533 /* The code gen buffer location may have constraints depending on
534 the host cpu and OS */
535#ifdef VBOX
536 code_gen_buffer = RTMemExecAlloc(code_gen_buffer_size);
537
538 if (!code_gen_buffer) {
539 LogRel(("REM: failed to allocate codegen buffer %lu bytes\n",
540 code_gen_buffer_size));
541 return;
542 }
543#else //!VBOX
544#if defined(__linux__)
545 {
546 int flags;
547 void *start = NULL;
548
549 flags = MAP_PRIVATE | MAP_ANONYMOUS;
550#if defined(__x86_64__)
551 flags |= MAP_32BIT;
552 /* Cannot map more than that */
553 if (code_gen_buffer_size > (800 * 1024 * 1024))
554 code_gen_buffer_size = (800 * 1024 * 1024);
555#elif defined(__sparc_v9__)
556 // Map the buffer below 2G, so we can use direct calls and branches
557 flags |= MAP_FIXED;
558 start = (void *) 0x60000000UL;
559 if (code_gen_buffer_size > (512 * 1024 * 1024))
560 code_gen_buffer_size = (512 * 1024 * 1024);
561#endif
562 code_gen_buffer = mmap(start, code_gen_buffer_size,
563 PROT_WRITE | PROT_READ | PROT_EXEC,
564 flags, -1, 0);
565 if (code_gen_buffer == MAP_FAILED) {
566 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
567 exit(1);
568 }
569 }
570#elif defined(__FreeBSD__)
571 {
572 int flags;
573 void *addr = NULL;
574 flags = MAP_PRIVATE | MAP_ANONYMOUS;
575#if defined(__x86_64__)
576 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
577 * 0x40000000 is free */
578 flags |= MAP_FIXED;
579 addr = (void *)0x40000000;
580 /* Cannot map more than that */
581 if (code_gen_buffer_size > (800 * 1024 * 1024))
582 code_gen_buffer_size = (800 * 1024 * 1024);
583#endif
584 code_gen_buffer = mmap(addr, code_gen_buffer_size,
585 PROT_WRITE | PROT_READ | PROT_EXEC,
586 flags, -1, 0);
587 if (code_gen_buffer == MAP_FAILED) {
588 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
589 exit(1);
590 }
591 }
592#else
593 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
594 if (!code_gen_buffer) {
595 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
596 exit(1);
597 }
598 map_exec(code_gen_buffer, code_gen_buffer_size);
599#endif
600 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
601#endif /* !VBOX */
602#endif /* !USE_STATIC_CODE_GEN_BUFFER */
603#ifndef VBOX
604 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
605#else
606 map_exec(code_gen_prologue, _1K);
607#endif
608
609 code_gen_buffer_max_size = code_gen_buffer_size -
610 code_gen_max_block_size();
611 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
612 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
613}
614
615/* Must be called before using the QEMU cpus. 'tb_size' is the size
616 (in bytes) allocated to the translation buffer. Zero means default
617 size. */
618void cpu_exec_init_all(unsigned long tb_size)
619{
620 cpu_gen_init();
621 code_gen_alloc(tb_size);
622 code_gen_ptr = code_gen_buffer;
623 page_init();
624#if !defined(CONFIG_USER_ONLY)
625 io_mem_init();
626#endif
627}
628
629#ifndef VBOX
630#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
631
632#define CPU_COMMON_SAVE_VERSION 1
633
634static void cpu_common_save(QEMUFile *f, void *opaque)
635{
636 CPUState *env = opaque;
637
638 qemu_put_be32s(f, &env->halted);
639 qemu_put_be32s(f, &env->interrupt_request);
640}
641
642static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
643{
644 CPUState *env = opaque;
645
646 if (version_id != CPU_COMMON_SAVE_VERSION)
647 return -EINVAL;
648
649 qemu_get_be32s(f, &env->halted);
650 qemu_get_be32s(f, &env->interrupt_request);
651 tlb_flush(env, 1);
652
653 return 0;
654}
655#endif
656#endif //!VBOX
657
658void cpu_exec_init(CPUState *env)
659{
660 CPUState **penv;
661 int cpu_index;
662
663 env->next_cpu = NULL;
664 penv = &first_cpu;
665 cpu_index = 0;
666 while (*penv != NULL) {
667 penv = (CPUState **)&(*penv)->next_cpu;
668 cpu_index++;
669 }
670 env->cpu_index = cpu_index;
671 env->nb_watchpoints = 0;
672 *penv = env;
673#ifndef VBOX
674#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
675 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
676 cpu_common_save, cpu_common_load, env);
677 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
678 cpu_save, cpu_load, env);
679#endif
680#endif // !VBOX
681}
682
683#ifndef VBOX
684static inline void invalidate_page_bitmap(PageDesc *p)
685#else
686DECLINLINE(void) invalidate_page_bitmap(PageDesc *p)
687#endif
688{
689 if (p->code_bitmap) {
690 qemu_free(p->code_bitmap);
691 p->code_bitmap = NULL;
692 }
693 p->code_write_count = 0;
694}
695
696/* set to NULL all the 'first_tb' fields in all PageDescs */
697static void page_flush_tb(void)
698{
699 int i, j;
700 PageDesc *p;
701
702 for(i = 0; i < L1_SIZE; i++) {
703 p = l1_map[i];
704 if (p) {
705 for(j = 0; j < L2_SIZE; j++) {
706 p->first_tb = NULL;
707 invalidate_page_bitmap(p);
708 p++;
709 }
710 }
711 }
712}
713
714/* flush all the translation blocks */
715/* XXX: tb_flush is currently not thread safe */
716void tb_flush(CPUState *env1)
717{
718 CPUState *env;
719#if defined(DEBUG_FLUSH)
720 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
721 (unsigned long)(code_gen_ptr - code_gen_buffer),
722 nb_tbs, nb_tbs > 0 ?
723 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
724#endif
725 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
726 cpu_abort(env1, "Internal error: code buffer overflow\n");
727
728 nb_tbs = 0;
729
730 for(env = first_cpu; env != NULL; env = env->next_cpu) {
731 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
732 }
733
734 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
735 page_flush_tb();
736
737 code_gen_ptr = code_gen_buffer;
738 /* XXX: flush processor icache at this point if cache flush is
739 expensive */
740 tb_flush_count++;
741}
742
743#ifdef DEBUG_TB_CHECK
744static void tb_invalidate_check(target_ulong address)
745{
746 TranslationBlock *tb;
747 int i;
748 address &= TARGET_PAGE_MASK;
749 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
750 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
751 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
752 address >= tb->pc + tb->size)) {
753 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
754 address, (long)tb->pc, tb->size);
755 }
756 }
757 }
758}
759
760/* verify that all the pages have correct rights for code */
761static void tb_page_check(void)
762{
763 TranslationBlock *tb;
764 int i, flags1, flags2;
765
766 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
767 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
768 flags1 = page_get_flags(tb->pc);
769 flags2 = page_get_flags(tb->pc + tb->size - 1);
770 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
771 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
772 (long)tb->pc, tb->size, flags1, flags2);
773 }
774 }
775 }
776}
777
778static void tb_jmp_check(TranslationBlock *tb)
779{
780 TranslationBlock *tb1;
781 unsigned int n1;
782
783 /* suppress any remaining jumps to this TB */
784 tb1 = tb->jmp_first;
785 for(;;) {
786 n1 = (long)tb1 & 3;
787 tb1 = (TranslationBlock *)((long)tb1 & ~3);
788 if (n1 == 2)
789 break;
790 tb1 = tb1->jmp_next[n1];
791 }
792 /* check end of list */
793 if (tb1 != tb) {
794 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
795 }
796}
797#endif // DEBUG_TB_CHECK
798
799/* invalidate one TB */
800#ifndef VBOX
801static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
802 int next_offset)
803#else
804DECLINLINE(void) tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
805 int next_offset)
806#endif
807{
808 TranslationBlock *tb1;
809 for(;;) {
810 tb1 = *ptb;
811 if (tb1 == tb) {
812 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
813 break;
814 }
815 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
816 }
817}
818
819#ifndef VBOX
820static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
821#else
822DECLINLINE(void) tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
823#endif
824{
825 TranslationBlock *tb1;
826 unsigned int n1;
827
828 for(;;) {
829 tb1 = *ptb;
830 n1 = (long)tb1 & 3;
831 tb1 = (TranslationBlock *)((long)tb1 & ~3);
832 if (tb1 == tb) {
833 *ptb = tb1->page_next[n1];
834 break;
835 }
836 ptb = &tb1->page_next[n1];
837 }
838}
839
840#ifndef VBOX
841static inline void tb_jmp_remove(TranslationBlock *tb, int n)
842#else
843DECLINLINE(void) tb_jmp_remove(TranslationBlock *tb, int n)
844#endif
845{
846 TranslationBlock *tb1, **ptb;
847 unsigned int n1;
848
849 ptb = &tb->jmp_next[n];
850 tb1 = *ptb;
851 if (tb1) {
852 /* find tb(n) in circular list */
853 for(;;) {
854 tb1 = *ptb;
855 n1 = (long)tb1 & 3;
856 tb1 = (TranslationBlock *)((long)tb1 & ~3);
857 if (n1 == n && tb1 == tb)
858 break;
859 if (n1 == 2) {
860 ptb = &tb1->jmp_first;
861 } else {
862 ptb = &tb1->jmp_next[n1];
863 }
864 }
865 /* now we can suppress tb(n) from the list */
866 *ptb = tb->jmp_next[n];
867
868 tb->jmp_next[n] = NULL;
869 }
870}
871
872/* reset the jump entry 'n' of a TB so that it is not chained to
873 another TB */
874#ifndef VBOX
875static inline void tb_reset_jump(TranslationBlock *tb, int n)
876#else
877DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
878#endif
879{
880 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
881}
882
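/* tb_phys_invalidate() unhooks a TB from every structure that references it:
   the physical hash chain, the page lists of the one or two pages it covers,
   every CPU's tb_jmp_cache and its two outgoing jump lists; any other TB that
   was chained to this one gets its jump reset via tb_reset_jump() so it falls
   back to the main execution loop instead. */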
883void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
884{
885 CPUState *env;
886 PageDesc *p;
887 unsigned int h, n1;
888 target_phys_addr_t phys_pc;
889 TranslationBlock *tb1, *tb2;
890
891 /* remove the TB from the hash list */
892 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
893 h = tb_phys_hash_func(phys_pc);
894 tb_remove(&tb_phys_hash[h], tb,
895 offsetof(TranslationBlock, phys_hash_next));
896
897 /* remove the TB from the page list */
898 if (tb->page_addr[0] != page_addr) {
899 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
900 tb_page_remove(&p->first_tb, tb);
901 invalidate_page_bitmap(p);
902 }
903 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
904 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
905 tb_page_remove(&p->first_tb, tb);
906 invalidate_page_bitmap(p);
907 }
908
909 tb_invalidated_flag = 1;
910
911 /* remove the TB from the hash list */
912 h = tb_jmp_cache_hash_func(tb->pc);
913 for(env = first_cpu; env != NULL; env = env->next_cpu) {
914 if (env->tb_jmp_cache[h] == tb)
915 env->tb_jmp_cache[h] = NULL;
916 }
917
918 /* suppress this TB from the two jump lists */
919 tb_jmp_remove(tb, 0);
920 tb_jmp_remove(tb, 1);
921
922 /* suppress any remaining jumps to this TB */
923 tb1 = tb->jmp_first;
924 for(;;) {
925 n1 = (long)tb1 & 3;
926 if (n1 == 2)
927 break;
928 tb1 = (TranslationBlock *)((long)tb1 & ~3);
929 tb2 = tb1->jmp_next[n1];
930 tb_reset_jump(tb1, n1);
931 tb1->jmp_next[n1] = NULL;
932 tb1 = tb2;
933 }
934 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
935
936#ifndef VBOX
937 tb_phys_invalidate_count++;
938#endif
939}
940
941
942#ifdef VBOX
943void tb_invalidate_virt(CPUState *env, uint32_t eip)
944{
945# if 1
946 tb_flush(env);
947# else
948 uint8_t *cs_base, *pc;
949 unsigned int flags, h, phys_pc;
950 TranslationBlock *tb, **ptb;
951
952 flags = env->hflags;
953 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
954 cs_base = env->segs[R_CS].base;
955 pc = cs_base + eip;
956
957 tb = tb_find(&ptb, (unsigned long)pc, (unsigned long)cs_base,
958 flags);
959
960 if(tb)
961 {
962# ifdef DEBUG
963 printf("invalidating TB (%08X) at %08X\n", tb, eip);
964# endif
965 tb_invalidate(tb);
966 //Note: this will leak TBs, but the whole cache will be flushed
967 // when it happens too often
968 tb->pc = 0;
969 tb->cs_base = 0;
970 tb->flags = 0;
971 }
972# endif
973}
974
975# ifdef VBOX_STRICT
976/**
977 * Gets the page offset.
978 */
979unsigned long get_phys_page_offset(target_ulong addr)
980{
981 PhysPageDesc *p = phys_page_find(addr >> TARGET_PAGE_BITS);
982 return p ? p->phys_offset : 0;
983}
984# endif /* VBOX_STRICT */
985#endif /* VBOX */
986
987#ifndef VBOX
988static inline void set_bits(uint8_t *tab, int start, int len)
989#else
990DECLINLINE(void) set_bits(uint8_t *tab, int start, int len)
991#endif
992{
993 int end, mask, end1;
994
995 end = start + len;
996 tab += start >> 3;
997 mask = 0xff << (start & 7);
998 if ((start & ~7) == (end & ~7)) {
999 if (start < end) {
1000 mask &= ~(0xff << (end & 7));
1001 *tab |= mask;
1002 }
1003 } else {
1004 *tab++ |= mask;
1005 start = (start + 8) & ~7;
1006 end1 = end & ~7;
1007 while (start < end1) {
1008 *tab++ = 0xff;
1009 start += 8;
1010 }
1011 if (start < end) {
1012 mask = ~(0xff << (end & 7));
1013 *tab |= mask;
1014 }
1015 }
1016}
1017
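/* The code bitmap holds one bit per byte of the page (TARGET_PAGE_SIZE / 8
   bytes in total); build_page_bitmap() sets the bits covered by every TB on
   the page so tb_invalidate_phys_page_fast() can skip guest writes that do
   not overlap translated code. */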
1018static void build_page_bitmap(PageDesc *p)
1019{
1020 int n, tb_start, tb_end;
1021 TranslationBlock *tb;
1022
1023 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
1024 if (!p->code_bitmap)
1025 return;
1026 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
1027
1028 tb = p->first_tb;
1029 while (tb != NULL) {
1030 n = (long)tb & 3;
1031 tb = (TranslationBlock *)((long)tb & ~3);
1032 /* NOTE: this is subtle as a TB may span two physical pages */
1033 if (n == 0) {
1034 /* NOTE: tb_end may be after the end of the page, but
1035 it is not a problem */
1036 tb_start = tb->pc & ~TARGET_PAGE_MASK;
1037 tb_end = tb_start + tb->size;
1038 if (tb_end > TARGET_PAGE_SIZE)
1039 tb_end = TARGET_PAGE_SIZE;
1040 } else {
1041 tb_start = 0;
1042 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1043 }
1044 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
1045 tb = tb->page_next[n];
1046 }
1047}
1048
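/* tb_gen_code() translates one guest block for (cs_base, pc, flags): it
   allocates a TB (flushing the whole cache and retrying if the buffer is
   full), emits host code at code_gen_ptr, rounds code_gen_ptr up to
   CODE_GEN_ALIGN and links the TB into the physical page tables; a block that
   crosses a page boundary is linked into both pages via phys_page2. */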
1049TranslationBlock *tb_gen_code(CPUState *env,
1050 target_ulong pc, target_ulong cs_base,
1051 int flags, int cflags)
1052{
1053 TranslationBlock *tb;
1054 uint8_t *tc_ptr;
1055 target_ulong phys_pc, phys_page2, virt_page2;
1056 int code_gen_size;
1057
1058 phys_pc = get_phys_addr_code(env, pc);
1059 tb = tb_alloc(pc);
1060 if (!tb) {
1061 /* flush must be done */
1062 tb_flush(env);
1063 /* cannot fail at this point */
1064 tb = tb_alloc(pc);
1065 /* Don't forget to invalidate previous TB info. */
1066 tb_invalidated_flag = 1;
1067 }
1068 tc_ptr = code_gen_ptr;
1069 tb->tc_ptr = tc_ptr;
1070 tb->cs_base = cs_base;
1071 tb->flags = flags;
1072 tb->cflags = cflags;
1073 cpu_gen_code(env, tb, &code_gen_size);
1074 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
1075
1076 /* check next page if needed */
1077 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1078 phys_page2 = -1;
1079 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1080 phys_page2 = get_phys_addr_code(env, virt_page2);
1081 }
1082 tb_link_phys(tb, phys_pc, phys_page2);
1083 return tb;
1084}
1085
1086/* invalidate all TBs which intersect with the target physical page
1087 starting in range [start;end[. NOTE: start and end must refer to
1088 the same physical page. 'is_cpu_write_access' should be true if called
1089 from a real cpu write access: the virtual CPU will exit the current
1090 TB if code is modified inside this TB. */
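/* When the write originates from the TB that is currently executing (and that
   TB is not already a single-instruction block), cpu_restore_state() recovers
   the precise guest state, a block containing only the writing instruction is
   regenerated (cflags = 1) and execution resumes via cpu_resume_from_signal(),
   so an instruction can never invalidate the block it is executing from
   mid-stream. */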
1091void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
1092 int is_cpu_write_access)
1093{
1094 int n, current_tb_modified, current_tb_not_found, current_flags;
1095 CPUState *env = cpu_single_env;
1096 PageDesc *p;
1097 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
1098 target_ulong tb_start, tb_end;
1099 target_ulong current_pc, current_cs_base;
1100
1101 p = page_find(start >> TARGET_PAGE_BITS);
1102 if (!p)
1103 return;
1104 if (!p->code_bitmap &&
1105 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1106 is_cpu_write_access) {
1107 /* build code bitmap */
1108 build_page_bitmap(p);
1109 }
1110
1111 /* we remove all the TBs in the range [start, end[ */
1112 /* XXX: see if in some cases it could be faster to invalidate all the code */
1113 current_tb_not_found = is_cpu_write_access;
1114 current_tb_modified = 0;
1115 current_tb = NULL; /* avoid warning */
1116 current_pc = 0; /* avoid warning */
1117 current_cs_base = 0; /* avoid warning */
1118 current_flags = 0; /* avoid warning */
1119 tb = p->first_tb;
1120 while (tb != NULL) {
1121 n = (long)tb & 3;
1122 tb = (TranslationBlock *)((long)tb & ~3);
1123 tb_next = tb->page_next[n];
1124 /* NOTE: this is subtle as a TB may span two physical pages */
1125 if (n == 0) {
1126 /* NOTE: tb_end may be after the end of the page, but
1127 it is not a problem */
1128 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1129 tb_end = tb_start + tb->size;
1130 } else {
1131 tb_start = tb->page_addr[1];
1132 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1133 }
1134 if (!(tb_end <= start || tb_start >= end)) {
1135#ifdef TARGET_HAS_PRECISE_SMC
1136 if (current_tb_not_found) {
1137 current_tb_not_found = 0;
1138 current_tb = NULL;
1139 if (env->mem_io_pc) {
1140 /* now we have a real cpu fault */
1141 current_tb = tb_find_pc(env->mem_io_pc);
1142 }
1143 }
1144 if (current_tb == tb &&
1145 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1146 /* If we are modifying the current TB, we must stop
1147 its execution. We could be more precise by checking
1148 that the modification is after the current PC, but it
1149 would require a specialized function to partially
1150 restore the CPU state */
1151
1152 current_tb_modified = 1;
1153 cpu_restore_state(current_tb, env,
1154 env->mem_io_pc, NULL);
1155#if defined(TARGET_I386)
1156 current_flags = env->hflags;
1157 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
1158 current_cs_base = (target_ulong)env->segs[R_CS].base;
1159 current_pc = current_cs_base + env->eip;
1160#else
1161#error unsupported CPU
1162#endif
1163 }
1164#endif /* TARGET_HAS_PRECISE_SMC */
1165 /* we need to do that to handle the case where a signal
1166 occurs while doing tb_phys_invalidate() */
1167 saved_tb = NULL;
1168 if (env) {
1169 saved_tb = env->current_tb;
1170 env->current_tb = NULL;
1171 }
1172 tb_phys_invalidate(tb, -1);
1173 if (env) {
1174 env->current_tb = saved_tb;
1175 if (env->interrupt_request && env->current_tb)
1176 cpu_interrupt(env, env->interrupt_request);
1177 }
1178 }
1179 tb = tb_next;
1180 }
1181#if !defined(CONFIG_USER_ONLY)
1182 /* if no code remaining, no need to continue to use slow writes */
1183 if (!p->first_tb) {
1184 invalidate_page_bitmap(p);
1185 if (is_cpu_write_access) {
1186 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1187 }
1188 }
1189#endif
1190#ifdef TARGET_HAS_PRECISE_SMC
1191 if (current_tb_modified) {
1192 /* we generate a block containing just the instruction
1193 modifying the memory. It will ensure that it cannot modify
1194 itself */
1195 env->current_tb = NULL;
1196 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1197 cpu_resume_from_signal(env, NULL);
1198 }
1199#endif
1200}
1201
1202
1203/* len must be <= 8 and start must be a multiple of len */
1204#ifndef VBOX
1205static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1206#else
1207DECLINLINE(void) tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1208#endif
1209{
1210 PageDesc *p;
1211 int offset, b;
1212#if 0
1213 if (1) {
1214 if (loglevel) {
1215 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1216 cpu_single_env->mem_io_vaddr, len,
1217 cpu_single_env->eip,
1218 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1219 }
1220 }
1221#endif
1222 p = page_find(start >> TARGET_PAGE_BITS);
1223 if (!p)
1224 return;
1225 if (p->code_bitmap) {
1226 offset = start & ~TARGET_PAGE_MASK;
1227 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1228 if (b & ((1 << len) - 1))
1229 goto do_invalidate;
1230 } else {
1231 do_invalidate:
1232 tb_invalidate_phys_page_range(start, start + len, 1);
1233 }
1234}
1235
1236
1237#if !defined(CONFIG_SOFTMMU)
1238static void tb_invalidate_phys_page(target_phys_addr_t addr,
1239 unsigned long pc, void *puc)
1240{
1241 int n, current_flags, current_tb_modified;
1242 target_ulong current_pc, current_cs_base;
1243 PageDesc *p;
1244 TranslationBlock *tb, *current_tb;
1245#ifdef TARGET_HAS_PRECISE_SMC
1246 CPUState *env = cpu_single_env;
1247#endif
1248
1249 addr &= TARGET_PAGE_MASK;
1250 p = page_find(addr >> TARGET_PAGE_BITS);
1251 if (!p)
1252 return;
1253 tb = p->first_tb;
1254 current_tb_modified = 0;
1255 current_tb = NULL;
1256 current_pc = 0; /* avoid warning */
1257 current_cs_base = 0; /* avoid warning */
1258 current_flags = 0; /* avoid warning */
1259#ifdef TARGET_HAS_PRECISE_SMC
1260 if (tb && pc != 0) {
1261 current_tb = tb_find_pc(pc);
1262 }
1263#endif
1264 while (tb != NULL) {
1265 n = (long)tb & 3;
1266 tb = (TranslationBlock *)((long)tb & ~3);
1267#ifdef TARGET_HAS_PRECISE_SMC
1268 if (current_tb == tb &&
1269 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1270 /* If we are modifying the current TB, we must stop
1271 its execution. We could be more precise by checking
1272 that the modification is after the current PC, but it
1273 would require a specialized function to partially
1274 restore the CPU state */
1275
1276 current_tb_modified = 1;
1277 cpu_restore_state(current_tb, env, pc, puc);
1278#if defined(TARGET_I386)
1279 current_flags = env->hflags;
1280 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
1281 current_cs_base = (target_ulong)env->segs[R_CS].base;
1282 current_pc = current_cs_base + env->eip;
1283#else
1284#error unsupported CPU
1285#endif
1286 }
1287#endif /* TARGET_HAS_PRECISE_SMC */
1288 tb_phys_invalidate(tb, addr);
1289 tb = tb->page_next[n];
1290 }
1291 p->first_tb = NULL;
1292#ifdef TARGET_HAS_PRECISE_SMC
1293 if (current_tb_modified) {
1294 /* we generate a block containing just the instruction
1295 modifying the memory. It will ensure that it cannot modify
1296 itself */
1297 env->current_tb = NULL;
1298 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1299 cpu_resume_from_signal(env, puc);
1300 }
1301#endif
1302}
1303#endif
1304
1305/* add the tb in the target page and protect it if necessary */
1306#ifndef VBOX
1307static inline void tb_alloc_page(TranslationBlock *tb,
1308 unsigned int n, target_ulong page_addr)
1309#else
1310DECLINLINE(void) tb_alloc_page(TranslationBlock *tb,
1311 unsigned int n, target_ulong page_addr)
1312#endif
1313{
1314 PageDesc *p;
1315 TranslationBlock *last_first_tb;
1316
1317 tb->page_addr[n] = page_addr;
1318 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1319 tb->page_next[n] = p->first_tb;
1320 last_first_tb = p->first_tb;
1321 p->first_tb = (TranslationBlock *)((long)tb | n);
1322 invalidate_page_bitmap(p);
1323
1324#if defined(TARGET_HAS_SMC) || 1
1325
1326#if defined(CONFIG_USER_ONLY)
1327 if (p->flags & PAGE_WRITE) {
1328 target_ulong addr;
1329 PageDesc *p2;
1330 int prot;
1331
1332 /* force the host page as non writable (writes will have a
1333 page fault + mprotect overhead) */
1334 page_addr &= qemu_host_page_mask;
1335 prot = 0;
1336 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1337 addr += TARGET_PAGE_SIZE) {
1338
1339 p2 = page_find (addr >> TARGET_PAGE_BITS);
1340 if (!p2)
1341 continue;
1342 prot |= p2->flags;
1343 p2->flags &= ~PAGE_WRITE;
1344 page_get_flags(addr);
1345 }
1346 mprotect(g2h(page_addr), qemu_host_page_size,
1347 (prot & PAGE_BITS) & ~PAGE_WRITE);
1348#ifdef DEBUG_TB_INVALIDATE
1349 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1350 page_addr);
1351#endif
1352 }
1353#else
1354 /* if some code is already present, then the pages are already
1355 protected. So we handle the case where only the first TB is
1356 allocated in a physical page */
1357 if (!last_first_tb) {
1358 tlb_protect_code(page_addr);
1359 }
1360#endif
1361
1362#endif /* TARGET_HAS_SMC */
1363}
1364
1365/* Allocate a new translation block. Flush the translation buffer if
1366 too many translation blocks or too much generated code. */
1367TranslationBlock *tb_alloc(target_ulong pc)
1368{
1369 TranslationBlock *tb;
1370
1371 if (nb_tbs >= code_gen_max_blocks ||
1372#ifndef VBOX
1373 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1374#else
1375 (code_gen_ptr - code_gen_buffer) >= (int)code_gen_buffer_max_size)
1376#endif
1377 return NULL;
1378 tb = &tbs[nb_tbs++];
1379 tb->pc = pc;
1380 tb->cflags = 0;
1381 return tb;
1382}
1383
1384void tb_free(TranslationBlock *tb)
1385{
1386 /* In practice this is mostly used for single-use temporary TBs.
1387 Ignore the hard cases and just back up if this TB happens to
1388 be the last one generated. */
1389 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1390 code_gen_ptr = tb->tc_ptr;
1391 nb_tbs--;
1392 }
1393}
1394
1395/* add a new TB and link it to the physical page tables. phys_page2 is
1396 (-1) to indicate that only one page contains the TB. */
1397void tb_link_phys(TranslationBlock *tb,
1398 target_ulong phys_pc, target_ulong phys_page2)
1399{
1400 unsigned int h;
1401 TranslationBlock **ptb;
1402
1403 /* Grab the mmap lock to stop another thread invalidating this TB
1404 before we are done. */
1405 mmap_lock();
1406 /* add in the physical hash table */
1407 h = tb_phys_hash_func(phys_pc);
1408 ptb = &tb_phys_hash[h];
1409 tb->phys_hash_next = *ptb;
1410 *ptb = tb;
1411
1412 /* add in the page list */
1413 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1414 if (phys_page2 != -1)
1415 tb_alloc_page(tb, 1, phys_page2);
1416 else
1417 tb->page_addr[1] = -1;
1418
1419 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1420 tb->jmp_next[0] = NULL;
1421 tb->jmp_next[1] = NULL;
1422
1423 /* init original jump addresses */
1424 if (tb->tb_next_offset[0] != 0xffff)
1425 tb_reset_jump(tb, 0);
1426 if (tb->tb_next_offset[1] != 0xffff)
1427 tb_reset_jump(tb, 1);
1428
1429#ifdef DEBUG_TB_CHECK
1430 tb_page_check();
1431#endif
1432 mmap_unlock();
1433}
1434
1435/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1436 tb[1].tc_ptr. Return NULL if not found */
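/* The binary search below is valid because TBs are handed out from tbs[] in
   allocation order while code_gen_ptr only grows between flushes, so tc_ptr
   is non-decreasing with the array index. */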
1437TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1438{
1439 int m_min, m_max, m;
1440 unsigned long v;
1441 TranslationBlock *tb;
1442
1443 if (nb_tbs <= 0)
1444 return NULL;
1445 if (tc_ptr < (unsigned long)code_gen_buffer ||
1446 tc_ptr >= (unsigned long)code_gen_ptr)
1447 return NULL;
1448 /* binary search (cf Knuth) */
1449 m_min = 0;
1450 m_max = nb_tbs - 1;
1451 while (m_min <= m_max) {
1452 m = (m_min + m_max) >> 1;
1453 tb = &tbs[m];
1454 v = (unsigned long)tb->tc_ptr;
1455 if (v == tc_ptr)
1456 return tb;
1457 else if (tc_ptr < v) {
1458 m_max = m - 1;
1459 } else {
1460 m_min = m + 1;
1461 }
1462 }
1463 return &tbs[m_max];
1464}
1465
1466static void tb_reset_jump_recursive(TranslationBlock *tb);
1467
1468#ifndef VBOX
1469static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1470#else
1471DECLINLINE(void) tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1472#endif
1473{
1474 TranslationBlock *tb1, *tb_next, **ptb;
1475 unsigned int n1;
1476
1477 tb1 = tb->jmp_next[n];
1478 if (tb1 != NULL) {
1479 /* find head of list */
1480 for(;;) {
1481 n1 = (long)tb1 & 3;
1482 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1483 if (n1 == 2)
1484 break;
1485 tb1 = tb1->jmp_next[n1];
1486 }
1487 /* we are now sure that tb jumps to tb1 */
1488 tb_next = tb1;
1489
1490 /* remove tb from the jmp_first list */
1491 ptb = &tb_next->jmp_first;
1492 for(;;) {
1493 tb1 = *ptb;
1494 n1 = (long)tb1 & 3;
1495 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1496 if (n1 == n && tb1 == tb)
1497 break;
1498 ptb = &tb1->jmp_next[n1];
1499 }
1500 *ptb = tb->jmp_next[n];
1501 tb->jmp_next[n] = NULL;
1502
1503 /* suppress the jump to next tb in generated code */
1504 tb_reset_jump(tb, n);
1505
1506 /* suppress jumps in the tb on which we could have jumped */
1507 tb_reset_jump_recursive(tb_next);
1508 }
1509}
1510
1511static void tb_reset_jump_recursive(TranslationBlock *tb)
1512{
1513 tb_reset_jump_recursive2(tb, 0);
1514 tb_reset_jump_recursive2(tb, 1);
1515}
1516
1517#if defined(TARGET_HAS_ICE)
1518static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1519{
1520 target_ulong addr, pd;
1521 ram_addr_t ram_addr;
1522 PhysPageDesc *p;
1523
1524 addr = cpu_get_phys_page_debug(env, pc);
1525 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1526 if (!p) {
1527 pd = IO_MEM_UNASSIGNED;
1528 } else {
1529 pd = p->phys_offset;
1530 }
1531 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1532 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1533}
1534#endif
1535
1536/* Add a watchpoint. */
1537int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
1538{
1539 int i;
1540
1541 for (i = 0; i < env->nb_watchpoints; i++) {
1542 if (addr == env->watchpoint[i].vaddr)
1543 return 0;
1544 }
1545 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1546 return -1;
1547
1548 i = env->nb_watchpoints++;
1549 env->watchpoint[i].vaddr = addr;
1550 env->watchpoint[i].type = type;
1551 tlb_flush_page(env, addr);
1552 /* FIXME: This flush is needed because of the hack to make memory ops
1553 terminate the TB. It can be removed once the proper IO trap and
1554 re-execute bits are in. */
1555 tb_flush(env);
1556 return i;
1557}
1558
1559/* Remove a watchpoint. */
1560int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1561{
1562 int i;
1563
1564 for (i = 0; i < env->nb_watchpoints; i++) {
1565 if (addr == env->watchpoint[i].vaddr) {
1566 env->nb_watchpoints--;
1567 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1568 tlb_flush_page(env, addr);
1569 return 0;
1570 }
1571 }
1572 return -1;
1573}
1574
1575/* Remove all watchpoints. */
1576void cpu_watchpoint_remove_all(CPUState *env) {
1577 int i;
1578
1579 for (i = 0; i < env->nb_watchpoints; i++) {
1580 tlb_flush_page(env, env->watchpoint[i].vaddr);
1581 }
1582 env->nb_watchpoints = 0;
1583}
1584
1585/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1586 breakpoint is reached */
1587int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1588{
1589#if defined(TARGET_HAS_ICE)
1590 int i;
1591
1592 for(i = 0; i < env->nb_breakpoints; i++) {
1593 if (env->breakpoints[i] == pc)
1594 return 0;
1595 }
1596
1597 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1598 return -1;
1599 env->breakpoints[env->nb_breakpoints++] = pc;
1600
1601 breakpoint_invalidate(env, pc);
1602 return 0;
1603#else
1604 return -1;
1605#endif
1606}
1607
1608/* remove all breakpoints */
1609void cpu_breakpoint_remove_all(CPUState *env) {
1610#if defined(TARGET_HAS_ICE)
1611 int i;
1612 for(i = 0; i < env->nb_breakpoints; i++) {
1613 breakpoint_invalidate(env, env->breakpoints[i]);
1614 }
1615 env->nb_breakpoints = 0;
1616#endif
1617}
1618
1619/* remove a breakpoint */
1620int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1621{
1622#if defined(TARGET_HAS_ICE)
1623 int i;
1624 for(i = 0; i < env->nb_breakpoints; i++) {
1625 if (env->breakpoints[i] == pc)
1626 goto found;
1627 }
1628 return -1;
1629 found:
1630 env->nb_breakpoints--;
1631 if (i < env->nb_breakpoints)
1632 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1633
1634 breakpoint_invalidate(env, pc);
1635 return 0;
1636#else
1637 return -1;
1638#endif
1639}
1640
1641/* enable or disable single step mode. EXCP_DEBUG is returned by the
1642 CPU loop after each instruction */
1643void cpu_single_step(CPUState *env, int enabled)
1644{
1645#if defined(TARGET_HAS_ICE)
1646 if (env->singlestep_enabled != enabled) {
1647 env->singlestep_enabled = enabled;
1648 /* must flush all the translated code to avoid inconsistencies */
1649 /* XXX: only flush what is necessary */
1650 tb_flush(env);
1651 }
1652#endif
1653}
1654
1655#ifndef VBOX
1656/* enable or disable low-level logging */
1657void cpu_set_log(int log_flags)
1658{
1659 loglevel = log_flags;
1660 if (loglevel && !logfile) {
1661 logfile = fopen(logfilename, "w");
1662 if (!logfile) {
1663 perror(logfilename);
1664 _exit(1);
1665 }
1666#if !defined(CONFIG_SOFTMMU)
1667 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1668 {
1669 static uint8_t logfile_buf[4096];
1670 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1671 }
1672#else
1673 setvbuf(logfile, NULL, _IOLBF, 0);
1674#endif
1675 }
1676}
1677
1678void cpu_set_log_filename(const char *filename)
1679{
1680 logfilename = strdup(filename);
1681}
1682#endif /* !VBOX */
1683
1684/* mask must never be zero, except for A20 change call */
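/* cpu_interrupt() sets the requested bits in env->interrupt_request
   (atomically in the VBox build) and then makes sure the CPU loop notices
   soon: with icount it forces the decrementer to expire, otherwise it unlinks
   the currently executing TB chain with tb_reset_jump_recursive() so
   execution drops back to the main loop. */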
1685void cpu_interrupt(CPUState *env, int mask)
1686{
1687#if !defined(USE_NPTL)
1688 TranslationBlock *tb;
1689 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1690#endif
1691 int old_mask;
1692
1693 old_mask = env->interrupt_request;
1694#ifdef VBOX
1695 VM_ASSERT_EMT(env->pVM);
1696 ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, mask);
1697#else /* !VBOX */
1698 /* FIXME: This is probably not threadsafe. A different thread could
1699 be in the middle of a read-modify-write operation. */
1700 env->interrupt_request |= mask;
1701#endif /* !VBOX */
1702#if defined(USE_NPTL)
1703 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1704 problem and hope the cpu will stop of its own accord. For userspace
1705 emulation this often isn't actually as bad as it sounds. Often
1706 signals are used primarily to interrupt blocking syscalls. */
1707#else
1708 if (use_icount) {
1709 env->icount_decr.u16.high = 0xffff;
1710#ifndef CONFIG_USER_ONLY
1711 /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
1712 an async event happened and we need to process it. */
1713 if (!can_do_io(env)
1714 && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1715 cpu_abort(env, "Raised interrupt while not in I/O function");
1716 }
1717#endif
1718 } else {
1719 tb = env->current_tb;
1720 /* if the cpu is currently executing code, we must unlink it and
1721 all the potentially executing TB */
1722 if (tb && !testandset(&interrupt_lock)) {
1723 env->current_tb = NULL;
1724 tb_reset_jump_recursive(tb);
1725 resetlock(&interrupt_lock);
1726 }
1727 }
1728#endif
1729}
1730
1731void cpu_reset_interrupt(CPUState *env, int mask)
1732{
1733#ifdef VBOX
1734 /*
1735 * Note: the current implementation can be executed by another thread without problems; make sure this remains true
1736 * for future changes!
1737 */
1738 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~mask);
1739#else /* !VBOX */
1740 env->interrupt_request &= ~mask;
1741#endif /* !VBOX */
1742}
1743
1744#ifndef VBOX
1745CPULogItem cpu_log_items[] = {
1746 { CPU_LOG_TB_OUT_ASM, "out_asm",
1747 "show generated host assembly code for each compiled TB" },
1748 { CPU_LOG_TB_IN_ASM, "in_asm",
1749 "show target assembly code for each compiled TB" },
1750 { CPU_LOG_TB_OP, "op",
1751 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1752#ifdef TARGET_I386
1753 { CPU_LOG_TB_OP_OPT, "op_opt",
1754 "show micro ops after optimization for each compiled TB" },
1755#endif
1756 { CPU_LOG_INT, "int",
1757 "show interrupts/exceptions in short format" },
1758 { CPU_LOG_EXEC, "exec",
1759 "show trace before each executed TB (lots of logs)" },
1760 { CPU_LOG_TB_CPU, "cpu",
1761 "show CPU state before bloc translation" },
1762#ifdef TARGET_I386
1763 { CPU_LOG_PCALL, "pcall",
1764 "show protected mode far calls/returns/exceptions" },
1765#endif
1766#ifdef DEBUG_IOPORT
1767 { CPU_LOG_IOPORT, "ioport",
1768 "show all i/o ports accesses" },
1769#endif
1770 { 0, NULL, NULL },
1771};
1772
1773static int cmp1(const char *s1, int n, const char *s2)
1774{
1775 if (strlen(s2) != n)
1776 return 0;
1777 return memcmp(s1, s2, n) == 0;
1778}
1779
1780/* takes a comma-separated list of log masks. Returns 0 on error. */
1781int cpu_str_to_log_mask(const char *str)
1782{
1783 CPULogItem *item;
1784 int mask;
1785 const char *p, *p1;
1786
1787 p = str;
1788 mask = 0;
1789 for(;;) {
1790 p1 = strchr(p, ',');
1791 if (!p1)
1792 p1 = p + strlen(p);
1793 if(cmp1(p,p1-p,"all")) {
1794 for(item = cpu_log_items; item->mask != 0; item++) {
1795 mask |= item->mask;
1796 }
1797 } else {
1798 for(item = cpu_log_items; item->mask != 0; item++) {
1799 if (cmp1(p, p1 - p, item->name))
1800 goto found;
1801 }
1802 return 0;
1803 }
1804 found:
1805 mask |= item->mask;
1806 if (*p1 != ',')
1807 break;
1808 p = p1 + 1;
1809 }
1810 return mask;
1811}
1812#endif /* !VBOX */
1813
1814#ifndef VBOX /* VBOX: we have our own routine. */
1815void cpu_abort(CPUState *env, const char *fmt, ...)
1816{
1817 va_list ap;
1818
1819 va_start(ap, fmt);
1820 fprintf(stderr, "qemu: fatal: ");
1821 vfprintf(stderr, fmt, ap);
1822 fprintf(stderr, "\n");
1823#ifdef TARGET_I386
1824 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1825#else
1826 cpu_dump_state(env, stderr, fprintf, 0);
1827#endif
1828 va_end(ap);
1829 abort();
1830}
1831#endif /* !VBOX */
1832
1833#ifndef VBOX
1834CPUState *cpu_copy(CPUState *env)
1835{
1836 CPUState *new_env = cpu_init(env->cpu_model_str);
1837 /* preserve chaining and index */
1838 CPUState *next_cpu = new_env->next_cpu;
1839 int cpu_index = new_env->cpu_index;
1840 memcpy(new_env, env, sizeof(CPUState));
1841 new_env->next_cpu = next_cpu;
1842 new_env->cpu_index = cpu_index;
1843 return new_env;
1844}
1845#endif
1846
1847#if !defined(CONFIG_USER_ONLY)
1848
1849#ifndef VBOX
1850static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1851#else
1852DECLINLINE(void) tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1853#endif
1854{
1855 unsigned int i;
1856
1857 /* Discard jump cache entries for any tb which might potentially
1858 overlap the flushed page. */
1859 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1860 memset (&env->tb_jmp_cache[i], 0,
1861 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1862
1863 i = tb_jmp_cache_hash_page(addr);
1864 memset (&env->tb_jmp_cache[i], 0,
1865 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1866
1867#ifdef VBOX
1868 /* inform raw mode about TLB page flush */
1869 remR3FlushPage(env, addr);
1870#endif /* VBOX */
1871}
1872
1873/* NOTE: if flush_global is true, also flush global entries (not
1874 implemented yet) */
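/* tlb_flush() invalidates every entry of every MMU mode by storing -1 in the
   addr_read/addr_write/addr_code fields (a value that never matches a
   page-aligned lookup), clears tb_jmp_cache and, in the VBox build, also
   notifies the raw-mode side through remR3FlushTLB(). */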
1875void tlb_flush(CPUState *env, int flush_global)
1876{
1877 int i;
1878#if defined(DEBUG_TLB)
1879 printf("tlb_flush:\n");
1880#endif
1881 /* must reset current TB so that interrupts cannot modify the
1882 links while we are modifying them */
1883 env->current_tb = NULL;
1884
1885 for(i = 0; i < CPU_TLB_SIZE; i++) {
1886 env->tlb_table[0][i].addr_read = -1;
1887 env->tlb_table[0][i].addr_write = -1;
1888 env->tlb_table[0][i].addr_code = -1;
1889 env->tlb_table[1][i].addr_read = -1;
1890 env->tlb_table[1][i].addr_write = -1;
1891 env->tlb_table[1][i].addr_code = -1;
1892#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
1893 env->phys_addends[0][i] = -1;
1894 env->phys_addends[1][i] = -1;
1895#endif
1896#if (NB_MMU_MODES >= 3)
1897 env->tlb_table[2][i].addr_read = -1;
1898 env->tlb_table[2][i].addr_write = -1;
1899 env->tlb_table[2][i].addr_code = -1;
1900#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
1901 env->phys_addends[2][i] = -1;
1902#endif
1903#if (NB_MMU_MODES == 4)
1904 env->tlb_table[3][i].addr_read = -1;
1905 env->tlb_table[3][i].addr_write = -1;
1906 env->tlb_table[3][i].addr_code = -1;
1907#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
1908 env->phys_addends[3][i] = -1;
1909#endif
1910#endif
1911#endif
1912 }
1913
1914 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1915
1916#ifdef VBOX
1917 /* inform raw mode about TLB flush */
1918 remR3FlushTLB(env, flush_global);
1919#endif
1920#ifdef USE_KQEMU
1921 if (env->kqemu_enabled) {
1922 kqemu_flush(env, flush_global);
1923 }
1924#endif
1925 tlb_flush_count++;
1926}
1927
1928#ifndef VBOX
1929static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1930#else
1931DECLINLINE(void) tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1932#endif
1933{
1934 if (addr == (tlb_entry->addr_read &
1935 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1936 addr == (tlb_entry->addr_write &
1937 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1938 addr == (tlb_entry->addr_code &
1939 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1940 tlb_entry->addr_read = -1;
1941 tlb_entry->addr_write = -1;
1942 tlb_entry->addr_code = -1;
1943 }
1944}
1945
1946void tlb_flush_page(CPUState *env, target_ulong addr)
1947{
1948 int i;
1949
1950#if defined(DEBUG_TLB)
1951 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1952#endif
1953 /* must reset current TB so that interrupts cannot modify the
1954 links while we are modifying them */
1955 env->current_tb = NULL;
1956
1957 addr &= TARGET_PAGE_MASK;
1958 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1959 tlb_flush_entry(&env->tlb_table[0][i], addr);
1960 tlb_flush_entry(&env->tlb_table[1][i], addr);
1961#if (NB_MMU_MODES >= 3)
1962 tlb_flush_entry(&env->tlb_table[2][i], addr);
1963#if (NB_MMU_MODES == 4)
1964 tlb_flush_entry(&env->tlb_table[3][i], addr);
1965#endif
1966#endif
1967
1968 tlb_flush_jmp_cache(env, addr);
1969
1970#ifdef USE_KQEMU
1971 if (env->kqemu_enabled) {
1972 kqemu_flush_page(env, addr);
1973 }
1974#endif
1975}
1976
1977/* update the TLBs so that writes to code in the virtual page 'addr'
1978 can be detected */
1979static void tlb_protect_code(ram_addr_t ram_addr)
1980{
1981 cpu_physical_memory_reset_dirty(ram_addr,
1982 ram_addr + TARGET_PAGE_SIZE,
1983 CODE_DIRTY_FLAG);
1984#if defined(VBOX) && defined(REM_MONITOR_CODE_PAGES)
1985 /** @todo Retest this? This function has changed... */
1986 remR3ProtectCode(cpu_single_env, ram_addr);
1987#endif
1988}
1989
1990/* update the TLB so that writes in physical page 'phys_addr' are no longer
1991 tested for self modifying code */
1992static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1993 target_ulong vaddr)
1994{
1995#ifdef VBOX
1996 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
1997#endif
1998 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1999}
2000
2001#ifndef VBOX
2002static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
2003 unsigned long start, unsigned long length)
2004#else
2005DECLINLINE(void) tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
2006 unsigned long start, unsigned long length)
2007#endif
2008{
2009 unsigned long addr;
2010
2011#ifdef VBOX
2012 if (start & 3)
2013 return;
2014#endif
2015 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2016 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2017 if ((addr - start) < length) {
2018 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
2019 }
2020 }
2021}
2022
2023void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
2024 int dirty_flags)
2025{
2026 CPUState *env;
2027 unsigned long length, start1;
2028 int i, mask, len;
2029 uint8_t *p;
2030
2031 start &= TARGET_PAGE_MASK;
2032 end = TARGET_PAGE_ALIGN(end);
2033
2034 length = end - start;
2035 if (length == 0)
2036 return;
2037 len = length >> TARGET_PAGE_BITS;
2038#ifdef USE_KQEMU
2039 /* XXX: should not depend on cpu context */
2040 env = first_cpu;
2041 if (env->kqemu_enabled) {
2042 ram_addr_t addr;
2043 addr = start;
2044 for(i = 0; i < len; i++) {
2045 kqemu_set_notdirty(env, addr);
2046 addr += TARGET_PAGE_SIZE;
2047 }
2048 }
2049#endif
2050 mask = ~dirty_flags;
2051 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
2052#ifdef VBOX
2053 if (RT_LIKELY((start >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2054#endif
2055 for(i = 0; i < len; i++)
2056 p[i] &= mask;
2057
2058 /* we modify the TLB cache so that the dirty bit will be set again
2059 when accessing the range */
2060#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2061 start1 = start;
2062#elif !defined(VBOX)
2063 start1 = start + (unsigned long)phys_ram_base;
2064#else
2065 start1 = (unsigned long)remR3TlbGCPhys2Ptr(first_cpu, start, 1 /*fWritable*/); /** @todo this can be harmful with VBOX_WITH_NEW_PHYS_CODE, fix interface/whatever. */
2066#endif
2067 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2068 for(i = 0; i < CPU_TLB_SIZE; i++)
2069 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
2070 for(i = 0; i < CPU_TLB_SIZE; i++)
2071 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
2072#if (NB_MMU_MODES >= 3)
2073 for(i = 0; i < CPU_TLB_SIZE; i++)
2074 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
2075#if (NB_MMU_MODES == 4)
2076 for(i = 0; i < CPU_TLB_SIZE; i++)
2077 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
2078#endif
2079#endif
2080 }
2081}
2082
2083#ifndef VBOX
2084int cpu_physical_memory_set_dirty_tracking(int enable)
2085{
2086 in_migration = enable;
2087 return 0;
2088}
2089
2090int cpu_physical_memory_get_dirty_tracking(void)
2091{
2092 return in_migration;
2093}
2094#endif
2095
2096#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2097DECLINLINE(void) tlb_update_dirty(CPUTLBEntry *tlb_entry, target_phys_addr_t phys_addend)
2098#else
2099static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2100#endif
2101{
2102 ram_addr_t ram_addr;
2103
2104 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2105 /* RAM case */
2106#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2107 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2108#elif !defined(VBOX)
2109 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
2110 tlb_entry->addend - (unsigned long)phys_ram_base;
2111#else
2112 Assert(phys_addend != -1);
2113 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + phys_addend;
2114#endif
2115 if (!cpu_physical_memory_is_dirty(ram_addr)) {
2116 tlb_entry->addr_write |= TLB_NOTDIRTY;
2117 }
2118 }
2119}
2120
2121/* update the TLB according to the current state of the dirty bits */
2122void cpu_tlb_update_dirty(CPUState *env)
2123{
2124 int i;
2125#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2126 for(i = 0; i < CPU_TLB_SIZE; i++)
2127 tlb_update_dirty(&env->tlb_table[0][i], env->phys_addends[0][i]);
2128 for(i = 0; i < CPU_TLB_SIZE; i++)
2129 tlb_update_dirty(&env->tlb_table[1][i], env->phys_addends[1][i]);
2130#if (NB_MMU_MODES >= 3)
2131 for(i = 0; i < CPU_TLB_SIZE; i++)
2132 tlb_update_dirty(&env->tlb_table[2][i], env->phys_addends[2][i]);
2133#if (NB_MMU_MODES == 4)
2134 for(i = 0; i < CPU_TLB_SIZE; i++)
2135 tlb_update_dirty(&env->tlb_table[3][i], env->phys_addends[3][i]);
2136#endif
2137#endif
2138#else /* VBOX */
2139 for(i = 0; i < CPU_TLB_SIZE; i++)
2140 tlb_update_dirty(&env->tlb_table[0][i]);
2141 for(i = 0; i < CPU_TLB_SIZE; i++)
2142 tlb_update_dirty(&env->tlb_table[1][i]);
2143#if (NB_MMU_MODES >= 3)
2144 for(i = 0; i < CPU_TLB_SIZE; i++)
2145 tlb_update_dirty(&env->tlb_table[2][i]);
2146#if (NB_MMU_MODES == 4)
2147 for(i = 0; i < CPU_TLB_SIZE; i++)
2148 tlb_update_dirty(&env->tlb_table[3][i]);
2149#endif
2150#endif
2151#endif /* VBOX */
2152}
2153
2154#ifndef VBOX
2155static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2156#else
2157DECLINLINE(void) tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2158#endif
2159{
2160 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2161 tlb_entry->addr_write = vaddr;
2162}
2163
2164
2165/* update the TLB corresponding to virtual page vaddr and phys addr
2166 addr so that it is no longer dirty */
2167#ifndef VBOX
2168static inline void tlb_set_dirty(CPUState *env,
2169 unsigned long addr, target_ulong vaddr)
2170#else
2171DECLINLINE(void) tlb_set_dirty(CPUState *env,
2172 unsigned long addr, target_ulong vaddr)
2173#endif
2174{
2175 int i;
2176
2177 addr &= TARGET_PAGE_MASK;
2178 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2179 tlb_set_dirty1(&env->tlb_table[0][i], addr);
2180 tlb_set_dirty1(&env->tlb_table[1][i], addr);
2181#if (NB_MMU_MODES >= 3)
2182 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
2183#if (NB_MMU_MODES == 4)
2184 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
2185#endif
2186#endif
2187}
2188
2189/* add a new TLB entry. At most one entry for a given virtual address
2190 is permitted. Return 0 if OK or 2 if the page could not be mapped
2191 (can only happen in non SOFTMMU mode for I/O pages or pages
2192 conflicting with the host address space). */
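/* Illustrative call (hypothetical values): a target MMU fault handler that
   has resolved a guest page would typically do
       tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
                         paddr & TARGET_PAGE_MASK,
                         PAGE_READ | PAGE_WRITE | PAGE_EXEC,
                         mmu_idx, 1);
   where the protection bits reflect the guest page table entry and the last
   argument is is_softmmu. */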
2193int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2194 target_phys_addr_t paddr, int prot,
2195 int mmu_idx, int is_softmmu)
2196{
2197 PhysPageDesc *p;
2198 unsigned long pd;
2199 unsigned int index;
2200 target_ulong address;
2201 target_ulong code_address;
2202 target_phys_addr_t addend;
2203 int ret;
2204 CPUTLBEntry *te;
2205 int i;
2206 target_phys_addr_t iotlb;
2207#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2208 int read_mods = 0, write_mods = 0, code_mods = 0;
2209#endif
2210
2211 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2212 if (!p) {
2213 pd = IO_MEM_UNASSIGNED;
2214 } else {
2215 pd = p->phys_offset;
2216 }
2217#if defined(DEBUG_TLB)
2218 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2219 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2220#endif
2221
2222 ret = 0;
2223 address = vaddr;
2224 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2225 /* IO memory case (romd handled later) */
2226 address |= TLB_MMIO;
2227 }
2228#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2229 addend = pd & TARGET_PAGE_MASK;
2230#elif !defined(VBOX)
2231 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
2232#else
2233 /** @todo this is racing the phys_page_find call above since it may register
2234 * a new chunk of memory... */
2235 addend = (unsigned long)remR3TlbGCPhys2Ptr(env,
2236 pd & TARGET_PAGE_MASK,
2237 !!(prot & PAGE_WRITE));
2238#endif
2239
2240 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2241 /* Normal RAM. */
2242 iotlb = pd & TARGET_PAGE_MASK;
2243 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2244 iotlb |= IO_MEM_NOTDIRTY;
2245 else
2246 iotlb |= IO_MEM_ROM;
2247 } else {
2248        /* IO handlers are currently passed a physical address.
2249 It would be nice to pass an offset from the base address
2250 of that region. This would avoid having to special case RAM,
2251 and avoid full address decoding in every device.
2252 We can't use the high bits of pd for this because
2253 IO_MEM_ROMD uses these as a ram address. */
2254 iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
2255 }
2256
2257 code_address = address;
2258
2259#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
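    /* The low two bits of the addend returned by remR3TlbGCPhys2Ptr() carry
       VBox access-handler information: bit 1 asks for writes to be trapped,
       bit 0 for all accesses (read, write and code fetch) to be trapped.
       They are folded into the TLB flags below and then stripped so the
       addend can be used as a plain host offset. */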
2260 if (addend & 0x3)
2261 {
2262 if (addend & 0x2)
2263 {
2264 /* catch write */
2265 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
2266 write_mods |= TLB_MMIO;
2267 }
2268 else if (addend & 0x1)
2269 {
2270 /* catch all */
2271 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
2272 {
2273 read_mods |= TLB_MMIO;
2274 write_mods |= TLB_MMIO;
2275 code_mods |= TLB_MMIO;
2276 }
2277 }
2278 if ((iotlb & ~TARGET_PAGE_MASK) == 0)
2279 iotlb = env->pVM->rem.s.iHandlerMemType + paddr;
2280 addend &= ~(target_ulong)0x3;
2281 }
2282#endif
2283
2284 /* Make accesses to pages with watchpoints go via the
2285 watchpoint trap routines. */
2286 for (i = 0; i < env->nb_watchpoints; i++) {
2287 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
2288 iotlb = io_mem_watch + paddr;
2289 /* TODO: The memory case can be optimized by not trapping
2290 reads of pages with a write breakpoint. */
2291 address |= TLB_MMIO;
2292 }
2293 }
2294
2295 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2296 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2297 te = &env->tlb_table[mmu_idx][index];
2298 te->addend = addend - vaddr;
2299 if (prot & PAGE_READ) {
2300 te->addr_read = address;
2301 } else {
2302 te->addr_read = -1;
2303 }
2304
2305 if (prot & PAGE_EXEC) {
2306 te->addr_code = code_address;
2307 } else {
2308 te->addr_code = -1;
2309 }
2310 if (prot & PAGE_WRITE) {
2311 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2312 (pd & IO_MEM_ROMD)) {
2313 /* Write access calls the I/O callback. */
2314 te->addr_write = address | TLB_MMIO;
2315 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2316 !cpu_physical_memory_is_dirty(pd)) {
2317 te->addr_write = address | TLB_NOTDIRTY;
2318 } else {
2319 te->addr_write = address;
2320 }
2321 } else {
2322 te->addr_write = -1;
2323 }
2324
2325#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2326 if (prot & PAGE_READ)
2327 te->addr_read |= read_mods;
2328 if (prot & PAGE_EXEC)
2329 te->addr_code |= code_mods;
2330 if (prot & PAGE_WRITE)
2331 te->addr_write |= write_mods;
2332
2333    env->phys_addends[mmu_idx][index] = (pd & TARGET_PAGE_MASK) - vaddr;
2334#endif
2335
2336#ifdef VBOX
2337 /* inform raw mode about TLB page change */
2338 remR3FlushPage(env, vaddr);
2339#endif
2340 return ret;
2341}
2342#if 0
2343/* called from signal handler: invalidate the code and unprotect the
2344   page. Return TRUE if the fault was successfully handled. */
2345int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
2346{
2347#if !defined(CONFIG_SOFTMMU)
2348 VirtPageDesc *vp;
2349
2350#if defined(DEBUG_TLB)
2351 printf("page_unprotect: addr=0x%08x\n", addr);
2352#endif
2353 addr &= TARGET_PAGE_MASK;
2354
2355 /* if it is not mapped, no need to worry here */
2356 if (addr >= MMAP_AREA_END)
2357 return 0;
2358 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
2359 if (!vp)
2360 return 0;
2361 /* NOTE: in this case, validate_tag is _not_ tested as it
2362 validates only the code TLB */
2363 if (vp->valid_tag != virt_valid_tag)
2364 return 0;
2365 if (!(vp->prot & PAGE_WRITE))
2366 return 0;
2367#if defined(DEBUG_TLB)
2368 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
2369 addr, vp->phys_addr, vp->prot);
2370#endif
2371 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
2372 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
2373 (unsigned long)addr, vp->prot);
2374 /* set the dirty bit */
2375 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
2376 /* flush the code inside */
2377 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
2378 return 1;
2379#elif defined(VBOX)
2380 addr &= TARGET_PAGE_MASK;
2381
2382 /* if it is not mapped, no need to worry here */
2383 if (addr >= MMAP_AREA_END)
2384 return 0;
2385 return 1;
2386#else
2387 return 0;
2388#endif
2389}
2390#endif /* 0 */
2391
2392#else
2393
2394void tlb_flush(CPUState *env, int flush_global)
2395{
2396}
2397
2398void tlb_flush_page(CPUState *env, target_ulong addr)
2399{
2400}
2401
2402int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2403 target_phys_addr_t paddr, int prot,
2404 int mmu_idx, int is_softmmu)
2405{
2406 return 0;
2407}
2408
2409#ifndef VBOX
2410/* dump memory mappings */
2411void page_dump(FILE *f)
2412{
2413 unsigned long start, end;
2414 int i, j, prot, prot1;
2415 PageDesc *p;
2416
2417 fprintf(f, "%-8s %-8s %-8s %s\n",
2418 "start", "end", "size", "prot");
2419 start = -1;
2420 end = -1;
2421 prot = 0;
2422 for(i = 0; i <= L1_SIZE; i++) {
2423 if (i < L1_SIZE)
2424 p = l1_map[i];
2425 else
2426 p = NULL;
2427 for(j = 0;j < L2_SIZE; j++) {
2428 if (!p)
2429 prot1 = 0;
2430 else
2431 prot1 = p[j].flags;
2432 if (prot1 != prot) {
2433 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2434 if (start != -1) {
2435 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2436 start, end, end - start,
2437 prot & PAGE_READ ? 'r' : '-',
2438 prot & PAGE_WRITE ? 'w' : '-',
2439 prot & PAGE_EXEC ? 'x' : '-');
2440 }
2441 if (prot1 != 0)
2442 start = end;
2443 else
2444 start = -1;
2445 prot = prot1;
2446 }
2447 if (!p)
2448 break;
2449 }
2450 }
2451}
2452#endif /* !VBOX */
2453
2454int page_get_flags(target_ulong address)
2455{
2456 PageDesc *p;
2457
2458 p = page_find(address >> TARGET_PAGE_BITS);
2459 if (!p)
2460 return 0;
2461 return p->flags;
2462}
2463
2464/* modify the flags of a page and invalidate the code if
2465   necessary. The flag PAGE_WRITE_ORG is set automatically
2466 depending on PAGE_WRITE */
2467void page_set_flags(target_ulong start, target_ulong end, int flags)
2468{
2469 PageDesc *p;
2470 target_ulong addr;
2471
2472 start = start & TARGET_PAGE_MASK;
2473 end = TARGET_PAGE_ALIGN(end);
2474 if (flags & PAGE_WRITE)
2475 flags |= PAGE_WRITE_ORG;
2476#ifdef VBOX
2477 AssertMsgFailed(("We shouldn't be here, and if we should, we must have an env to do the proper locking!\n"));
2478#endif
2479 spin_lock(&tb_lock);
2480 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2481 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2482        /* if write access is being enabled on a page that was not
2483           writable and contains translated code, we invalidate that code */
2484 if (!(p->flags & PAGE_WRITE) &&
2485 (flags & PAGE_WRITE) &&
2486 p->first_tb) {
2487 tb_invalidate_phys_page(addr, 0, NULL);
2488 }
2489 p->flags = flags;
2490 }
2491 spin_unlock(&tb_lock);
2492}
2493
2494int page_check_range(target_ulong start, target_ulong len, int flags)
2495{
2496 PageDesc *p;
2497 target_ulong end;
2498 target_ulong addr;
2499
2500    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2501 start = start & TARGET_PAGE_MASK;
2502
2503 if( end < start )
2504 /* we've wrapped around */
2505 return -1;
2506 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2507 p = page_find(addr >> TARGET_PAGE_BITS);
2508 if( !p )
2509 return -1;
2510 if( !(p->flags & PAGE_VALID) )
2511 return -1;
2512
2513 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2514 return -1;
2515 if (flags & PAGE_WRITE) {
2516 if (!(p->flags & PAGE_WRITE_ORG))
2517 return -1;
2518 /* unprotect the page if it was put read-only because it
2519 contains translated code */
2520 if (!(p->flags & PAGE_WRITE)) {
2521 if (!page_unprotect(addr, 0, NULL))
2522 return -1;
2523 }
2524 return 0;
2525 }
2526 }
2527 return 0;
2528}
2529
2530/* called from signal handler: invalidate the code and unprotect the
2531   page. Return TRUE if the fault was successfully handled. */
2532int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2533{
2534 unsigned int page_index, prot, pindex;
2535 PageDesc *p, *p1;
2536 target_ulong host_start, host_end, addr;
2537
2538 /* Technically this isn't safe inside a signal handler. However we
2539 know this only ever happens in a synchronous SEGV handler, so in
2540 practice it seems to be ok. */
2541 mmap_lock();
2542
2543 host_start = address & qemu_host_page_mask;
2544 page_index = host_start >> TARGET_PAGE_BITS;
2545 p1 = page_find(page_index);
2546 if (!p1) {
2547 mmap_unlock();
2548 return 0;
2549 }
2550 host_end = host_start + qemu_host_page_size;
2551 p = p1;
2552 prot = 0;
2553 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2554 prot |= p->flags;
2555 p++;
2556 }
2557 /* if the page was really writable, then we change its
2558 protection back to writable */
2559 if (prot & PAGE_WRITE_ORG) {
2560 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2561 if (!(p1[pindex].flags & PAGE_WRITE)) {
2562 mprotect((void *)g2h(host_start), qemu_host_page_size,
2563 (prot & PAGE_BITS) | PAGE_WRITE);
2564 p1[pindex].flags |= PAGE_WRITE;
2565 /* and since the content will be modified, we must invalidate
2566 the corresponding translated code. */
2567 tb_invalidate_phys_page(address, pc, puc);
2568#ifdef DEBUG_TB_CHECK
2569 tb_invalidate_check(address);
2570#endif
2571 mmap_unlock();
2572 return 1;
2573 }
2574 }
2575 mmap_unlock();
2576 return 0;
2577}
2578
2579static inline void tlb_set_dirty(CPUState *env,
2580 unsigned long addr, target_ulong vaddr)
2581{
2582}
2583#endif /* defined(CONFIG_USER_ONLY) */
2584
2585#if !defined(CONFIG_USER_ONLY)
2586static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2587 ram_addr_t memory);
2588static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2589 ram_addr_t orig_memory);
2590#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2591 need_subpage) \
2592 do { \
2593 if (addr > start_addr) \
2594 start_addr2 = 0; \
2595 else { \
2596 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2597 if (start_addr2 > 0) \
2598 need_subpage = 1; \
2599 } \
2600 \
2601 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2602 end_addr2 = TARGET_PAGE_SIZE - 1; \
2603 else { \
2604 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2605 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2606 need_subpage = 1; \
2607 } \
2608 } while (0)
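/* Worked example for CHECK_SUBPAGE (assuming 4 KiB target pages, hypothetical
   values): with start_addr = 0x1080, orig_size = 0x100 and addr = 0x1000 the
   macro yields start_addr2 = 0x080, end_addr2 = 0x17f and need_subpage = 1,
   i.e. the registration only covers a slice of the page at addr. */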
2609
2610
2611/* register physical memory. 'size' must be a multiple of the target
2612 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2613 io memory page */
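/* Illustrative use (hypothetical values): a board model might map RAM and a
   small MMIO region like this:
       cpu_register_physical_memory(0x00000000, 0x00800000, ram_offset | IO_MEM_RAM);
       cpu_register_physical_memory(0xfe000000, 0x00001000, mmio_index);
   where ram_offset would come from qemu_ram_alloc() and mmio_index from
   cpu_register_io_memory(). */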
2614void cpu_register_physical_memory(target_phys_addr_t start_addr,
2615 unsigned long size,
2616 unsigned long phys_offset)
2617{
2618 target_phys_addr_t addr, end_addr;
2619 PhysPageDesc *p;
2620 CPUState *env;
2621 ram_addr_t orig_size = size;
2622 void *subpage;
2623
2624#ifdef USE_KQEMU
2625 /* XXX: should not depend on cpu context */
2626 env = first_cpu;
2627 if (env->kqemu_enabled) {
2628 kqemu_set_phys_mem(start_addr, size, phys_offset);
2629 }
2630#endif
2631 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2632 end_addr = start_addr + (target_phys_addr_t)size;
2633 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2634 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2635 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2636 ram_addr_t orig_memory = p->phys_offset;
2637 target_phys_addr_t start_addr2, end_addr2;
2638 int need_subpage = 0;
2639
2640 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2641 need_subpage);
2642 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2643 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2644 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2645 &p->phys_offset, orig_memory);
2646 } else {
2647 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2648 >> IO_MEM_SHIFT];
2649 }
2650 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2651 } else {
2652 p->phys_offset = phys_offset;
2653#if !defined(VBOX) || defined(VBOX_WITH_NEW_PHYS_CODE)
2654 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2655 (phys_offset & IO_MEM_ROMD))
2656#else
2657 if ( (phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM
2658 || (phys_offset & IO_MEM_ROMD)
2659 || (phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING)
2660#endif
2661 phys_offset += TARGET_PAGE_SIZE;
2662 }
2663 } else {
2664 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2665 p->phys_offset = phys_offset;
2666#if !defined(VBOX) || defined(VBOX_WITH_NEW_PHYS_CODE)
2667 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2668 (phys_offset & IO_MEM_ROMD))
2669#else
2670 if ( (phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM
2671 || (phys_offset & IO_MEM_ROMD)
2672 || (phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING)
2673#endif
2674 phys_offset += TARGET_PAGE_SIZE;
2675 else {
2676 target_phys_addr_t start_addr2, end_addr2;
2677 int need_subpage = 0;
2678
2679 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2680 end_addr2, need_subpage);
2681
2682 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2683 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2684 &p->phys_offset, IO_MEM_UNASSIGNED);
2685 subpage_register(subpage, start_addr2, end_addr2,
2686 phys_offset);
2687 }
2688 }
2689 }
2690 }
2691 /* since each CPU stores ram addresses in its TLB cache, we must
2692 reset the modified entries */
2693 /* XXX: slow ! */
2694 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2695 tlb_flush(env, 1);
2696 }
2697}
2698
2699/* XXX: temporary until new memory mapping API */
2700uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2701{
2702 PhysPageDesc *p;
2703
2704 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2705 if (!p)
2706 return IO_MEM_UNASSIGNED;
2707 return p->phys_offset;
2708}
2709
2710#ifndef VBOX
2711/* XXX: better than nothing */
2712ram_addr_t qemu_ram_alloc(ram_addr_t size)
2713{
2714 ram_addr_t addr;
2715 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2716 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2717 (uint64_t)size, (uint64_t)phys_ram_size);
2718 abort();
2719 }
2720 addr = phys_ram_alloc_offset;
2721 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2722 return addr;
2723}
2724
2725void qemu_ram_free(ram_addr_t addr)
2726{
2727}
2728#endif
2729
2730
2731static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2732{
2733#ifdef DEBUG_UNASSIGNED
2734 printf("Unassigned mem read 0x%08x\n", (int)addr);
2735#endif
2736#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2737 do_unassigned_access(addr, 0, 0, 0, 1);
2738#endif
2739 return 0;
2740}
2741
2742static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2743{
2744#ifdef DEBUG_UNASSIGNED
2745 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2746#endif
2747#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2748 do_unassigned_access(addr, 0, 0, 0, 2);
2749#endif
2750 return 0;
2751}
2752
2753static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2754{
2755#ifdef DEBUG_UNASSIGNED
2756 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2757#endif
2758#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2759 do_unassigned_access(addr, 0, 0, 0, 4);
2760#endif
2761 return 0;
2762}
2763
2764static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2765{
2766#ifdef DEBUG_UNASSIGNED
2767 printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
2768#endif
2769}
2770
2771static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2772{
2773#ifdef DEBUG_UNASSIGNED
2774 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2775#endif
2776#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2777 do_unassigned_access(addr, 1, 0, 0, 2);
2778#endif
2779}
2780
2781static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2782{
2783#ifdef DEBUG_UNASSIGNED
2784 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2785#endif
2786#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2787 do_unassigned_access(addr, 1, 0, 0, 4);
2788#endif
2789}
2790static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2791 unassigned_mem_readb,
2792 unassigned_mem_readw,
2793 unassigned_mem_readl,
2794};
2795
2796static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2797 unassigned_mem_writeb,
2798 unassigned_mem_writew,
2799 unassigned_mem_writel,
2800};
2801
2802static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2803{
2804 unsigned long ram_addr;
2805 int dirty_flags;
2806#if defined(VBOX)
2807 ram_addr = addr;
2808#else
2809 ram_addr = addr - (unsigned long)phys_ram_base;
2810#endif
2811#ifdef VBOX
2812 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2813 dirty_flags = 0xff;
2814 else
2815#endif /* VBOX */
2816 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2817 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2818#if !defined(CONFIG_USER_ONLY)
2819 tb_invalidate_phys_page_fast(ram_addr, 1);
2820# ifdef VBOX
2821 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2822 dirty_flags = 0xff;
2823 else
2824# endif /* VBOX */
2825 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2826#endif
2827 }
2828#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2829 remR3PhysWriteU8(addr, val);
2830#else
2831 stb_p((uint8_t *)(long)addr, val);
2832#endif
2833#ifdef USE_KQEMU
2834 if (cpu_single_env->kqemu_enabled &&
2835 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2836 kqemu_modify_page(cpu_single_env, ram_addr);
2837#endif
2838 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2839#ifdef VBOX
2840 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2841#endif /* VBOX */
2842 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2843 /* we remove the notdirty callback only if the code has been
2844 flushed */
2845 if (dirty_flags == 0xff)
2846 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
2847}
2848
2849static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2850{
2851 unsigned long ram_addr;
2852 int dirty_flags;
2853#if defined(VBOX)
2854 ram_addr = addr;
2855#else
2856 ram_addr = addr - (unsigned long)phys_ram_base;
2857#endif
2858#ifdef VBOX
2859 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2860 dirty_flags = 0xff;
2861 else
2862#endif /* VBOX */
2863 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2864 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2865#if !defined(CONFIG_USER_ONLY)
2866 tb_invalidate_phys_page_fast(ram_addr, 2);
2867# ifdef VBOX
2868 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2869 dirty_flags = 0xff;
2870 else
2871# endif /* VBOX */
2872 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2873#endif
2874 }
2875#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2876 remR3PhysWriteU16(addr, val);
2877#else
2878 stw_p((uint8_t *)(long)addr, val);
2879#endif
2880
2881#ifdef USE_KQEMU
2882 if (cpu_single_env->kqemu_enabled &&
2883 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2884 kqemu_modify_page(cpu_single_env, ram_addr);
2885#endif
2886 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2887#ifdef VBOX
2888 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2889#endif
2890 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2891 /* we remove the notdirty callback only if the code has been
2892 flushed */
2893 if (dirty_flags == 0xff)
2894 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
2895}
2896
2897static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2898{
2899 unsigned long ram_addr;
2900 int dirty_flags;
2901#if defined(VBOX)
2902 ram_addr = addr;
2903#else
2904 ram_addr = addr - (unsigned long)phys_ram_base;
2905#endif
2906#ifdef VBOX
2907 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2908 dirty_flags = 0xff;
2909 else
2910#endif /* VBOX */
2911 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2912 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2913#if !defined(CONFIG_USER_ONLY)
2914 tb_invalidate_phys_page_fast(ram_addr, 4);
2915# ifdef VBOX
2916 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2917 dirty_flags = 0xff;
2918 else
2919# endif /* VBOX */
2920 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2921#endif
2922 }
2923#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2924 remR3PhysWriteU32(addr, val);
2925#else
2926 stl_p((uint8_t *)(long)addr, val);
2927#endif
2928#ifdef USE_KQEMU
2929 if (cpu_single_env->kqemu_enabled &&
2930 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2931 kqemu_modify_page(cpu_single_env, ram_addr);
2932#endif
2933 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2934#ifdef VBOX
2935 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2936#endif
2937 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2938 /* we remove the notdirty callback only if the code has been
2939 flushed */
2940 if (dirty_flags == 0xff)
2941 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
2942}
2943
2944static CPUReadMemoryFunc *error_mem_read[3] = {
2945 NULL, /* never used */
2946 NULL, /* never used */
2947 NULL, /* never used */
2948};
2949
2950static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2951 notdirty_mem_writeb,
2952 notdirty_mem_writew,
2953 notdirty_mem_writel,
2954};
2955
2956
2957/* Generate a debug exception if a watchpoint has been hit. */
2958static void check_watchpoint(int offset, int flags)
2959{
2960 CPUState *env = cpu_single_env;
2961 target_ulong vaddr;
2962 int i;
2963
2964 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2965 for (i = 0; i < env->nb_watchpoints; i++) {
2966 if (vaddr == env->watchpoint[i].vaddr
2967 && (env->watchpoint[i].type & flags)) {
2968 env->watchpoint_hit = i + 1;
2969 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2970 break;
2971 }
2972 }
2973}
2974
2975/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2976 so these check for a hit then pass through to the normal out-of-line
2977 phys routines. */
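/* The "TLB trick" mentioned above: tlb_set_page_exec() marks any page that
   contains a watchpoint with TLB_MMIO and points its iotlb entry at
   io_mem_watch, so every access leaves the fast path and arrives here, where
   the exact address can be checked against env->watchpoint[]. */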
2978static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2979{
2980 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2981 return ldub_phys(addr);
2982}
2983
2984static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2985{
2986 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2987 return lduw_phys(addr);
2988}
2989
2990static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2991{
2992 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2993 return ldl_phys(addr);
2994}
2995
2996static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2997 uint32_t val)
2998{
2999 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
3000 stb_phys(addr, val);
3001}
3002
3003static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
3004 uint32_t val)
3005{
3006 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
3007 stw_phys(addr, val);
3008}
3009
3010static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
3011 uint32_t val)
3012{
3013 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
3014 stl_phys(addr, val);
3015}
3016
3017static CPUReadMemoryFunc *watch_mem_read[3] = {
3018 watch_mem_readb,
3019 watch_mem_readw,
3020 watch_mem_readl,
3021};
3022
3023static CPUWriteMemoryFunc *watch_mem_write[3] = {
3024 watch_mem_writeb,
3025 watch_mem_writew,
3026 watch_mem_writel,
3027};
3028
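/* Sub-page dispatch: when several memory regions share a single target page
   (see CHECK_SUBPAGE above), the page is backed by a subpage_t whose per-index
   tables route each access to the handler registered for that slice. */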
3029static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
3030 unsigned int len)
3031{
3032 uint32_t ret;
3033 unsigned int idx;
3034
3035 idx = SUBPAGE_IDX(addr - mmio->base);
3036#if defined(DEBUG_SUBPAGE)
3037 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3038 mmio, len, addr, idx);
3039#endif
3040 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
3041
3042 return ret;
3043}
3044
3045static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
3046 uint32_t value, unsigned int len)
3047{
3048 unsigned int idx;
3049
3050 idx = SUBPAGE_IDX(addr - mmio->base);
3051#if defined(DEBUG_SUBPAGE)
3052 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
3053 mmio, len, addr, idx, value);
3054#endif
3055 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
3056}
3057
3058static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
3059{
3060#if defined(DEBUG_SUBPAGE)
3061 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3062#endif
3063
3064 return subpage_readlen(opaque, addr, 0);
3065}
3066
3067static void subpage_writeb (void *opaque, target_phys_addr_t addr,
3068 uint32_t value)
3069{
3070#if defined(DEBUG_SUBPAGE)
3071 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3072#endif
3073 subpage_writelen(opaque, addr, value, 0);
3074}
3075
3076static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
3077{
3078#if defined(DEBUG_SUBPAGE)
3079 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3080#endif
3081
3082 return subpage_readlen(opaque, addr, 1);
3083}
3084
3085static void subpage_writew (void *opaque, target_phys_addr_t addr,
3086 uint32_t value)
3087{
3088#if defined(DEBUG_SUBPAGE)
3089 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3090#endif
3091 subpage_writelen(opaque, addr, value, 1);
3092}
3093
3094static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
3095{
3096#if defined(DEBUG_SUBPAGE)
3097 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3098#endif
3099
3100 return subpage_readlen(opaque, addr, 2);
3101}
3102
3103static void subpage_writel (void *opaque,
3104 target_phys_addr_t addr, uint32_t value)
3105{
3106#if defined(DEBUG_SUBPAGE)
3107 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3108#endif
3109 subpage_writelen(opaque, addr, value, 2);
3110}
3111
3112static CPUReadMemoryFunc *subpage_read[] = {
3113 &subpage_readb,
3114 &subpage_readw,
3115 &subpage_readl,
3116};
3117
3118static CPUWriteMemoryFunc *subpage_write[] = {
3119 &subpage_writeb,
3120 &subpage_writew,
3121 &subpage_writel,
3122};
3123
3124static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3125 ram_addr_t memory)
3126{
3127 int idx, eidx;
3128 unsigned int i;
3129
3130 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3131 return -1;
3132 idx = SUBPAGE_IDX(start);
3133 eidx = SUBPAGE_IDX(end);
3134#if defined(DEBUG_SUBPAGE)
3135 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
3136 mmio, start, end, idx, eidx, memory);
3137#endif
3138 memory >>= IO_MEM_SHIFT;
3139 for (; idx <= eidx; idx++) {
3140 for (i = 0; i < 4; i++) {
3141 if (io_mem_read[memory][i]) {
3142 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
3143 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
3144 }
3145 if (io_mem_write[memory][i]) {
3146 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
3147 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
3148 }
3149 }
3150 }
3151
3152 return 0;
3153}
3154
3155static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3156 ram_addr_t orig_memory)
3157{
3158 subpage_t *mmio;
3159 int subpage_memory;
3160
3161 mmio = qemu_mallocz(sizeof(subpage_t));
3162 if (mmio != NULL) {
3163 mmio->base = base;
3164 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
3165#if defined(DEBUG_SUBPAGE)
3166 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3167 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3168#endif
3169 *phys = subpage_memory | IO_MEM_SUBPAGE;
3170 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
3171 }
3172
3173 return mmio;
3174}
3175
3176static void io_mem_init(void)
3177{
3178 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
3179 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3180 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
3181#if defined(VBOX) && !defined(VBOX_WITH_NEW_PHYS_CODE)
3182 cpu_register_io_memory(IO_MEM_RAM_MISSING >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3183 io_mem_nb = 6;
3184#else
3185 io_mem_nb = 5;
3186#endif
3187
3188 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
3189 watch_mem_write, NULL);
3190
3191#ifndef VBOX /* VBOX: we do this later when the RAM is allocated. */
3192 /* alloc dirty bits array */
3193 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3194 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
3195#endif /* !VBOX */
3196}
3197
3198/* mem_read and mem_write are arrays of functions containing the
3199 function to access byte (index 0), word (index 1) and dword (index
3200 2). Functions can be omitted with a NULL function pointer. The
3201 registered functions may be modified dynamically later.
3202 If io_index is non zero, the corresponding io zone is
3203 modified. If it is zero, a new io zone is allocated. The return
3204   value can be used with cpu_register_physical_memory(); -1 is
3205   returned on error. */
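/* Illustrative use (hypothetical names): a device model registers its
   callbacks and maps the returned token, e.g.
       static CPUReadMemoryFunc  *my_read[3]  = { my_readb,  my_readw,  my_readl  };
       static CPUWriteMemoryFunc *my_write[3] = { my_writeb, my_writew, my_writel };
       int idx = cpu_register_io_memory(0, my_read, my_write, opaque);
       cpu_register_physical_memory(base, 0x1000, idx);
   Passing 0 as io_index allocates a new slot, as described above. */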
3206int cpu_register_io_memory(int io_index,
3207 CPUReadMemoryFunc **mem_read,
3208 CPUWriteMemoryFunc **mem_write,
3209 void *opaque)
3210{
3211 int i, subwidth = 0;
3212
3213 if (io_index <= 0) {
3214 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
3215 return -1;
3216 io_index = io_mem_nb++;
3217 } else {
3218 if (io_index >= IO_MEM_NB_ENTRIES)
3219 return -1;
3220 }
3221
3222 for(i = 0;i < 3; i++) {
3223 if (!mem_read[i] || !mem_write[i])
3224 subwidth = IO_MEM_SUBWIDTH;
3225 io_mem_read[io_index][i] = mem_read[i];
3226 io_mem_write[io_index][i] = mem_write[i];
3227 }
3228 io_mem_opaque[io_index] = opaque;
3229 return (io_index << IO_MEM_SHIFT) | subwidth;
3230}
3231
3232CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
3233{
3234 return io_mem_write[io_index >> IO_MEM_SHIFT];
3235}
3236
3237CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
3238{
3239 return io_mem_read[io_index >> IO_MEM_SHIFT];
3240}
3241#endif /* !defined(CONFIG_USER_ONLY) */
3242
3243/* physical memory access (slow version, mainly for debug) */
3244#if defined(CONFIG_USER_ONLY)
3245void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3246 int len, int is_write)
3247{
3248 int l, flags;
3249 target_ulong page;
3250 void * p;
3251
3252 while (len > 0) {
3253 page = addr & TARGET_PAGE_MASK;
3254 l = (page + TARGET_PAGE_SIZE) - addr;
3255 if (l > len)
3256 l = len;
3257 flags = page_get_flags(page);
3258 if (!(flags & PAGE_VALID))
3259 return;
3260 if (is_write) {
3261 if (!(flags & PAGE_WRITE))
3262 return;
3263 /* XXX: this code should not depend on lock_user */
3264 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3265 /* FIXME - should this return an error rather than just fail? */
3266 return;
3267            memcpy(p, buf, l);
3268            unlock_user(p, addr, l);
3269 } else {
3270 if (!(flags & PAGE_READ))
3271 return;
3272 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3273 /* FIXME - should this return an error rather than just fail? */
3274 return;
3275            memcpy(buf, p, l);
3276 unlock_user(p, addr, 0);
3277 }
3278 len -= l;
3279 buf += l;
3280 addr += l;
3281 }
3282}
3283
3284#else
3285void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3286 int len, int is_write)
3287{
3288 int l, io_index;
3289 uint8_t *ptr;
3290 uint32_t val;
3291 target_phys_addr_t page;
3292 unsigned long pd;
3293 PhysPageDesc *p;
3294
3295 while (len > 0) {
3296 page = addr & TARGET_PAGE_MASK;
3297 l = (page + TARGET_PAGE_SIZE) - addr;
3298 if (l > len)
3299 l = len;
3300 p = phys_page_find(page >> TARGET_PAGE_BITS);
3301 if (!p) {
3302 pd = IO_MEM_UNASSIGNED;
3303 } else {
3304 pd = p->phys_offset;
3305 }
3306
3307 if (is_write) {
3308 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3309 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3310 /* XXX: could force cpu_single_env to NULL to avoid
3311 potential bugs */
3312 if (l >= 4 && ((addr & 3) == 0)) {
3313 /* 32 bit write access */
3314#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3315 val = ldl_p(buf);
3316#else
3317 val = *(const uint32_t *)buf;
3318#endif
3319 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3320 l = 4;
3321 } else if (l >= 2 && ((addr & 1) == 0)) {
3322 /* 16 bit write access */
3323#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3324 val = lduw_p(buf);
3325#else
3326 val = *(const uint16_t *)buf;
3327#endif
3328 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
3329 l = 2;
3330 } else {
3331 /* 8 bit write access */
3332#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3333 val = ldub_p(buf);
3334#else
3335 val = *(const uint8_t *)buf;
3336#endif
3337 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
3338 l = 1;
3339 }
3340 } else {
3341 unsigned long addr1;
3342 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3343 /* RAM case */
3344#ifdef VBOX
3345 remR3PhysWrite(addr1, buf, l); NOREF(ptr);
3346#else
3347 ptr = phys_ram_base + addr1;
3348 memcpy(ptr, buf, l);
3349#endif
3350 if (!cpu_physical_memory_is_dirty(addr1)) {
3351 /* invalidate code */
3352 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3353 /* set dirty bit */
3354#ifdef VBOX
3355 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
3356#endif
3357 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3358 (0xff & ~CODE_DIRTY_FLAG);
3359 }
3360 }
3361 } else {
3362 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3363 !(pd & IO_MEM_ROMD)) {
3364 /* I/O case */
3365 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3366 if (l >= 4 && ((addr & 3) == 0)) {
3367 /* 32 bit read access */
3368 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3369#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3370 stl_p(buf, val);
3371#else
3372 *(uint32_t *)buf = val;
3373#endif
3374 l = 4;
3375 } else if (l >= 2 && ((addr & 1) == 0)) {
3376 /* 16 bit read access */
3377 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
3378#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3379 stw_p(buf, val);
3380#else
3381 *(uint16_t *)buf = val;
3382#endif
3383 l = 2;
3384 } else {
3385 /* 8 bit read access */
3386 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
3387#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3388 stb_p(buf, val);
3389#else
3390 *(uint8_t *)buf = val;
3391#endif
3392 l = 1;
3393 }
3394 } else {
3395 /* RAM case */
3396#ifdef VBOX
3397 remR3PhysRead((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), buf, l); NOREF(ptr);
3398#else
3399 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3400 (addr & ~TARGET_PAGE_MASK);
3401 memcpy(buf, ptr, l);
3402#endif
3403 }
3404 }
3405 len -= l;
3406 buf += l;
3407 addr += l;
3408 }
3409}
3410
3411#ifndef VBOX
3412/* used for ROM loading : can write in RAM and ROM */
3413void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3414 const uint8_t *buf, int len)
3415{
3416 int l;
3417 uint8_t *ptr;
3418 target_phys_addr_t page;
3419 unsigned long pd;
3420 PhysPageDesc *p;
3421
3422 while (len > 0) {
3423 page = addr & TARGET_PAGE_MASK;
3424 l = (page + TARGET_PAGE_SIZE) - addr;
3425 if (l > len)
3426 l = len;
3427 p = phys_page_find(page >> TARGET_PAGE_BITS);
3428 if (!p) {
3429 pd = IO_MEM_UNASSIGNED;
3430 } else {
3431 pd = p->phys_offset;
3432 }
3433
3434 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3435 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3436 !(pd & IO_MEM_ROMD)) {
3437 /* do nothing */
3438 } else {
3439 unsigned long addr1;
3440 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3441 /* ROM/RAM case */
3442 ptr = phys_ram_base + addr1;
3443 memcpy(ptr, buf, l);
3444 }
3445 len -= l;
3446 buf += l;
3447 addr += l;
3448 }
3449}
3450#endif /* !VBOX */
3451
3452
3453/* warning: addr must be aligned */
3454uint32_t ldl_phys(target_phys_addr_t addr)
3455{
3456 int io_index;
3457 uint8_t *ptr;
3458 uint32_t val;
3459 unsigned long pd;
3460 PhysPageDesc *p;
3461
3462 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3463 if (!p) {
3464 pd = IO_MEM_UNASSIGNED;
3465 } else {
3466 pd = p->phys_offset;
3467 }
3468
3469 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3470 !(pd & IO_MEM_ROMD)) {
3471 /* I/O case */
3472 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3473 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3474 } else {
3475 /* RAM case */
3476#ifndef VBOX
3477 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3478 (addr & ~TARGET_PAGE_MASK);
3479 val = ldl_p(ptr);
3480#else
3481 val = remR3PhysReadU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
3482#endif
3483 }
3484 return val;
3485}
3486
3487/* warning: addr must be aligned */
3488uint64_t ldq_phys(target_phys_addr_t addr)
3489{
3490 int io_index;
3491 uint8_t *ptr;
3492 uint64_t val;
3493 unsigned long pd;
3494 PhysPageDesc *p;
3495
3496 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3497 if (!p) {
3498 pd = IO_MEM_UNASSIGNED;
3499 } else {
3500 pd = p->phys_offset;
3501 }
3502
3503 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3504 !(pd & IO_MEM_ROMD)) {
3505 /* I/O case */
3506 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3507#ifdef TARGET_WORDS_BIGENDIAN
3508 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3509 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3510#else
3511 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3512 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3513#endif
3514 } else {
3515 /* RAM case */
3516#ifndef VBOX
3517 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3518 (addr & ~TARGET_PAGE_MASK);
3519 val = ldq_p(ptr);
3520#else
3521 val = remR3PhysReadU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
3522#endif
3523 }
3524 return val;
3525}
3526
3527/* XXX: optimize */
3528uint32_t ldub_phys(target_phys_addr_t addr)
3529{
3530 uint8_t val;
3531 cpu_physical_memory_read(addr, &val, 1);
3532 return val;
3533}
3534
3535/* XXX: optimize */
3536uint32_t lduw_phys(target_phys_addr_t addr)
3537{
3538 uint16_t val;
3539 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3540 return tswap16(val);
3541}
3542
3543/* warning: addr must be aligned. The ram page is not marked as dirty
3544 and the code inside is not invalidated. It is useful if the dirty
3545 bits are used to track modified PTEs */
3546void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3547{
3548 int io_index;
3549 uint8_t *ptr;
3550 unsigned long pd;
3551 PhysPageDesc *p;
3552
3553 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3554 if (!p) {
3555 pd = IO_MEM_UNASSIGNED;
3556 } else {
3557 pd = p->phys_offset;
3558 }
3559
3560 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3561 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3562 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3563 } else {
3564#ifndef VBOX
3565 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3566 (addr & ~TARGET_PAGE_MASK);
3567 stl_p(ptr, val);
3568#else
3569 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3570#endif
3571#ifndef VBOX
3572        if (unlikely(in_migration)) {
            unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3573 if (!cpu_physical_memory_is_dirty(addr1)) {
3574 /* invalidate code */
3575 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3576 /* set dirty bit */
3577 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3578 (0xff & ~CODE_DIRTY_FLAG);
3579 }
3580 }
3581#endif
3582 }
3583}
3584
3585void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3586{
3587 int io_index;
3588 uint8_t *ptr;
3589 unsigned long pd;
3590 PhysPageDesc *p;
3591
3592 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3593 if (!p) {
3594 pd = IO_MEM_UNASSIGNED;
3595 } else {
3596 pd = p->phys_offset;
3597 }
3598
3599 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3600 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3601#ifdef TARGET_WORDS_BIGENDIAN
3602 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3603 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3604#else
3605 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3606 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3607#endif
3608 } else {
3609#ifndef VBOX
3610 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3611 (addr & ~TARGET_PAGE_MASK);
3612 stq_p(ptr, val);
3613#else
3614 remR3PhysWriteU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3615#endif
3616 }
3617}
3618
3619
3620/* warning: addr must be aligned */
3621void stl_phys(target_phys_addr_t addr, uint32_t val)
3622{
3623 int io_index;
3624 uint8_t *ptr;
3625 unsigned long pd;
3626 PhysPageDesc *p;
3627
3628 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3629 if (!p) {
3630 pd = IO_MEM_UNASSIGNED;
3631 } else {
3632 pd = p->phys_offset;
3633 }
3634
3635 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3636 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3637 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3638 } else {
3639 unsigned long addr1;
3640 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3641 /* RAM case */
3642#ifndef VBOX
3643 ptr = phys_ram_base + addr1;
3644 stl_p(ptr, val);
3645#else
3646 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3647#endif
3648 if (!cpu_physical_memory_is_dirty(addr1)) {
3649 /* invalidate code */
3650 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3651 /* set dirty bit */
3652#ifdef VBOX
3653 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
3654#endif
3655 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3656 (0xff & ~CODE_DIRTY_FLAG);
3657 }
3658 }
3659}
3660
3661/* XXX: optimize */
3662void stb_phys(target_phys_addr_t addr, uint32_t val)
3663{
3664 uint8_t v = val;
3665 cpu_physical_memory_write(addr, &v, 1);
3666}
3667
3668/* XXX: optimize */
3669void stw_phys(target_phys_addr_t addr, uint32_t val)
3670{
3671 uint16_t v = tswap16(val);
3672 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3673}
3674
3675/* XXX: optimize */
3676void stq_phys(target_phys_addr_t addr, uint64_t val)
3677{
3678 val = tswap64(val);
3679 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3680}
3681
3682#endif
3683
3684/* virtual memory access for debug */
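/* Illustrative use (hypothetical values): a debugger front end fetches guest
   memory with cpu_memory_rw_debug(env, pc, buf, sizeof(buf), 0); each page is
   translated with cpu_get_phys_page_debug() and then accessed through
   cpu_physical_memory_rw(). */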
3685int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3686 uint8_t *buf, int len, int is_write)
3687{
3688 int l;
3689 target_ulong page, phys_addr;
3690
3691 while (len > 0) {
3692 page = addr & TARGET_PAGE_MASK;
3693 phys_addr = cpu_get_phys_page_debug(env, page);
3694 /* if no physical page mapped, return an error */
3695 if (phys_addr == -1)
3696 return -1;
3697 l = (page + TARGET_PAGE_SIZE) - addr;
3698 if (l > len)
3699 l = len;
3700 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3701 buf, l, is_write);
3702 len -= l;
3703 buf += l;
3704 addr += l;
3705 }
3706 return 0;
3707}
3708
3709/* in deterministic execution mode, instructions doing device I/Os
3710 must be at the end of the TB */
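/* Summary: when an I/O access is discovered in the middle of a TB under
   deterministic (icount) execution, the TB is retranslated with CF_LAST_IO so
   that the I/O instruction terminates it, and execution restarts from the
   interrupted instruction. */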
3711void cpu_io_recompile(CPUState *env, void *retaddr)
3712{
3713 TranslationBlock *tb;
3714 uint32_t n, cflags;
3715 target_ulong pc, cs_base;
3716 uint64_t flags;
3717
3718 tb = tb_find_pc((unsigned long)retaddr);
3719 if (!tb) {
3720 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3721 retaddr);
3722 }
3723 n = env->icount_decr.u16.low + tb->icount;
3724 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3725 /* Calculate how many instructions had been executed before the fault
3726 occurred. */
3727 n = n - env->icount_decr.u16.low;
3728 /* Generate a new TB ending on the I/O insn. */
3729 n++;
3730 /* On MIPS and SH, delay slot instructions can only be restarted if
3731 they were already the first instruction in the TB. If this is not
3732 the first instruction in a TB then re-execute the preceding
3733 branch. */
3734#if defined(TARGET_MIPS)
3735 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3736 env->active_tc.PC -= 4;
3737 env->icount_decr.u16.low++;
3738 env->hflags &= ~MIPS_HFLAG_BMASK;
3739 }
3740#elif defined(TARGET_SH4)
3741 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3742 && n > 1) {
3743 env->pc -= 2;
3744 env->icount_decr.u16.low++;
3745 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3746 }
3747#endif
3748 /* This should never happen. */
3749 if (n > CF_COUNT_MASK)
3750 cpu_abort(env, "TB too big during recompile");
3751
3752 cflags = n | CF_LAST_IO;
3753 pc = tb->pc;
3754 cs_base = tb->cs_base;
3755 flags = tb->flags;
3756 tb_phys_invalidate(tb, -1);
3757 /* FIXME: In theory this could raise an exception. In practice
3758 we have already translated the block once so it's probably ok. */
3759 tb_gen_code(env, pc, cs_base, flags, cflags);
3760 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3761 the first in the TB) then we end up generating a whole new TB and
3762 repeating the fault, which is horribly inefficient.
3763 Better would be to execute just this insn uncached, or generate a
3764 second new TB. */
3765 cpu_resume_from_signal(env, NULL);
3766}
3767
3768#ifndef VBOX
3769void dump_exec_info(FILE *f,
3770 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3771{
3772 int i, target_code_size, max_target_code_size;
3773 int direct_jmp_count, direct_jmp2_count, cross_page;
3774 TranslationBlock *tb;
3775
3776 target_code_size = 0;
3777 max_target_code_size = 0;
3778 cross_page = 0;
3779 direct_jmp_count = 0;
3780 direct_jmp2_count = 0;
3781 for(i = 0; i < nb_tbs; i++) {
3782 tb = &tbs[i];
3783 target_code_size += tb->size;
3784 if (tb->size > max_target_code_size)
3785 max_target_code_size = tb->size;
3786 if (tb->page_addr[1] != -1)
3787 cross_page++;
3788 if (tb->tb_next_offset[0] != 0xffff) {
3789 direct_jmp_count++;
3790 if (tb->tb_next_offset[1] != 0xffff) {
3791 direct_jmp2_count++;
3792 }
3793 }
3794 }
3795 /* XXX: avoid using doubles ? */
3796 cpu_fprintf(f, "Translation buffer state:\n");
3797 cpu_fprintf(f, "gen code size %ld/%ld\n",
3798 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3799 cpu_fprintf(f, "TB count %d/%d\n",
3800 nb_tbs, code_gen_max_blocks);
3801 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3802 nb_tbs ? target_code_size / nb_tbs : 0,
3803 max_target_code_size);
3804 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3805 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3806 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3807 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3808 cross_page,
3809 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3810 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3811 direct_jmp_count,
3812 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3813 direct_jmp2_count,
3814 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3815 cpu_fprintf(f, "\nStatistics:\n");
3816 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3817 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3818 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3819 tcg_dump_info(f, cpu_fprintf);
3820}
3821#endif /* !VBOX */
3822
3823#if !defined(CONFIG_USER_ONLY)
3824
3825#define MMUSUFFIX _cmmu
3826#define GETPC() NULL
3827#define env cpu_single_env
3828#define SOFTMMU_CODE_ACCESS
3829
3830#define SHIFT 0
3831#include "softmmu_template.h"
3832
3833#define SHIFT 1
3834#include "softmmu_template.h"
3835
3836#define SHIFT 2
3837#include "softmmu_template.h"
3838
3839#define SHIFT 3
3840#include "softmmu_template.h"
3841
3842#undef env
3843
3844#endif