VirtualBox

source: vbox/trunk/src/recompiler/exec.c @ 78377

Last change on this file since 78377 was 76474, checked in by vboxsync, 6 years ago

scm --fix-err-h src/ (bugref:9344)

1/*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20/*
21 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
22 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
23 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
24 * a choice of LGPL license versions is made available with the language indicating
25 * that LGPLv2 or any later version may be used, or where a choice of which version
26 * of the LGPL is applied is otherwise unspecified.
27 */
28
29#include "config.h"
30#ifndef VBOX
31#ifdef _WIN32
32#include <windows.h>
33#else
34#include <sys/types.h>
35#include <sys/mman.h>
36#endif
37#include <stdlib.h>
38#include <stdio.h>
39#include <stdarg.h>
40#include <string.h>
41#include <errno.h>
42#include <unistd.h>
43#include <inttypes.h>
44#else /* VBOX */
45# include <stdlib.h>
46# include <stdio.h>
47# include <iprt/alloc.h>
48# include <iprt/string.h>
49# include <iprt/param.h>
50# include <VBox/vmm/pgm.h> /* PGM_DYNAMIC_RAM_ALLOC */
51# include <iprt/errcore.h>
52#endif /* VBOX */
53
54#include "cpu.h"
55#include "exec-all.h"
56#include "qemu-common.h"
57#include "tcg.h"
58#ifndef VBOX
59#include "hw/hw.h"
60#include "hw/qdev.h"
61#endif /* !VBOX */
62#include "osdep.h"
63#include "kvm.h"
64#include "qemu-timer.h"
65#if defined(CONFIG_USER_ONLY)
66#include <qemu.h>
67#include <signal.h>
68#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
69#include <sys/param.h>
70#if __FreeBSD_version >= 700104
71#define HAVE_KINFO_GETVMMAP
72#define sigqueue sigqueue_freebsd /* avoid redefinition */
73#include <sys/time.h>
74#include <sys/proc.h>
75#include <machine/profile.h>
76#define _KERNEL
77#include <sys/user.h>
78#undef _KERNEL
79#undef sigqueue
80#include <libutil.h>
81#endif
82#endif
83#endif
84
85//#define DEBUG_TB_INVALIDATE
86//#define DEBUG_FLUSH
87//#define DEBUG_TLB
88//#define DEBUG_UNASSIGNED
89
90/* make various TB consistency checks */
91//#define DEBUG_TB_CHECK
92//#define DEBUG_TLB_CHECK
93
94//#define DEBUG_IOPORT
95//#define DEBUG_SUBPAGE
96
97#if !defined(CONFIG_USER_ONLY)
98/* TB consistency checks only implemented for usermode emulation. */
99#undef DEBUG_TB_CHECK
100#endif
101
102#define SMC_BITMAP_USE_THRESHOLD 10
103
104static TranslationBlock *tbs;
105static int code_gen_max_blocks;
106TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
107static int nb_tbs;
108/* any access to the tbs or the page table must use this lock */
109spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
110
111#ifndef VBOX
112#if defined(__arm__) || defined(__sparc_v9__)
113/* The prologue must be reachable with a direct jump. ARM and Sparc64
114 have limited branch ranges (possibly also PPC), so place it in a
115 section close to the code segment. */
116#define code_gen_section \
117 __attribute__((__section__(".gen_code"))) \
118 __attribute__((aligned (32)))
119#elif defined(_WIN32)
120/* Maximum alignment for Win32 is 16. */
121#define code_gen_section \
122 __attribute__((aligned (16)))
123#else
124#define code_gen_section \
125 __attribute__((aligned (32)))
126#endif
127
128uint8_t code_gen_prologue[1024] code_gen_section;
129#else /* VBOX */
130extern uint8_t *code_gen_prologue;
131#endif /* VBOX */
132static uint8_t *code_gen_buffer;
133static size_t code_gen_buffer_size;
134/* threshold to flush the translated code buffer */
135static size_t code_gen_buffer_max_size;
136static uint8_t *code_gen_ptr;
137
138#if !defined(CONFIG_USER_ONLY)
139# ifndef VBOX
140int phys_ram_fd;
141static int in_migration;
142# endif /* !VBOX */
143
144RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
145#endif
146
147CPUState *first_cpu;
148/* current CPU in the current thread. It is only valid inside
149 cpu_exec() */
150CPUState *cpu_single_env;
151/* 0 = Do not count executed instructions.
152 1 = Precise instruction counting.
153 2 = Adaptive rate instruction counting. */
154int use_icount = 0;
155/* Current instruction counter. While executing translated code this may
156 include some instructions that have not yet been executed. */
157int64_t qemu_icount;
158
159typedef struct PageDesc {
160 /* list of TBs intersecting this ram page */
161 TranslationBlock *first_tb;
162 /* in order to optimize self modifying code, we count the number
163 of lookups we do to a given page to use a bitmap */
164 unsigned int code_write_count;
165 uint8_t *code_bitmap;
166#if defined(CONFIG_USER_ONLY)
167 unsigned long flags;
168#endif
169} PageDesc;
170
171/* In system mode we want L1_MAP to be based on ram offsets,
172 while in user mode we want it to be based on virtual addresses. */
173#if !defined(CONFIG_USER_ONLY)
174#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
175# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
176#else
177# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
178#endif
179#else
180# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
181#endif
182
183/* Size of the L2 (and L3, etc) page tables. */
184#define L2_BITS 10
185#define L2_SIZE (1 << L2_BITS)
186
187/* The bits remaining after N lower levels of page tables. */
188#define P_L1_BITS_REM \
189 ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
190#define V_L1_BITS_REM \
191 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
192
193/* Size of the L1 page table. Avoid silly small sizes. */
194#if P_L1_BITS_REM < 4
195#define P_L1_BITS (P_L1_BITS_REM + L2_BITS)
196#else
197#define P_L1_BITS P_L1_BITS_REM
198#endif
199
200#if V_L1_BITS_REM < 4
201#define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
202#else
203#define V_L1_BITS V_L1_BITS_REM
204#endif
205
206#define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS)
207#define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
208
209#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
210#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
211
212size_t qemu_real_host_page_size;
213size_t qemu_host_page_bits;
214size_t qemu_host_page_size;
215uintptr_t qemu_host_page_mask;
216
217/* This is a multi-level map on the virtual address space.
218 The bottom level has pointers to PageDesc. */
219static void *l1_map[V_L1_SIZE];
220
221#if !defined(CONFIG_USER_ONLY)
222typedef struct PhysPageDesc {
223 /* offset in host memory of the page + io_index in the low bits */
224 ram_addr_t phys_offset;
225 ram_addr_t region_offset;
226} PhysPageDesc;
227
228/* This is a multi-level map on the physical address space.
229 The bottom level has pointers to PhysPageDesc. */
230static void *l1_phys_map[P_L1_SIZE];
231
232static void io_mem_init(void);
233
234/* io memory support */
235CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
236CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
237void *io_mem_opaque[IO_MEM_NB_ENTRIES];
238static char io_mem_used[IO_MEM_NB_ENTRIES];
239static int io_mem_watch;
240#endif
241
242#ifndef VBOX
243/* log support */
244#ifdef WIN32
245static const char *logfilename = "qemu.log";
246#else
247static const char *logfilename = "/tmp/qemu.log";
248#endif
249#endif /* !VBOX */
250FILE *logfile;
251int loglevel;
252#ifndef VBOX
253static int log_append = 0;
254#endif /* !VBOX */
255
256/* statistics */
257#ifndef VBOX
258#if !defined(CONFIG_USER_ONLY)
259static int tlb_flush_count;
260#endif
261static int tb_flush_count;
262static int tb_phys_invalidate_count;
263#else /* VBOX - Resettable U32 stats, see VBoxRecompiler.c. */
264uint32_t tlb_flush_count;
265uint32_t tb_flush_count;
266uint32_t tb_phys_invalidate_count;
267#endif /* VBOX */
268
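/* Make a host memory range executable (as well as readable and writable). */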
269#ifndef VBOX
270#ifdef _WIN32
271static void map_exec(void *addr, size_t size)
272{
273 DWORD old_protect;
274 VirtualProtect(addr, size,
275 PAGE_EXECUTE_READWRITE, &old_protect);
276
277}
278#else
279static void map_exec(void *addr, size_t size)
280{
281 uintptr_t start, end, page_size;
282
283 page_size = getpagesize();
284 start = (uintptr_t)addr;
285 start &= ~(page_size - 1);
286
287 end = (uintptr_t)addr + size;
288 end += page_size - 1;
289 end &= ~(page_size - 1);
290
291 mprotect((void *)start, end - start,
292 PROT_READ | PROT_WRITE | PROT_EXEC);
293}
294#endif
295#else /* VBOX */
296static void map_exec(void *addr, size_t size)
297{
298 RTMemProtect(addr, size,
299 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
300}
301#endif /* VBOX */
302
303static void page_init(void)
304{
305 /* NOTE: we can always suppose that qemu_host_page_size >=
306 TARGET_PAGE_SIZE */
307#ifdef VBOX
308 RTMemProtect(code_gen_buffer, code_gen_buffer_size,
309 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
310 qemu_real_host_page_size = PAGE_SIZE;
311#else /* !VBOX */
312#ifdef _WIN32
313 {
314 SYSTEM_INFO system_info;
315
316 GetSystemInfo(&system_info);
317 qemu_real_host_page_size = system_info.dwPageSize;
318 }
319#else
320 qemu_real_host_page_size = getpagesize();
321#endif
322#endif /* !VBOX */
323 if (qemu_host_page_size == 0)
324 qemu_host_page_size = qemu_real_host_page_size;
325 if (qemu_host_page_size < TARGET_PAGE_SIZE)
326 qemu_host_page_size = TARGET_PAGE_SIZE;
327 qemu_host_page_bits = 0;
328 while ((1 << qemu_host_page_bits) < VBOX_ONLY((int))qemu_host_page_size)
329 qemu_host_page_bits++;
330 qemu_host_page_mask = ~(qemu_host_page_size - 1);
331
332#ifndef VBOX /* We use other means to set reserved bit on our pages */
333#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
334 {
335#ifdef HAVE_KINFO_GETVMMAP
336 struct kinfo_vmentry *freep;
337 int i, cnt;
338
339 freep = kinfo_getvmmap(getpid(), &cnt);
340 if (freep) {
341 mmap_lock();
342 for (i = 0; i < cnt; i++) {
343 uintptr_t startaddr, endaddr;
344
345 startaddr = freep[i].kve_start;
346 endaddr = freep[i].kve_end;
347 if (h2g_valid(startaddr)) {
348 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
349
350 if (h2g_valid(endaddr)) {
351 endaddr = h2g(endaddr);
352 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
353 } else {
354#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
355 endaddr = ~0ul;
356 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
357#endif
358 }
359 }
360 }
361 free(freep);
362 mmap_unlock();
363 }
364#else
365 FILE *f;
366
367 last_brk = (uintptr_t)sbrk(0);
368
369 f = fopen("/compat/linux/proc/self/maps", "r");
370 if (f) {
371 mmap_lock();
372
373 do {
374 uintptr_t startaddr, endaddr;
375 int n;
376
377 n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
378
379 if (n == 2 && h2g_valid(startaddr)) {
380 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
381
382 if (h2g_valid(endaddr)) {
383 endaddr = h2g(endaddr);
384 } else {
385 endaddr = ~0ul;
386 }
387 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
388 }
389 } while (!feof(f));
390
391 fclose(f);
392 mmap_unlock();
393 }
394#endif
395 }
396#endif
397#endif /* !VBOX */
398}
399
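/* Walk the multi-level l1_map and return the PageDesc for the given page
   index, allocating the intermediate tables and the leaf array when 'alloc'
   is non-zero; returns NULL if the entry is missing and 'alloc' is zero. */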
400static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
401{
402 PageDesc *pd;
403 void **lp;
404 int i;
405
406#if defined(CONFIG_USER_ONLY)
407 /* We can't use qemu_malloc because it may recurse into a locked mutex. */
408# define ALLOC(P, SIZE) \
409 do { \
410 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
411 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
412 } while (0)
413#else
414# define ALLOC(P, SIZE) \
415 do { P = qemu_mallocz(SIZE); } while (0)
416#endif
417
418 /* Level 1. Always allocated. */
419 lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
420
421 /* Level 2..N-1. */
422 for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
423 void **p = *lp;
424
425 if (p == NULL) {
426 if (!alloc) {
427 return NULL;
428 }
429 ALLOC(p, sizeof(void *) * L2_SIZE);
430 *lp = p;
431 }
432
433 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
434 }
435
436 pd = *lp;
437 if (pd == NULL) {
438 if (!alloc) {
439 return NULL;
440 }
441 ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
442 *lp = pd;
443 }
444
445#undef ALLOC
446
447 return pd + (index & (L2_SIZE - 1));
448}
449
450static inline PageDesc *page_find(tb_page_addr_t index)
451{
452 return page_find_alloc(index, 0);
453}
454
455#if !defined(CONFIG_USER_ONLY)
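/* Look up the PhysPageDesc for a physical page index in l1_phys_map,
   optionally allocating the intermediate tables; freshly allocated leaf
   entries are initialized to IO_MEM_UNASSIGNED. */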
456static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
457{
458 PhysPageDesc *pd;
459 void **lp;
460 int i;
461
462 /* Level 1. Always allocated. */
463 lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));
464
465 /* Level 2..N-1. */
466 for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
467 void **p = *lp;
468 if (p == NULL) {
469 if (!alloc) {
470 return NULL;
471 }
472 *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
473 }
474 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
475 }
476
477 pd = *lp;
478 if (pd == NULL) {
479 int i;
480
481 if (!alloc) {
482 return NULL;
483 }
484
485 *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
486
487 for (i = 0; i < L2_SIZE; i++) {
488 pd[i].phys_offset = IO_MEM_UNASSIGNED;
489 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
490 }
491 }
492
493 return pd + (index & (L2_SIZE - 1));
494}
495
496static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
497{
498 return phys_page_find_alloc(index, 0);
499}
500
501static void tlb_protect_code(ram_addr_t ram_addr);
502static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
503 target_ulong vaddr);
504#define mmap_lock() do { } while(0)
505#define mmap_unlock() do { } while(0)
506#endif
507
508#ifdef VBOX /* We don't need such a huge codegen buffer size, as we execute
509 most of the code in raw or HM mode. */
510#define DEFAULT_CODE_GEN_BUFFER_SIZE (8 * 1024 * 1024)
511#else /* !VBOX */
512#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
513#endif /* !VBOX */
514
515#if defined(CONFIG_USER_ONLY)
516/* Currently it is not recommended to allocate big chunks of data in
517 user mode. This will change when a dedicated libc is used. */
518#define USE_STATIC_CODE_GEN_BUFFER
519#endif
520
521#if defined(VBOX) && defined(USE_STATIC_CODE_GEN_BUFFER)
522# error "VBox allocates codegen buffer dynamically"
523#endif
524
525#ifdef USE_STATIC_CODE_GEN_BUFFER
526static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
527 __attribute__((aligned (CODE_GEN_ALIGN)));
528#endif
529
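/* Allocate the translated-code buffer and the TB descriptor array. The
   buffer placement honours per-host branch-range constraints; VBox always
   allocates it dynamically via RTMemExecAlloc. */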
530static void code_gen_alloc(uintptr_t tb_size)
531{
532#ifdef USE_STATIC_CODE_GEN_BUFFER
533 code_gen_buffer = static_code_gen_buffer;
534 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
535 map_exec(code_gen_buffer, code_gen_buffer_size);
536#else
537# ifdef VBOX
538 /* We cannot use phys_ram_size here, as it's 0 now,
539 * it only gets initialized once the RAM registration callback
540 * (REMR3NotifyPhysRamRegister()) is called.
541 */
542 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
543# else /* !VBOX */
544 code_gen_buffer_size = tb_size;
545 if (code_gen_buffer_size == 0) {
546#if defined(CONFIG_USER_ONLY)
547 /* in user mode, phys_ram_size is not meaningful */
548 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
549#else
550 /* XXX: needs adjustments */
551 code_gen_buffer_size = (uintptr_t)(ram_size / 4);
552#endif
553 }
554 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
555 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
556# endif /* !VBOX */
557 /* The code gen buffer location may have constraints depending on
558 the host cpu and OS */
559# ifdef VBOX
560 code_gen_buffer = RTMemExecAlloc(code_gen_buffer_size);
561
562 if (!code_gen_buffer) {
563 LogRel(("REM: failed to allocate codegen buffer %lld\n",
564 code_gen_buffer_size));
565 return;
566 }
567# else /* !VBOX */
568#if defined(__linux__)
569 {
570 int flags;
571 void *start = NULL;
572
573 flags = MAP_PRIVATE | MAP_ANONYMOUS;
574#if defined(__x86_64__)
575 flags |= MAP_32BIT;
576 /* Cannot map more than that */
577 if (code_gen_buffer_size > (800 * 1024 * 1024))
578 code_gen_buffer_size = (800 * 1024 * 1024);
579#elif defined(__sparc_v9__)
580 // Map the buffer below 2G, so we can use direct calls and branches
581 flags |= MAP_FIXED;
582 start = (void *) 0x60000000UL;
583 if (code_gen_buffer_size > (512 * 1024 * 1024))
584 code_gen_buffer_size = (512 * 1024 * 1024);
585#elif defined(__arm__)
586 /* Map the buffer below 32M, so we can use direct calls and branches */
587 flags |= MAP_FIXED;
588 start = (void *) 0x01000000UL;
589 if (code_gen_buffer_size > 16 * 1024 * 1024)
590 code_gen_buffer_size = 16 * 1024 * 1024;
591#elif defined(__s390x__)
592 /* Map the buffer so that we can use direct calls and branches. */
593 /* We have a +- 4GB range on the branches; leave some slop. */
594 if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
595 code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
596 }
597 start = (void *)0x90000000UL;
598#endif
599 code_gen_buffer = mmap(start, code_gen_buffer_size,
600 PROT_WRITE | PROT_READ | PROT_EXEC,
601 flags, -1, 0);
602 if (code_gen_buffer == MAP_FAILED) {
603 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
604 exit(1);
605 }
606 }
607#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
608 {
609 int flags;
610 void *addr = NULL;
611 flags = MAP_PRIVATE | MAP_ANONYMOUS;
612#if defined(__x86_64__)
613 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
614 * 0x40000000 is free */
615 flags |= MAP_FIXED;
616 addr = (void *)0x40000000;
617 /* Cannot map more than that */
618 if (code_gen_buffer_size > (800 * 1024 * 1024))
619 code_gen_buffer_size = (800 * 1024 * 1024);
620#endif
621 code_gen_buffer = mmap(addr, code_gen_buffer_size,
622 PROT_WRITE | PROT_READ | PROT_EXEC,
623 flags, -1, 0);
624 if (code_gen_buffer == MAP_FAILED) {
625 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
626 exit(1);
627 }
628 }
629#else
630 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
631 map_exec(code_gen_buffer, code_gen_buffer_size);
632#endif
633# endif /* !VBOX */
634#endif /* !USE_STATIC_CODE_GEN_BUFFER */
635#ifndef VBOX /** @todo r=bird: why are we different? */
636 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
637#else
638 map_exec(code_gen_prologue, _1K);
639#endif
640 code_gen_buffer_max_size = code_gen_buffer_size -
641 (TCG_MAX_OP_SIZE * OPC_MAX_SIZE);
642 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
643 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
644}
645
646/* Must be called before using the QEMU cpus. 'tb_size' is the size
647 (in bytes) allocated to the translation buffer. Zero means default
648 size. */
649void cpu_exec_init_all(uintptr_t tb_size)
650{
651 cpu_gen_init();
652 code_gen_alloc(tb_size);
653 code_gen_ptr = code_gen_buffer;
654 page_init();
655#if !defined(CONFIG_USER_ONLY)
656 io_mem_init();
657#endif
658#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
659 /* There's no guest base to take into account, so go ahead and
660 initialize the prologue now. */
661 tcg_prologue_init(&tcg_ctx);
662#endif
663}
664
665#ifndef VBOX
666#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
667
668static int cpu_common_post_load(void *opaque, int version_id)
669{
670 CPUState *env = opaque;
671
672 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
673 version_id is increased. */
674 env->interrupt_request &= ~0x01;
675 tlb_flush(env, 1);
676
677 return 0;
678}
679
680static const VMStateDescription vmstate_cpu_common = {
681 .name = "cpu_common",
682 .version_id = 1,
683 .minimum_version_id = 1,
684 .minimum_version_id_old = 1,
685 .post_load = cpu_common_post_load,
686 .fields = (VMStateField []) {
687 VMSTATE_UINT32(halted, CPUState),
688 VMSTATE_UINT32(interrupt_request, CPUState),
689 VMSTATE_END_OF_LIST()
690 }
691};
692#endif
693
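/* Return the CPUState with the given cpu_index, or NULL if none matches. */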
694CPUState *qemu_get_cpu(int cpu)
695{
696 CPUState *env = first_cpu;
697
698 while (env) {
699 if (env->cpu_index == cpu)
700 break;
701 env = env->next_cpu;
702 }
703
704 return env;
705}
706
707#endif /* !VBOX */
708
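/* Append a new CPUState to the global CPU list, assign it the next
   cpu_index and initialize its breakpoint and watchpoint queues. */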
709void cpu_exec_init(CPUState *env)
710{
711 CPUState **penv;
712 int cpu_index;
713
714#if defined(CONFIG_USER_ONLY)
715 cpu_list_lock();
716#endif
717 env->next_cpu = NULL;
718 penv = &first_cpu;
719 cpu_index = 0;
720 while (*penv != NULL) {
721 penv = &(*penv)->next_cpu;
722 cpu_index++;
723 }
724 env->cpu_index = cpu_index;
725 env->numa_node = 0;
726 QTAILQ_INIT(&env->breakpoints);
727 QTAILQ_INIT(&env->watchpoints);
728 *penv = env;
729#ifndef VBOX
730#if defined(CONFIG_USER_ONLY)
731 cpu_list_unlock();
732#endif
733#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
734 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
735 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
736 cpu_save, cpu_load, env);
737#endif
738#endif /* !VBOX */
739}
740
741static inline void invalidate_page_bitmap(PageDesc *p)
742{
743 if (p->code_bitmap) {
744 qemu_free(p->code_bitmap);
745 p->code_bitmap = NULL;
746 }
747 p->code_write_count = 0;
748}
749
750/* Set to NULL all the 'first_tb' fields in all PageDescs. */
751
752static void page_flush_tb_1 (int level, void **lp)
753{
754 int i;
755
756 if (*lp == NULL) {
757 return;
758 }
759 if (level == 0) {
760 PageDesc *pd = *lp;
761 for (i = 0; i < L2_SIZE; ++i) {
762 pd[i].first_tb = NULL;
763 invalidate_page_bitmap(pd + i);
764 }
765 } else {
766 void **pp = *lp;
767 for (i = 0; i < L2_SIZE; ++i) {
768 page_flush_tb_1 (level - 1, pp + i);
769 }
770 }
771}
772
773static void page_flush_tb(void)
774{
775 int i;
776 for (i = 0; i < V_L1_SIZE; i++) {
777 page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
778 }
779}
780
781/* flush all the translation blocks */
782/* XXX: tb_flush is currently not thread safe */
783void tb_flush(CPUState *env1)
784{
785 CPUState *env;
786#ifdef VBOX
787 STAM_PROFILE_START(&env1->StatTbFlush, a);
788#endif
789#if defined(DEBUG_FLUSH)
790 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
791 (unsigned long)(code_gen_ptr - code_gen_buffer),
792 nb_tbs, nb_tbs > 0 ?
793 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
794#endif
795 if ((uintptr_t)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
796 cpu_abort(env1, "Internal error: code buffer overflow\n");
797
798 nb_tbs = 0;
799
800 for(env = first_cpu; env != NULL; env = env->next_cpu) {
801 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
802 }
803
804 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
805 page_flush_tb();
806
807 code_gen_ptr = code_gen_buffer;
808 /* XXX: flush processor icache at this point if cache flush is
809 expensive */
810 tb_flush_count++;
811#ifdef VBOX
812 STAM_PROFILE_STOP(&env1->StatTbFlush, a);
813#endif
814}
815
816#ifdef DEBUG_TB_CHECK
817
818static void tb_invalidate_check(target_ulong address)
819{
820 TranslationBlock *tb;
821 int i;
822 address &= TARGET_PAGE_MASK;
823 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
824 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
825 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
826 address >= tb->pc + tb->size)) {
827 printf("ERROR invalidate: address=" TARGET_FMT_lx
828 " PC=%08lx size=%04x\n",
829 address, (long)tb->pc, tb->size);
830 }
831 }
832 }
833}
834
835/* verify that all the pages have correct rights for code */
836static void tb_page_check(void)
837{
838 TranslationBlock *tb;
839 int i, flags1, flags2;
840
841 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
842 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
843 flags1 = page_get_flags(tb->pc);
844 flags2 = page_get_flags(tb->pc + tb->size - 1);
845 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
846 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
847 (long)tb->pc, tb->size, flags1, flags2);
848 }
849 }
850 }
851}
852
853#endif
854
855/* invalidate one TB */
856static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
857 int next_offset)
858{
859 TranslationBlock *tb1;
860 for(;;) {
861 tb1 = *ptb;
862 if (tb1 == tb) {
863 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
864 break;
865 }
866 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
867 }
868}
869
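/* Unlink 'tb' from a page's TB list; the low two bits of each list pointer
   encode which of the TB's two pages the link belongs to. */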
870static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
871{
872 TranslationBlock *tb1;
873 unsigned int n1;
874
875 for(;;) {
876 tb1 = *ptb;
877 n1 = (intptr_t)tb1 & 3;
878 tb1 = (TranslationBlock *)((intptr_t)tb1 & ~3);
879 if (tb1 == tb) {
880 *ptb = tb1->page_next[n1];
881 break;
882 }
883 ptb = &tb1->page_next[n1];
884 }
885}
886
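/* Remove jump 'n' of 'tb' from the circular list of TBs jumping to its
   target (the entry tagged with 2 marks the list head). */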
887static inline void tb_jmp_remove(TranslationBlock *tb, int n)
888{
889 TranslationBlock *tb1, **ptb;
890 unsigned int n1;
891
892 ptb = &tb->jmp_next[n];
893 tb1 = *ptb;
894 if (tb1) {
895 /* find tb(n) in circular list */
896 for(;;) {
897 tb1 = *ptb;
898 n1 = (intptr_t)tb1 & 3;
899 tb1 = (TranslationBlock *)((intptr_t)tb1 & ~3);
900 if (n1 == n && tb1 == tb)
901 break;
902 if (n1 == 2) {
903 ptb = &tb1->jmp_first;
904 } else {
905 ptb = &tb1->jmp_next[n1];
906 }
907 }
908 /* now we can suppress tb(n) from the list */
909 *ptb = tb->jmp_next[n];
910
911 tb->jmp_next[n] = NULL;
912 }
913}
914
915/* reset the jump entry 'n' of a TB so that it is not chained to
916 another TB */
917static inline void tb_reset_jump(TranslationBlock *tb, int n)
918{
919 tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
920}
921
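/* Remove a TB from the physical hash table, the page lists and the per-CPU
   jump caches, and unchain any TBs that jump to it. */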
922void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
923{
924 CPUState *env;
925 PageDesc *p;
926 unsigned int h, n1;
927 tb_page_addr_t phys_pc;
928 TranslationBlock *tb1, *tb2;
929
930 /* remove the TB from the hash list */
931 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
932 h = tb_phys_hash_func(phys_pc);
933 tb_remove(&tb_phys_hash[h], tb,
934 offsetof(TranslationBlock, phys_hash_next));
935
936 /* remove the TB from the page list */
937 if (tb->page_addr[0] != page_addr) {
938 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
939 tb_page_remove(&p->first_tb, tb);
940 invalidate_page_bitmap(p);
941 }
942 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
943 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
944 tb_page_remove(&p->first_tb, tb);
945 invalidate_page_bitmap(p);
946 }
947
948 tb_invalidated_flag = 1;
949
950 /* remove the TB from the hash list */
951 h = tb_jmp_cache_hash_func(tb->pc);
952 for(env = first_cpu; env != NULL; env = env->next_cpu) {
953 if (env->tb_jmp_cache[h] == tb)
954 env->tb_jmp_cache[h] = NULL;
955 }
956
957 /* suppress this TB from the two jump lists */
958 tb_jmp_remove(tb, 0);
959 tb_jmp_remove(tb, 1);
960
961 /* suppress any remaining jumps to this TB */
962 tb1 = tb->jmp_first;
963 for(;;) {
964 n1 = (intptr_t)tb1 & 3;
965 if (n1 == 2)
966 break;
967 tb1 = (TranslationBlock *)((intptr_t)tb1 & ~3);
968 tb2 = tb1->jmp_next[n1];
969 tb_reset_jump(tb1, n1);
970 tb1->jmp_next[n1] = NULL;
971 tb1 = tb2;
972 }
973 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2); /* fail safe */
974
975 tb_phys_invalidate_count++;
976}
977
978#ifdef VBOX
979
980void tb_invalidate_virt(CPUState *env, uint32_t eip)
981{
982# if 1
983 tb_flush(env);
984# else
985 uint8_t *cs_base, *pc;
986 unsigned int flags, h, phys_pc;
987 TranslationBlock *tb, **ptb;
988
989 flags = env->hflags;
990 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
991 cs_base = env->segs[R_CS].base;
992 pc = cs_base + eip;
993
994 tb = tb_find(&ptb, (uintptr_t)pc, (uintptr_t)cs_base,
995 flags);
996
997 if(tb)
998 {
999# ifdef DEBUG
1000 printf("invalidating TB (%08X) at %08X\n", tb, eip);
1001# endif
1002 tb_invalidate(tb);
1003 //Note: this will leak TBs, but the whole cache will be flushed
1004 // when it happens too often
1005 tb->pc = 0;
1006 tb->cs_base = 0;
1007 tb->flags = 0;
1008 }
1009# endif
1010}
1011
1012# ifdef VBOX_STRICT
1013/**
1014 * Gets the page offset.
1015 */
1016ram_addr_t get_phys_page_offset(target_ulong addr)
1017{
1018 PhysPageDesc *p = phys_page_find(addr >> TARGET_PAGE_BITS);
1019 return p ? p->phys_offset : 0;
1020}
1021# endif /* VBOX_STRICT */
1022
1023#endif /* VBOX */
1024
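/* Set 'len' bits starting at bit 'start' in the bitmap 'tab'. */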
1025static inline void set_bits(uint8_t *tab, int start, int len)
1026{
1027 int end, mask, end1;
1028
1029 end = start + len;
1030 tab += start >> 3;
1031 mask = 0xff << (start & 7);
1032 if ((start & ~7) == (end & ~7)) {
1033 if (start < end) {
1034 mask &= ~(0xff << (end & 7));
1035 *tab |= mask;
1036 }
1037 } else {
1038 *tab++ |= mask;
1039 start = (start + 8) & ~7;
1040 end1 = end & ~7;
1041 while (start < end1) {
1042 *tab++ = 0xff;
1043 start += 8;
1044 }
1045 if (start < end) {
1046 mask = ~(0xff << (end & 7));
1047 *tab |= mask;
1048 }
1049 }
1050}
1051
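/* Build the per-page code bitmap marking which bytes of the page are
   covered by translated code; used by tb_invalidate_phys_page_fast(). */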
1052static void build_page_bitmap(PageDesc *p)
1053{
1054 int n, tb_start, tb_end;
1055 TranslationBlock *tb;
1056
1057 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
1058
1059 tb = p->first_tb;
1060 while (tb != NULL) {
1061 n = (intptr_t)tb & 3;
1062 tb = (TranslationBlock *)((intptr_t)tb & ~3);
1063 /* NOTE: this is subtle as a TB may span two physical pages */
1064 if (n == 0) {
1065 /* NOTE: tb_end may be after the end of the page, but
1066 it is not a problem */
1067 tb_start = tb->pc & ~TARGET_PAGE_MASK;
1068 tb_end = tb_start + tb->size;
1069 if (tb_end > TARGET_PAGE_SIZE)
1070 tb_end = TARGET_PAGE_SIZE;
1071 } else {
1072 tb_start = 0;
1073 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1074 }
1075 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
1076 tb = tb->page_next[n];
1077 }
1078}
1079
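/* Translate one block starting at pc/cs_base with the given flags. If the
   TB pool or the code buffer is full, everything is flushed first and
   tb_invalidated_flag is set. */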
1080TranslationBlock *tb_gen_code(CPUState *env,
1081 target_ulong pc, target_ulong cs_base,
1082 int flags, int cflags)
1083{
1084 TranslationBlock *tb;
1085 uint8_t *tc_ptr;
1086 tb_page_addr_t phys_pc, phys_page2;
1087 target_ulong virt_page2;
1088 int code_gen_size;
1089
1090 phys_pc = get_page_addr_code(env, pc);
1091 tb = tb_alloc(pc);
1092 if (!tb) {
1093 /* flush must be done */
1094 tb_flush(env);
1095 /* cannot fail at this point */
1096 tb = tb_alloc(pc);
1097 /* Don't forget to invalidate previous TB info. */
1098 tb_invalidated_flag = 1;
1099 }
1100 tc_ptr = code_gen_ptr;
1101 tb->tc_ptr = tc_ptr;
1102 tb->cs_base = cs_base;
1103 tb->flags = flags;
1104 tb->cflags = cflags;
1105 cpu_gen_code(env, tb, &code_gen_size);
1106 code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
1107
1108 /* check next page if needed */
1109 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1110 phys_page2 = -1;
1111 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1112 phys_page2 = get_page_addr_code(env, virt_page2);
1113 }
1114 tb_link_page(tb, phys_pc, phys_page2);
1115 return tb;
1116}
1117
1118/* invalidate all TBs which intersect with the target physical page
1119 starting in the range [start, end). NOTE: start and end must refer to
1120 the same physical page. 'is_cpu_write_access' should be true if called
1121 from a real cpu write access: the virtual CPU will exit the current
1122 TB if code is modified inside this TB. */
1123void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
1124 int is_cpu_write_access)
1125{
1126 TranslationBlock *tb, *tb_next, *saved_tb;
1127 CPUState *env = cpu_single_env;
1128 tb_page_addr_t tb_start, tb_end;
1129 PageDesc *p;
1130 int n;
1131#ifdef TARGET_HAS_PRECISE_SMC
1132 int current_tb_not_found = is_cpu_write_access;
1133 TranslationBlock *current_tb = NULL;
1134 int current_tb_modified = 0;
1135 target_ulong current_pc = 0;
1136 target_ulong current_cs_base = 0;
1137 int current_flags = 0;
1138#endif /* TARGET_HAS_PRECISE_SMC */
1139
1140 p = page_find(start >> TARGET_PAGE_BITS);
1141 if (!p)
1142 return;
1143 if (!p->code_bitmap &&
1144 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1145 is_cpu_write_access) {
1146 /* build code bitmap */
1147 build_page_bitmap(p);
1148 }
1149
1150 /* we remove all the TBs in the range [start, end) */
1151 /* XXX: see if in some cases it could be faster to invalidate all the code */
1152 tb = p->first_tb;
1153 while (tb != NULL) {
1154 n = (intptr_t)tb & 3;
1155 tb = (TranslationBlock *)((intptr_t)tb & ~3);
1156 tb_next = tb->page_next[n];
1157 /* NOTE: this is subtle as a TB may span two physical pages */
1158 if (n == 0) {
1159 /* NOTE: tb_end may be after the end of the page, but
1160 it is not a problem */
1161 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1162 tb_end = tb_start + tb->size;
1163 } else {
1164 tb_start = tb->page_addr[1];
1165 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1166 }
1167 if (!(tb_end <= start || tb_start >= end)) {
1168#ifdef TARGET_HAS_PRECISE_SMC
1169 if (current_tb_not_found) {
1170 current_tb_not_found = 0;
1171 current_tb = NULL;
1172 if (env->mem_io_pc) {
1173 /* now we have a real cpu fault */
1174 current_tb = tb_find_pc(env->mem_io_pc);
1175 }
1176 }
1177 if (current_tb == tb &&
1178 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1179 /* If we are modifying the current TB, we must stop
1180 its execution. We could be more precise by checking
1181 that the modification is after the current PC, but it
1182 would require a specialized function to partially
1183 restore the CPU state */
1184
1185 current_tb_modified = 1;
1186 cpu_restore_state(current_tb, env,
1187 env->mem_io_pc, NULL);
1188 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1189 &current_flags);
1190 }
1191#endif /* TARGET_HAS_PRECISE_SMC */
1192 /* we need to do that to handle the case where a signal
1193 occurs while doing tb_phys_invalidate() */
1194 saved_tb = NULL;
1195 if (env) {
1196 saved_tb = env->current_tb;
1197 env->current_tb = NULL;
1198 }
1199 tb_phys_invalidate(tb, -1);
1200 if (env) {
1201 env->current_tb = saved_tb;
1202 if (env->interrupt_request && env->current_tb)
1203 cpu_interrupt(env, env->interrupt_request);
1204 }
1205 }
1206 tb = tb_next;
1207 }
1208#if !defined(CONFIG_USER_ONLY)
1209 /* if no code remaining, no need to continue to use slow writes */
1210 if (!p->first_tb) {
1211 invalidate_page_bitmap(p);
1212 if (is_cpu_write_access) {
1213 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1214 }
1215 }
1216#endif
1217#ifdef TARGET_HAS_PRECISE_SMC
1218 if (current_tb_modified) {
1219 /* we generate a block containing just the instruction
1220 modifying the memory. It will ensure that it cannot modify
1221 itself */
1222 env->current_tb = NULL;
1223 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1224 cpu_resume_from_signal(env, NULL);
1225 }
1226#endif
1227}
1228
1229/* len must be <= 8 and start must be a multiple of len */
1230static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1231{
1232 PageDesc *p;
1233 int offset, b;
1234#if 0
1235 if (1) {
1236 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1237 cpu_single_env->mem_io_vaddr, len,
1238 cpu_single_env->eip,
1239 cpu_single_env->eip + (intptr_t)cpu_single_env->segs[R_CS].base);
1240 }
1241#endif
1242 p = page_find(start >> TARGET_PAGE_BITS);
1243 if (!p)
1244 return;
1245 if (p->code_bitmap) {
1246 offset = start & ~TARGET_PAGE_MASK;
1247 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1248 if (b & ((1 << len) - 1))
1249 goto do_invalidate;
1250 } else {
1251 do_invalidate:
1252 tb_invalidate_phys_page_range(start, start + len, 1);
1253 }
1254}
1255
1256#if !defined(CONFIG_SOFTMMU)
1257static void tb_invalidate_phys_page(tb_page_addr_t addr,
1258 uintptr_t pc, void *puc)
1259{
1260 TranslationBlock *tb;
1261 PageDesc *p;
1262 int n;
1263#ifdef TARGET_HAS_PRECISE_SMC
1264 TranslationBlock *current_tb = NULL;
1265 CPUState *env = cpu_single_env;
1266 int current_tb_modified = 0;
1267 target_ulong current_pc = 0;
1268 target_ulong current_cs_base = 0;
1269 int current_flags = 0;
1270#endif
1271
1272 addr &= TARGET_PAGE_MASK;
1273 p = page_find(addr >> TARGET_PAGE_BITS);
1274 if (!p)
1275 return;
1276 tb = p->first_tb;
1277#ifdef TARGET_HAS_PRECISE_SMC
1278 if (tb && pc != 0) {
1279 current_tb = tb_find_pc(pc);
1280 }
1281#endif
1282 while (tb != NULL) {
1283 n = (intptr_t)tb & 3;
1284 tb = (TranslationBlock *)((intptr_t)tb & ~3);
1285#ifdef TARGET_HAS_PRECISE_SMC
1286 if (current_tb == tb &&
1287 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1288 /* If we are modifying the current TB, we must stop
1289 its execution. We could be more precise by checking
1290 that the modification is after the current PC, but it
1291 would require a specialized function to partially
1292 restore the CPU state */
1293
1294 current_tb_modified = 1;
1295 cpu_restore_state(current_tb, env, pc, puc);
1296 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1297 &current_flags);
1298 }
1299#endif /* TARGET_HAS_PRECISE_SMC */
1300 tb_phys_invalidate(tb, addr);
1301 tb = tb->page_next[n];
1302 }
1303 p->first_tb = NULL;
1304#ifdef TARGET_HAS_PRECISE_SMC
1305 if (current_tb_modified) {
1306 /* we generate a block containing just the instruction
1307 modifying the memory. It will ensure that it cannot modify
1308 itself */
1309 env->current_tb = NULL;
1310 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1311 cpu_resume_from_signal(env, puc);
1312 }
1313#endif
1314}
1315#endif
1316
1317/* add the tb to the target page and protect it if necessary */
1318static inline void tb_alloc_page(TranslationBlock *tb,
1319 unsigned int n, tb_page_addr_t page_addr)
1320{
1321 PageDesc *p;
1322 TranslationBlock *last_first_tb;
1323
1324 tb->page_addr[n] = page_addr;
1325 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
1326 tb->page_next[n] = p->first_tb;
1327 last_first_tb = p->first_tb;
1328 p->first_tb = (TranslationBlock *)((intptr_t)tb | n);
1329 invalidate_page_bitmap(p);
1330
1331#if defined(TARGET_HAS_SMC) || 1
1332
1333#if defined(CONFIG_USER_ONLY)
1334 if (p->flags & PAGE_WRITE) {
1335 target_ulong addr;
1336 PageDesc *p2;
1337 int prot;
1338
1339 /* force the host page as non writable (writes will have a
1340 page fault + mprotect overhead) */
1341 page_addr &= qemu_host_page_mask;
1342 prot = 0;
1343 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1344 addr += TARGET_PAGE_SIZE) {
1345
1346 p2 = page_find (addr >> TARGET_PAGE_BITS);
1347 if (!p2)
1348 continue;
1349 prot |= p2->flags;
1350 p2->flags &= ~PAGE_WRITE;
1351 }
1352 mprotect(g2h(page_addr), qemu_host_page_size,
1353 (prot & PAGE_BITS) & ~PAGE_WRITE);
1354#ifdef DEBUG_TB_INVALIDATE
1355 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1356 page_addr);
1357#endif
1358 }
1359#else
1360 /* if some code is already present, then the pages are already
1361 protected. So we handle the case where only the first TB is
1362 allocated in a physical page */
1363 if (!last_first_tb) {
1364 tlb_protect_code(page_addr);
1365 }
1366#endif
1367
1368#endif /* TARGET_HAS_SMC */
1369}
1370
1371/* Allocate a new translation block. Flush the translation buffer if
1372 too many translation blocks or too much generated code. */
1373TranslationBlock *tb_alloc(target_ulong pc)
1374{
1375 TranslationBlock *tb;
1376
1377 if (nb_tbs >= code_gen_max_blocks ||
1378 (code_gen_ptr - code_gen_buffer) >= VBOX_ONLY((uintptr_t))code_gen_buffer_max_size)
1379 return NULL;
1380 tb = &tbs[nb_tbs++];
1381 tb->pc = pc;
1382 tb->cflags = 0;
1383 return tb;
1384}
1385
1386void tb_free(TranslationBlock *tb)
1387{
1388 /* In practice this is mostly used for single-use temporary TBs.
1389 Ignore the hard cases and just back up if this TB happens to
1390 be the last one generated. */
1391 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1392 code_gen_ptr = tb->tc_ptr;
1393 nb_tbs--;
1394 }
1395}
1396
1397/* add a new TB and link it to the physical page tables. phys_page2 is
1398 (-1) to indicate that only one page contains the TB. */
1399void tb_link_page(TranslationBlock *tb,
1400 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
1401{
1402 unsigned int h;
1403 TranslationBlock **ptb;
1404
1405 /* Grab the mmap lock to stop another thread invalidating this TB
1406 before we are done. */
1407 mmap_lock();
1408 /* add in the physical hash table */
1409 h = tb_phys_hash_func(phys_pc);
1410 ptb = &tb_phys_hash[h];
1411 tb->phys_hash_next = *ptb;
1412 *ptb = tb;
1413
1414 /* add in the page list */
1415 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1416 if (phys_page2 != -1)
1417 tb_alloc_page(tb, 1, phys_page2);
1418 else
1419 tb->page_addr[1] = -1;
1420
1421 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
1422 tb->jmp_next[0] = NULL;
1423 tb->jmp_next[1] = NULL;
1424
1425 /* init original jump addresses */
1426 if (tb->tb_next_offset[0] != 0xffff)
1427 tb_reset_jump(tb, 0);
1428 if (tb->tb_next_offset[1] != 0xffff)
1429 tb_reset_jump(tb, 1);
1430
1431#ifdef DEBUG_TB_CHECK
1432 tb_page_check();
1433#endif
1434 mmap_unlock();
1435}
1436
1437/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1438 tb[1].tc_ptr. Return NULL if not found */
1439TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
1440{
1441 int m_min, m_max, m;
1442 uintptr_t v;
1443 TranslationBlock *tb;
1444
1445 if (nb_tbs <= 0)
1446 return NULL;
1447 if (tc_ptr < (uintptr_t)code_gen_buffer ||
1448 tc_ptr >= (uintptr_t)code_gen_ptr)
1449 return NULL;
1450 /* binary search (cf Knuth) */
1451 m_min = 0;
1452 m_max = nb_tbs - 1;
1453 while (m_min <= m_max) {
1454 m = (m_min + m_max) >> 1;
1455 tb = &tbs[m];
1456 v = (uintptr_t)tb->tc_ptr;
1457 if (v == tc_ptr)
1458 return tb;
1459 else if (tc_ptr < v) {
1460 m_max = m - 1;
1461 } else {
1462 m_min = m + 1;
1463 }
1464 }
1465 return &tbs[m_max];
1466}
1467
1468static void tb_reset_jump_recursive(TranslationBlock *tb);
1469
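/* Detach jump 'n' of 'tb': remove it from the target TB's incoming-jump
   list, patch the generated jump back to its sequential successor, then
   recurse into the target TB. */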
1470static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1471{
1472 TranslationBlock *tb1, *tb_next, **ptb;
1473 unsigned int n1;
1474
1475 tb1 = tb->jmp_next[n];
1476 if (tb1 != NULL) {
1477 /* find head of list */
1478 for(;;) {
1479 n1 = (intptr_t)tb1 & 3;
1480 tb1 = (TranslationBlock *)((intptr_t)tb1 & ~3);
1481 if (n1 == 2)
1482 break;
1483 tb1 = tb1->jmp_next[n1];
1484 }
1485 /* we are now sure that tb jumps to tb1 */
1486 tb_next = tb1;
1487
1488 /* remove tb from the jmp_first list */
1489 ptb = &tb_next->jmp_first;
1490 for(;;) {
1491 tb1 = *ptb;
1492 n1 = (intptr_t)tb1 & 3;
1493 tb1 = (TranslationBlock *)((intptr_t)tb1 & ~3);
1494 if (n1 == n && tb1 == tb)
1495 break;
1496 ptb = &tb1->jmp_next[n1];
1497 }
1498 *ptb = tb->jmp_next[n];
1499 tb->jmp_next[n] = NULL;
1500
1501 /* suppress the jump to next tb in generated code */
1502 tb_reset_jump(tb, n);
1503
1504 /* suppress jumps in the tb on which we could have jumped */
1505 tb_reset_jump_recursive(tb_next);
1506 }
1507}
1508
1509static void tb_reset_jump_recursive(TranslationBlock *tb)
1510{
1511 tb_reset_jump_recursive2(tb, 0);
1512 tb_reset_jump_recursive2(tb, 1);
1513}
1514
1515#if defined(TARGET_HAS_ICE)
1516#if defined(CONFIG_USER_ONLY)
1517static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1518{
1519 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1520}
1521#else
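/* Invalidate the translated code at a breakpoint address by first resolving
   the guest-virtual pc to its ram address. */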
1522static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1523{
1524 target_phys_addr_t addr;
1525 target_ulong pd;
1526 ram_addr_t ram_addr;
1527 PhysPageDesc *p;
1528
1529 addr = cpu_get_phys_page_debug(env, pc);
1530 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1531 if (!p) {
1532 pd = IO_MEM_UNASSIGNED;
1533 } else {
1534 pd = p->phys_offset;
1535 }
1536 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1537 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1538}
1539#endif
1540#endif /* TARGET_HAS_ICE */
1541
1542#if defined(CONFIG_USER_ONLY)
1543void cpu_watchpoint_remove_all(CPUState *env, int mask)
1544
1545{
1546}
1547
1548int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1549 int flags, CPUWatchpoint **watchpoint)
1550{
1551 return -ENOSYS;
1552}
1553#else
1554/* Add a watchpoint. */
1555int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1556 int flags, CPUWatchpoint **watchpoint)
1557{
1558 target_ulong len_mask = ~(len - 1);
1559 CPUWatchpoint *wp;
1560
1561 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1562 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1563 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1564 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1565#ifndef VBOX
1566 return -EINVAL;
1567#else
1568 return VERR_INVALID_PARAMETER;
1569#endif
1570 }
1571 wp = qemu_malloc(sizeof(*wp));
1572
1573 wp->vaddr = addr;
1574 wp->len_mask = len_mask;
1575 wp->flags = flags;
1576
1577 /* keep all GDB-injected watchpoints in front */
1578 if (flags & BP_GDB)
1579 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1580 else
1581 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1582
1583 tlb_flush_page(env, addr);
1584
1585 if (watchpoint)
1586 *watchpoint = wp;
1587 return 0;
1588}
1589
1590/* Remove a specific watchpoint. */
1591int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1592 int flags)
1593{
1594 target_ulong len_mask = ~(len - 1);
1595 CPUWatchpoint *wp;
1596
1597 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1598 if (addr == wp->vaddr && len_mask == wp->len_mask
1599 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1600 cpu_watchpoint_remove_by_ref(env, wp);
1601 return 0;
1602 }
1603 }
1604#ifndef VBOX
1605 return -ENOENT;
1606#else
1607 return VERR_NOT_FOUND;
1608#endif
1609}
1610
1611/* Remove a specific watchpoint by reference. */
1612void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1613{
1614 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1615
1616 tlb_flush_page(env, watchpoint->vaddr);
1617
1618 qemu_free(watchpoint);
1619}
1620
1621/* Remove all matching watchpoints. */
1622void cpu_watchpoint_remove_all(CPUState *env, int mask)
1623{
1624 CPUWatchpoint *wp, *next;
1625
1626 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1627 if (wp->flags & mask)
1628 cpu_watchpoint_remove_by_ref(env, wp);
1629 }
1630}
1631#endif
1632
1633/* Add a breakpoint. */
1634int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1635 CPUBreakpoint **breakpoint)
1636{
1637#if defined(TARGET_HAS_ICE)
1638 CPUBreakpoint *bp;
1639
1640 bp = qemu_malloc(sizeof(*bp));
1641
1642 bp->pc = pc;
1643 bp->flags = flags;
1644
1645 /* keep all GDB-injected breakpoints in front */
1646 if (flags & BP_GDB)
1647 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1648 else
1649 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1650
1651 breakpoint_invalidate(env, pc);
1652
1653 if (breakpoint)
1654 *breakpoint = bp;
1655 return 0;
1656#else
1657 return -ENOSYS;
1658#endif
1659}
1660
1661/* Remove a specific breakpoint. */
1662int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1663{
1664#if defined(TARGET_HAS_ICE)
1665 CPUBreakpoint *bp;
1666
1667 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1668 if (bp->pc == pc && bp->flags == flags) {
1669 cpu_breakpoint_remove_by_ref(env, bp);
1670 return 0;
1671 }
1672 }
1673# ifndef VBOX
1674 return -ENOENT;
1675# else
1676 return VERR_NOT_FOUND;
1677# endif
1678#else
1679 return -ENOSYS;
1680#endif
1681}
1682
1683/* Remove a specific breakpoint by reference. */
1684void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1685{
1686#if defined(TARGET_HAS_ICE)
1687 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1688
1689 breakpoint_invalidate(env, breakpoint->pc);
1690
1691 qemu_free(breakpoint);
1692#endif
1693}
1694
1695/* Remove all matching breakpoints. */
1696void cpu_breakpoint_remove_all(CPUState *env, int mask)
1697{
1698#if defined(TARGET_HAS_ICE)
1699 CPUBreakpoint *bp, *next;
1700
1701 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1702 if (bp->flags & mask)
1703 cpu_breakpoint_remove_by_ref(env, bp);
1704 }
1705#endif
1706}
1707
1708/* enable or disable single step mode. EXCP_DEBUG is returned by the
1709 CPU loop after each instruction */
1710void cpu_single_step(CPUState *env, int enabled)
1711{
1712#if defined(TARGET_HAS_ICE)
1713 if (env->singlestep_enabled != enabled) {
1714 env->singlestep_enabled = enabled;
1715 if (kvm_enabled())
1716 kvm_update_guest_debug(env, 0);
1717 else {
1718 /* must flush all the translated code to avoid inconsistencies */
1719 /* XXX: only flush what is necessary */
1720 tb_flush(env);
1721 }
1722 }
1723#endif
1724}
1725
1726#ifndef VBOX
1727
1728/* enable or disable low-level logging */
1729void cpu_set_log(int log_flags)
1730{
1731 loglevel = log_flags;
1732 if (loglevel && !logfile) {
1733 logfile = fopen(logfilename, log_append ? "a" : "w");
1734 if (!logfile) {
1735 perror(logfilename);
1736 _exit(1);
1737 }
1738#if !defined(CONFIG_SOFTMMU)
1739 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1740 {
1741 static char logfile_buf[4096];
1742 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1743 }
1744#elif !defined(_WIN32)
1745 /* Win32 doesn't support line-buffering and requires size >= 2 */
1746 setvbuf(logfile, NULL, _IOLBF, 0);
1747#endif
1748 log_append = 1;
1749 }
1750 if (!loglevel && logfile) {
1751 fclose(logfile);
1752 logfile = NULL;
1753 }
1754}
1755
1756void cpu_set_log_filename(const char *filename)
1757{
1758 logfilename = strdup(filename);
1759 if (logfile) {
1760 fclose(logfile);
1761 logfile = NULL;
1762 }
1763 cpu_set_log(loglevel);
1764}
1765
1766#endif /* !VBOX */
1767
1768static void cpu_unlink_tb(CPUState *env)
1769{
1770 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1771 problem and hope the cpu will stop of its own accord. For userspace
1772 emulation this often isn't actually as bad as it sounds. Often
1773 signals are used primarily to interrupt blocking syscalls. */
1774 TranslationBlock *tb;
1775 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1776
1777 spin_lock(&interrupt_lock);
1778 tb = env->current_tb;
1779 /* if the cpu is currently executing code, we must unlink it and
1780 all the potentially executing TB */
1781 if (tb) {
1782 env->current_tb = NULL;
1783 tb_reset_jump_recursive(tb);
1784 }
1785 spin_unlock(&interrupt_lock);
1786}
1787
1788/* mask must never be zero, except for A20 change call */
1789void cpu_interrupt(CPUState *env, int mask)
1790{
1791 int old_mask;
1792
1793 old_mask = env->interrupt_request;
1794#ifndef VBOX
1795 env->interrupt_request |= mask;
1796#else /* VBOX */
1797 VM_ASSERT_EMT(env->pVM);
1798 ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, mask);
1799#endif /* VBOX */
1800
1801#ifndef VBOX
1802#ifndef CONFIG_USER_ONLY
1803 /*
1804 * If called from iothread context, wake the target CPU in
1805 * case it's halted.
1806 */
1807 if (!qemu_cpu_self(env)) {
1808 qemu_cpu_kick(env);
1809 return;
1810 }
1811#endif
1812#endif /* !VBOX */
1813
1814 if (use_icount) {
1815 env->icount_decr.u16.high = 0xffff;
1816#ifndef CONFIG_USER_ONLY
1817 if (!can_do_io(env)
1818 && (mask & ~old_mask) != 0) {
1819 cpu_abort(env, "Raised interrupt while not in I/O function");
1820 }
1821#endif
1822 } else {
1823 cpu_unlink_tb(env);
1824 }
1825}
1826
1827void cpu_reset_interrupt(CPUState *env, int mask)
1828{
1829#ifdef VBOX
1830 /*
1831 * Note: the current implementation can be executed by another thread without problems; make sure this remains true
1832 * for future changes!
1833 */
1834 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~mask);
1835#else /* !VBOX */
1836 env->interrupt_request &= ~mask;
1837#endif /* !VBOX */
1838}
1839
1840void cpu_exit(CPUState *env)
1841{
1842 env->exit_request = 1;
1843 cpu_unlink_tb(env);
1844}
1845
1846#ifndef VBOX
1847const CPULogItem cpu_log_items[] = {
1848 { CPU_LOG_TB_OUT_ASM, "out_asm",
1849 "show generated host assembly code for each compiled TB" },
1850 { CPU_LOG_TB_IN_ASM, "in_asm",
1851 "show target assembly code for each compiled TB" },
1852 { CPU_LOG_TB_OP, "op",
1853 "show micro ops for each compiled TB" },
1854 { CPU_LOG_TB_OP_OPT, "op_opt",
1855 "show micro ops "
1856#ifdef TARGET_I386
1857 "before eflags optimization and "
1858#endif
1859 "after liveness analysis" },
1860 { CPU_LOG_INT, "int",
1861 "show interrupts/exceptions in short format" },
1862 { CPU_LOG_EXEC, "exec",
1863 "show trace before each executed TB (lots of logs)" },
1864 { CPU_LOG_TB_CPU, "cpu",
1865 "show CPU state before block translation" },
1866#ifdef TARGET_I386
1867 { CPU_LOG_PCALL, "pcall",
1868 "show protected mode far calls/returns/exceptions" },
1869 { CPU_LOG_RESET, "cpu_reset",
1870 "show CPU state before CPU resets" },
1871#endif
1872#ifdef DEBUG_IOPORT
1873 { CPU_LOG_IOPORT, "ioport",
1874 "show all i/o ports accesses" },
1875#endif
1876 { 0, NULL, NULL },
1877};
1878
1879#ifndef CONFIG_USER_ONLY
1880static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
1881 = QLIST_HEAD_INITIALIZER(memory_client_list);
1882
1883static void cpu_notify_set_memory(target_phys_addr_t start_addr,
1884 ram_addr_t size,
1885 ram_addr_t phys_offset)
1886{
1887 CPUPhysMemoryClient *client;
1888 QLIST_FOREACH(client, &memory_client_list, list) {
1889 client->set_memory(client, start_addr, size, phys_offset);
1890 }
1891}
1892
1893static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
1894 target_phys_addr_t end)
1895{
1896 CPUPhysMemoryClient *client;
1897 QLIST_FOREACH(client, &memory_client_list, list) {
1898 int r = client->sync_dirty_bitmap(client, start, end);
1899 if (r < 0)
1900 return r;
1901 }
1902 return 0;
1903}
1904
1905static int cpu_notify_migration_log(int enable)
1906{
1907 CPUPhysMemoryClient *client;
1908 QLIST_FOREACH(client, &memory_client_list, list) {
1909 int r = client->migration_log(client, enable);
1910 if (r < 0)
1911 return r;
1912 }
1913 return 0;
1914}
1915
1916static void phys_page_for_each_1(CPUPhysMemoryClient *client,
1917 int level, void **lp)
1918{
1919 int i;
1920
1921 if (*lp == NULL) {
1922 return;
1923 }
1924 if (level == 0) {
1925 PhysPageDesc *pd = *lp;
1926 for (i = 0; i < L2_SIZE; ++i) {
1927 if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
1928 client->set_memory(client, pd[i].region_offset,
1929 TARGET_PAGE_SIZE, pd[i].phys_offset);
1930 }
1931 }
1932 } else {
1933 void **pp = *lp;
1934 for (i = 0; i < L2_SIZE; ++i) {
1935 phys_page_for_each_1(client, level - 1, pp + i);
1936 }
1937 }
1938}
1939
1940static void phys_page_for_each(CPUPhysMemoryClient *client)
1941{
1942 int i;
1943 for (i = 0; i < P_L1_SIZE; ++i) {
1944 phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
1945 l1_phys_map + 1);
1946 }
1947}
1948
1949void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
1950{
1951 QLIST_INSERT_HEAD(&memory_client_list, client, list);
1952 phys_page_for_each(client);
1953}
1954
1955void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
1956{
1957 QLIST_REMOVE(client, list);
1958}
1959#endif
1960
1961static int cmp1(const char *s1, int n, const char *s2)
1962{
1963 if (strlen(s2) != n)
1964 return 0;
1965 return memcmp(s1, s2, n) == 0;
1966}
1967
1968/* Takes a comma-separated list of log masks. Returns 0 on error. */
1969int cpu_str_to_log_mask(const char *str)
1970{
1971 const CPULogItem *item;
1972 int mask;
1973 const char *p, *p1;
1974
1975 p = str;
1976 mask = 0;
1977 for(;;) {
1978 p1 = strchr(p, ',');
1979 if (!p1)
1980 p1 = p + strlen(p);
1981 if(cmp1(p,p1-p,"all")) {
1982 for(item = cpu_log_items; item->mask != 0; item++) {
1983 mask |= item->mask;
1984 }
1985 } else {
1986 for(item = cpu_log_items; item->mask != 0; item++) {
1987 if (cmp1(p, p1 - p, item->name))
1988 goto found;
1989 }
1990 return 0;
1991 }
1992 found:
1993 mask |= item->mask;
1994 if (*p1 != ',')
1995 break;
1996 p = p1 + 1;
1997 }
1998 return mask;
1999}
2000
2001void cpu_abort(CPUState *env, const char *fmt, ...)
2002{
2003 va_list ap;
2004 va_list ap2;
2005
2006 va_start(ap, fmt);
2007 va_copy(ap2, ap);
2008 fprintf(stderr, "qemu: fatal: ");
2009 vfprintf(stderr, fmt, ap);
2010 fprintf(stderr, "\n");
2011#ifdef TARGET_I386
2012 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
2013#else
2014 cpu_dump_state(env, stderr, fprintf, 0);
2015#endif
2016 if (qemu_log_enabled()) {
2017 qemu_log("qemu: fatal: ");
2018 qemu_log_vprintf(fmt, ap2);
2019 qemu_log("\n");
2020#ifdef TARGET_I386
2021 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
2022#else
2023 log_cpu_state(env, 0);
2024#endif
2025 qemu_log_flush();
2026 qemu_log_close();
2027 }
2028 va_end(ap2);
2029 va_end(ap);
2030#if defined(CONFIG_USER_ONLY)
2031 {
2032 struct sigaction act;
2033 sigfillset(&act.sa_mask);
2034 act.sa_handler = SIG_DFL;
2035 sigaction(SIGABRT, &act, NULL);
2036 }
2037#endif
2038 abort();
2039}
2040
2041CPUState *cpu_copy(CPUState *env)
2042{
2043 CPUState *new_env = cpu_init(env->cpu_model_str);
2044 CPUState *next_cpu = new_env->next_cpu;
2045 int cpu_index = new_env->cpu_index;
2046#if defined(TARGET_HAS_ICE)
2047 CPUBreakpoint *bp;
2048 CPUWatchpoint *wp;
2049#endif
2050
2051 memcpy(new_env, env, sizeof(CPUState));
2052
2053 /* Preserve chaining and index. */
2054 new_env->next_cpu = next_cpu;
2055 new_env->cpu_index = cpu_index;
2056
2057 /* Clone all break/watchpoints.
2058 Note: Once we support ptrace with hw-debug register access, make sure
2059 BP_CPU break/watchpoints are handled correctly on clone. */
2060    QTAILQ_INIT(&new_env->breakpoints);
2061    QTAILQ_INIT(&new_env->watchpoints);
2062#if defined(TARGET_HAS_ICE)
2063 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
2064 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
2065 }
2066 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2067 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
2068 wp->flags, NULL);
2069 }
2070#endif
2071
2072 return new_env;
2073}
2074
2075#endif /* !VBOX */
2076#if !defined(CONFIG_USER_ONLY)
2077
2078static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
2079{
2080 unsigned int i;
2081
2082 /* Discard jump cache entries for any tb which might potentially
2083 overlap the flushed page. */
2084 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
2085 memset (&env->tb_jmp_cache[i], 0,
2086 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
2087
2088 i = tb_jmp_cache_hash_page(addr);
2089 memset (&env->tb_jmp_cache[i], 0,
2090 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
2091#ifdef VBOX
2092
2093 /* inform raw mode about TLB page flush */
2094 remR3FlushPage(env, addr);
2095#endif /* VBOX */
2096}
2097
2098static CPUTLBEntry s_cputlb_empty_entry = {
2099 .addr_read = -1,
2100 .addr_write = -1,
2101 .addr_code = -1,
2102 .addend = -1,
2103};
2104
2105/* NOTE: if flush_global is true, also flush global entries (not
2106 implemented yet) */
2107void tlb_flush(CPUState *env, int flush_global)
2108{
2109 int i;
2110
2111#ifdef VBOX
2112 Assert(EMRemIsLockOwner(env->pVM));
2113 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_FLUSH_TLB);
2114#endif
2115
2116#if defined(DEBUG_TLB)
2117 printf("tlb_flush:\n");
2118#endif
2119 /* must reset current TB so that interrupts cannot modify the
2120 links while we are modifying them */
2121 env->current_tb = NULL;
2122
2123 for(i = 0; i < CPU_TLB_SIZE; i++) {
2124 int mmu_idx;
2125 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2126 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
2127 }
2128 }
2129
2130 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
2131
2132 env->tlb_flush_addr = -1;
2133 env->tlb_flush_mask = 0;
2134 tlb_flush_count++;
2135#ifdef VBOX
2136
2137 /* inform raw mode about TLB flush */
2138 remR3FlushTLB(env, flush_global);
2139#endif /* VBOX */
2140}
2141
2142static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
2143{
2144 if (addr == (tlb_entry->addr_read &
2145 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
2146 addr == (tlb_entry->addr_write &
2147 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
2148 addr == (tlb_entry->addr_code &
2149 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
2150 *tlb_entry = s_cputlb_empty_entry;
2151 }
2152}
2153
2154void tlb_flush_page(CPUState *env, target_ulong addr)
2155{
2156 int i;
2157 int mmu_idx;
2158
2159 Assert(EMRemIsLockOwner(env->pVM));
2160#if defined(DEBUG_TLB)
2161 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
2162#endif
2163 /* Check if we need to flush due to large pages. */
2164 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
2165#if defined(DEBUG_TLB)
2166 printf("tlb_flush_page: forced full flush ("
2167 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
2168 env->tlb_flush_addr, env->tlb_flush_mask);
2169#endif
2170 tlb_flush(env, 1);
2171 return;
2172 }
2173 /* must reset current TB so that interrupts cannot modify the
2174 links while we are modifying them */
2175 env->current_tb = NULL;
2176
2177 addr &= TARGET_PAGE_MASK;
2178 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2179 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2180 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
2181
2182 tlb_flush_jmp_cache(env, addr);
2183}
2184
2185/* update the TLBs so that writes to code in the virtual page 'addr'
2186 can be detected */
2187static void tlb_protect_code(ram_addr_t ram_addr)
2188{
2189 cpu_physical_memory_reset_dirty(ram_addr,
2190 ram_addr + TARGET_PAGE_SIZE,
2191 CODE_DIRTY_FLAG);
2192#if defined(VBOX) && defined(REM_MONITOR_CODE_PAGES)
2193 /** @todo Retest this? This function has changed... */
2194 remR3ProtectCode(cpu_single_env, ram_addr);
2195#endif /* VBOX */
2196}
2197
2198/* update the TLB so that writes in physical page 'ram_addr' are no longer
2199   tested for self-modifying code */
2200static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
2201 target_ulong vaddr)
2202{
2203 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
2204}
2205
2206static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
2207 uintptr_t start, uintptr_t length)
2208{
2209 uintptr_t addr;
2210#ifdef VBOX
2211
2212 if (start & 3)
2213 return;
2214#endif /* VBOX */
2215 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2216 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2217 if ((addr - start) < length) {
2218 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
2219 }
2220 }
2221}
2222
2223/* Note: start and end must be within the same ram block. */
2224void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
2225 int dirty_flags)
2226{
2227 CPUState *env;
2228 uintptr_t length, start1;
2229 int i;
2230
2231 start &= TARGET_PAGE_MASK;
2232 end = TARGET_PAGE_ALIGN(end);
2233
2234 length = end - start;
2235 if (length == 0)
2236 return;
2237 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
2238
2239 /* we modify the TLB cache so that the dirty bit will be set again
2240 when accessing the range */
2241#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2242 start1 = start;
2243#elif !defined(VBOX)
2244 start1 = (uintptr_t)qemu_get_ram_ptr(start);
2245    /* Check that we don't span multiple blocks - this breaks the
2246 address comparisons below. */
2247 if ((uintptr_t)qemu_get_ram_ptr(end - 1) - start1
2248 != (end - 1) - start) {
2249 abort();
2250 }
2251#else
2252 start1 = (uintptr_t)remR3TlbGCPhys2Ptr(first_cpu, start, 1 /*fWritable*/); /** @todo page replacing (sharing or read only) may cause trouble, fix interface/whatever. */
2253#endif
2254
2255 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2256 int mmu_idx;
2257 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2258 for(i = 0; i < CPU_TLB_SIZE; i++)
2259 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2260 start1, length);
2261 }
2262 }
2263}
2264
2265#ifndef VBOX
2266
2267int cpu_physical_memory_set_dirty_tracking(int enable)
2268{
2269 int ret = 0;
2270 in_migration = enable;
2271 ret = cpu_notify_migration_log(!!enable);
2272 return ret;
2273}
2274
2275int cpu_physical_memory_get_dirty_tracking(void)
2276{
2277 return in_migration;
2278}
2279
2280#endif /* !VBOX */
2281
2282int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2283 target_phys_addr_t end_addr)
2284{
2285#ifndef VBOX
2286 int ret;
2287
2288 ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
2289 return ret;
2290#else /* VBOX */
2291 return 0;
2292#endif /* VBOX */
2293}
2294
2295#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2296DECLINLINE(void) tlb_update_dirty(CPUTLBEntry *tlb_entry, target_phys_addr_t phys_addend)
2297#else
2298static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2299#endif
2300{
2301 ram_addr_t ram_addr;
2302#ifndef VBOX
2303 void *p;
2304#endif
2305
2306 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2307#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2308 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2309#elif !defined(VBOX)
2310 p = (void *)(uintptr_t)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2311 + tlb_entry->addend);
2312 ram_addr = qemu_ram_addr_from_host(p);
2313#else
2314 Assert(phys_addend != -1);
2315 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + phys_addend;
2316#endif
2317 if (!cpu_physical_memory_is_dirty(ram_addr)) {
2318 tlb_entry->addr_write |= TLB_NOTDIRTY;
2319 }
2320 }
2321}
2322
2323/* update the TLB according to the current state of the dirty bits */
2324void cpu_tlb_update_dirty(CPUState *env)
2325{
2326 int i;
2327 int mmu_idx;
2328 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2329 for(i = 0; i < CPU_TLB_SIZE; i++)
2330#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2331 tlb_update_dirty(&env->tlb_table[mmu_idx][i], env->phys_addends[mmu_idx][i]);
2332#else
2333 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2334#endif
2335 }
2336}
2337
2338static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2339{
2340 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2341 tlb_entry->addr_write = vaddr;
2342}
2343
2344/* update the TLB corresponding to virtual page vaddr
2345 so that it is no longer dirty */
2346static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
2347{
2348 int i;
2349 int mmu_idx;
2350
2351 vaddr &= TARGET_PAGE_MASK;
2352 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2353 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2354 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
2355}
2356
2357/* Our TLB does not support large pages, so remember the area covered by
2358 large pages and trigger a full TLB flush if these are invalidated. */
2359static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2360 target_ulong size)
2361{
2362 target_ulong mask = ~(size - 1);
2363
2364 if (env->tlb_flush_addr == (target_ulong)-1) {
2365 env->tlb_flush_addr = vaddr & mask;
2366 env->tlb_flush_mask = mask;
2367 return;
2368 }
2369 /* Extend the existing region to include the new page.
2370 This is a compromise between unnecessary flushes and the cost
2371 of maintaining a full variable size TLB. */
2372 mask &= env->tlb_flush_mask;
2373 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2374 mask <<= 1;
2375 }
2376 env->tlb_flush_addr &= mask;
2377 env->tlb_flush_mask = mask;
2378}
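/* Worked example (illustrative, assuming a 32-bit target_ulong): with an
 * existing record tlb_flush_addr=0x00200000, tlb_flush_mask=0xffe00000
 * (one 2 MB page) and a second 2 MB page added at vaddr=0x00A00000, the
 * loop widens the mask to 0xff000000, leaving tlb_flush_addr=0 and
 * tlb_flush_mask=0xff000000.  tlb_flush_page() then treats every address
 * in [0, 0x01000000) as covered and falls back to a full flush. */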
2379
2380/* Add a new TLB entry. At most one entry for a given virtual address
2381   is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
2382 supplied size is only used by tlb_flush_page. */
2383void tlb_set_page(CPUState *env, target_ulong vaddr,
2384 target_phys_addr_t paddr, int prot,
2385 int mmu_idx, target_ulong size)
2386{
2387 PhysPageDesc *p;
2388 ram_addr_t pd;
2389 unsigned int index;
2390 target_ulong address;
2391 target_ulong code_address;
2392 uintptr_t addend;
2393 CPUTLBEntry *te;
2394 CPUWatchpoint *wp;
2395 target_phys_addr_t iotlb;
2396#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2397 int read_mods = 0, write_mods = 0, code_mods = 0;
2398#endif
2399
2400 assert(size >= TARGET_PAGE_SIZE);
2401 if (size != TARGET_PAGE_SIZE) {
2402 tlb_add_large_page(env, vaddr, size);
2403 }
2404 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2405 if (!p) {
2406 pd = IO_MEM_UNASSIGNED;
2407 } else {
2408 pd = p->phys_offset;
2409 }
2410#if defined(DEBUG_TLB)
2411 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d size=" TARGET_FMT_lx " pd=0x%08lx\n",
2412 vaddr, (int)paddr, prot, mmu_idx, size, (long)pd);
2413#endif
2414
2415 address = vaddr;
2416 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2417 /* IO memory case (romd handled later) */
2418 address |= TLB_MMIO;
2419 }
2420#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2421 addend = pd & TARGET_PAGE_MASK;
2422#elif !defined(VBOX)
2423 addend = (uintptr_t)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2424#else
2425 /** @todo this is racing the phys_page_find call above since it may register
2426 * a new chunk of memory... */
2427 addend = (uintptr_t)remR3TlbGCPhys2Ptr(env, pd & TARGET_PAGE_MASK, !!(prot & PAGE_WRITE));
2428#endif
2429
2430 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2431 /* Normal RAM. */
2432 iotlb = pd & TARGET_PAGE_MASK;
2433 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2434 iotlb |= IO_MEM_NOTDIRTY;
2435 else
2436 iotlb |= IO_MEM_ROM;
2437 } else {
2438 /* IO handlers are currently passed a physical address.
2439 It would be nice to pass an offset from the base address
2440 of that region. This would avoid having to special case RAM,
2441 and avoid full address decoding in every device.
2442 We can't use the high bits of pd for this because
2443 IO_MEM_ROMD uses these as a ram address. */
2444 iotlb = (pd & ~TARGET_PAGE_MASK);
2445 if (p) {
2446 iotlb += p->region_offset;
2447 } else {
2448 iotlb += paddr;
2449 }
2450 }
2451
2452 code_address = address;
2453#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2454
2455 if (addend & 0x3)
2456 {
2457 if (addend & 0x2)
2458 {
2459 /* catch write */
2460 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
2461 write_mods |= TLB_MMIO;
2462 }
2463 else if (addend & 0x1)
2464 {
2465 /* catch all */
2466 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
2467 {
2468 read_mods |= TLB_MMIO;
2469 write_mods |= TLB_MMIO;
2470 code_mods |= TLB_MMIO;
2471 }
2472 }
2473 if ((iotlb & ~TARGET_PAGE_MASK) == 0)
2474 iotlb = env->pVM->rem.s.iHandlerMemType + paddr;
2475 addend &= ~(target_ulong)0x3;
2476 }
2477
2478#endif
2479 /* Make accesses to pages with watchpoints go via the
2480 watchpoint trap routines. */
2481 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2482 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2483 /* Avoid trapping reads of pages with a write breakpoint. */
2484 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2485 iotlb = io_mem_watch + paddr;
2486 address |= TLB_MMIO;
2487 break;
2488 }
2489 }
2490 }
2491
2492 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2493 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2494 te = &env->tlb_table[mmu_idx][index];
2495 te->addend = addend - vaddr;
2496 if (prot & PAGE_READ) {
2497 te->addr_read = address;
2498 } else {
2499 te->addr_read = -1;
2500 }
2501
2502 if (prot & PAGE_EXEC) {
2503 te->addr_code = code_address;
2504 } else {
2505 te->addr_code = -1;
2506 }
2507 if (prot & PAGE_WRITE) {
2508 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2509 (pd & IO_MEM_ROMD)) {
2510 /* Write access calls the I/O callback. */
2511 te->addr_write = address | TLB_MMIO;
2512 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2513 !cpu_physical_memory_is_dirty(pd)) {
2514 te->addr_write = address | TLB_NOTDIRTY;
2515 } else {
2516 te->addr_write = address;
2517 }
2518 } else {
2519 te->addr_write = -1;
2520 }
2521
2522#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2523 if (prot & PAGE_READ)
2524 te->addr_read |= read_mods;
2525 if (prot & PAGE_EXEC)
2526 te->addr_code |= code_mods;
2527 if (prot & PAGE_WRITE)
2528 te->addr_write |= write_mods;
2529
2530    env->phys_addends[mmu_idx][index] = (pd & TARGET_PAGE_MASK) - vaddr;
2531#endif
2532
2533#ifdef VBOX
2534 /* inform raw mode about TLB page change */
2535 remR3FlushPage(env, vaddr);
2536#endif
2537}
2538
2539#else
2540
2541void tlb_flush(CPUState *env, int flush_global)
2542{
2543}
2544
2545void tlb_flush_page(CPUState *env, target_ulong addr)
2546{
2547}
2548
2549/*
2550 * Walks guest process memory "regions" one by one
2551 * and calls callback function 'fn' for each region.
2552 */
2553
2554struct walk_memory_regions_data
2555{
2556 walk_memory_regions_fn fn;
2557 void *priv;
2558 uintptr_t start;
2559 int prot;
2560};
2561
2562static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2563 abi_ulong end, int new_prot)
2564{
2565 if (data->start != -1ul) {
2566 int rc = data->fn(data->priv, data->start, end, data->prot);
2567 if (rc != 0) {
2568 return rc;
2569 }
2570 }
2571
2572 data->start = (new_prot ? end : -1ul);
2573 data->prot = new_prot;
2574
2575 return 0;
2576}
2577
2578static int walk_memory_regions_1(struct walk_memory_regions_data *data,
2579 abi_ulong base, int level, void **lp)
2580{
2581 abi_ulong pa;
2582 int i, rc;
2583
2584 if (*lp == NULL) {
2585 return walk_memory_regions_end(data, base, 0);
2586 }
2587
2588 if (level == 0) {
2589 PageDesc *pd = *lp;
2590 for (i = 0; i < L2_SIZE; ++i) {
2591 int prot = pd[i].flags;
2592
2593 pa = base | (i << TARGET_PAGE_BITS);
2594 if (prot != data->prot) {
2595 rc = walk_memory_regions_end(data, pa, prot);
2596 if (rc != 0) {
2597 return rc;
2598 }
2599 }
2600 }
2601 } else {
2602 void **pp = *lp;
2603 for (i = 0; i < L2_SIZE; ++i) {
2604 pa = base | ((abi_ulong)i <<
2605 (TARGET_PAGE_BITS + L2_BITS * level));
2606 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2607 if (rc != 0) {
2608 return rc;
2609 }
2610 }
2611 }
2612
2613 return 0;
2614}
2615
2616int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2617{
2618 struct walk_memory_regions_data data;
2619 target_ulong i;
2620
2621 data.fn = fn;
2622 data.priv = priv;
2623 data.start = -1ul;
2624 data.prot = 0;
2625
2626 for (i = 0; i < V_L1_SIZE; i++) {
2627 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
2628 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2629 if (rc != 0) {
2630 return rc;
2631 }
2632 }
2633
2634 return walk_memory_regions_end(&data, 0, 0);
2635}
2636
2637static int dump_region(void *priv, abi_ulong start,
2638 abi_ulong end, unsigned long prot)
2639{
2640 FILE *f = (FILE *)priv;
2641
2642 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2643 " "TARGET_ABI_FMT_lx" %c%c%c\n",
2644 start, end, end - start,
2645 ((prot & PAGE_READ) ? 'r' : '-'),
2646 ((prot & PAGE_WRITE) ? 'w' : '-'),
2647 ((prot & PAGE_EXEC) ? 'x' : '-'));
2648
2649 return (0);
2650}
2651
2652/* dump memory mappings */
2653void page_dump(FILE *f)
2654{
2655 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2656 "start", "end", "size", "prot");
2657 walk_memory_regions(f, dump_region);
2658}
2659
2660int page_get_flags(target_ulong address)
2661{
2662 PageDesc *p;
2663
2664 p = page_find(address >> TARGET_PAGE_BITS);
2665 if (!p)
2666 return 0;
2667 return p->flags;
2668}
2669
2670/* Modify the flags of a page and invalidate the code if necessary.
2671   The flag PAGE_WRITE_ORG is set automatically depending
2672 on PAGE_WRITE. The mmap_lock should already be held. */
2673void page_set_flags(target_ulong start, target_ulong end, int flags)
2674{
2675 target_ulong addr, len;
2676
2677 /* This function should never be called with addresses outside the
2678 guest address space. If this assert fires, it probably indicates
2679 a missing call to h2g_valid. */
2680#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2681 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2682#endif
2683 assert(start < end);
2684
2685 start = start & TARGET_PAGE_MASK;
2686 end = TARGET_PAGE_ALIGN(end);
2687
2688 if (flags & PAGE_WRITE) {
2689 flags |= PAGE_WRITE_ORG;
2690 }
2691
2692#ifdef VBOX
2693 AssertMsgFailed(("We shouldn't be here, and if we should, we must have an env to do the proper locking!\n"));
2694#endif
2695 for (addr = start, len = end - start;
2696 len != 0;
2697 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2698 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2699
2700 /* If the write protection bit is set, then we invalidate
2701 the code inside. */
2702 if (!(p->flags & PAGE_WRITE) &&
2703 (flags & PAGE_WRITE) &&
2704 p->first_tb) {
2705 tb_invalidate_phys_page(addr, 0, NULL);
2706 }
2707 p->flags = flags;
2708 }
2709}
2710
2711int page_check_range(target_ulong start, target_ulong len, int flags)
2712{
2713 PageDesc *p;
2714 target_ulong end;
2715 target_ulong addr;
2716
2717 /* This function should never be called with addresses outside the
2718 guest address space. If this assert fires, it probably indicates
2719 a missing call to h2g_valid. */
2720#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2721 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2722#endif
2723
2724 if (len == 0) {
2725 return 0;
2726 }
2727 if (start + len - 1 < start) {
2728 /* We've wrapped around. */
2729 return -1;
2730 }
2731
2732    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2733 start = start & TARGET_PAGE_MASK;
2734
2735 for (addr = start, len = end - start;
2736 len != 0;
2737 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2738 p = page_find(addr >> TARGET_PAGE_BITS);
2739        if (!p)
2740 return -1;
2741        if (!(p->flags & PAGE_VALID))
2742 return -1;
2743
2744 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2745 return -1;
2746 if (flags & PAGE_WRITE) {
2747 if (!(p->flags & PAGE_WRITE_ORG))
2748 return -1;
2749 /* unprotect the page if it was put read-only because it
2750 contains translated code */
2751 if (!(p->flags & PAGE_WRITE)) {
2752 if (!page_unprotect(addr, 0, NULL))
2753 return -1;
2754 }
2755 return 0;
2756 }
2757 }
2758 return 0;
2759}
2760
2761/* called from signal handler: invalidate the code and unprotect the
2762 page. Return TRUE if the fault was successfully handled. */
2763int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
2764{
2765 unsigned int prot;
2766 PageDesc *p;
2767 target_ulong host_start, host_end, addr;
2768
2769 /* Technically this isn't safe inside a signal handler. However we
2770 know this only ever happens in a synchronous SEGV handler, so in
2771 practice it seems to be ok. */
2772 mmap_lock();
2773
2774 p = page_find(address >> TARGET_PAGE_BITS);
2775 if (!p) {
2776 mmap_unlock();
2777 return 0;
2778 }
2779
2780 /* if the page was really writable, then we change its
2781 protection back to writable */
2782 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2783 host_start = address & qemu_host_page_mask;
2784 host_end = host_start + qemu_host_page_size;
2785
2786 prot = 0;
2787 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2788 p = page_find(addr >> TARGET_PAGE_BITS);
2789 p->flags |= PAGE_WRITE;
2790 prot |= p->flags;
2791
2792 /* and since the content will be modified, we must invalidate
2793 the corresponding translated code. */
2794 tb_invalidate_phys_page(addr, pc, puc);
2795#ifdef DEBUG_TB_CHECK
2796 tb_invalidate_check(addr);
2797#endif
2798 }
2799 mprotect((void *)g2h(host_start), qemu_host_page_size,
2800 prot & PAGE_BITS);
2801
2802 mmap_unlock();
2803 return 1;
2804 }
2805 mmap_unlock();
2806 return 0;
2807}
2808
2809static inline void tlb_set_dirty(CPUState *env,
2810 uintptr_t addr, target_ulong vaddr)
2811{
2812}
2813#endif /* defined(CONFIG_USER_ONLY) */
2814
2815#if !defined(CONFIG_USER_ONLY)
2816
2817#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2818typedef struct subpage_t {
2819 target_phys_addr_t base;
2820 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2821 ram_addr_t region_offset[TARGET_PAGE_SIZE];
2822} subpage_t;
2823
2824static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2825 ram_addr_t memory, ram_addr_t region_offset);
2826static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2827 ram_addr_t orig_memory,
2828 ram_addr_t region_offset);
2829#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2830 need_subpage) \
2831 do { \
2832 if (addr > start_addr) \
2833 start_addr2 = 0; \
2834 else { \
2835 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2836 if (start_addr2 > 0) \
2837 need_subpage = 1; \
2838 } \
2839 \
2840 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2841 end_addr2 = TARGET_PAGE_SIZE - 1; \
2842 else { \
2843 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2844 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2845 need_subpage = 1; \
2846 } \
2847 } while (0)
2848
2849/* register physical memory.
2850 For RAM, 'size' must be a multiple of the target page size.
2851 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2852 io memory page. The address used when calling the IO function is
2853 the offset from the start of the region, plus region_offset. Both
2854 start_addr and region_offset are rounded down to a page boundary
2855 before calculating this offset. This should not be a problem unless
2856 the low bits of start_addr and region_offset differ. */
2857void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2858 ram_addr_t size,
2859 ram_addr_t phys_offset,
2860 ram_addr_t region_offset)
2861{
2862 target_phys_addr_t addr, end_addr;
2863 PhysPageDesc *p;
2864 CPUState *env;
2865 ram_addr_t orig_size = size;
2866 subpage_t *subpage;
2867
2868#ifndef VBOX
2869 cpu_notify_set_memory(start_addr, size, phys_offset);
2870#endif /* !VBOX */
2871
2872 if (phys_offset == IO_MEM_UNASSIGNED) {
2873 region_offset = start_addr;
2874 }
2875 region_offset &= TARGET_PAGE_MASK;
2876 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2877 end_addr = start_addr + (target_phys_addr_t)size;
2878 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2879 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2880 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2881 ram_addr_t orig_memory = p->phys_offset;
2882 target_phys_addr_t start_addr2, end_addr2;
2883 int need_subpage = 0;
2884
2885 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2886 need_subpage);
2887 if (need_subpage) {
2888 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2889 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2890 &p->phys_offset, orig_memory,
2891 p->region_offset);
2892 } else {
2893 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2894 >> IO_MEM_SHIFT];
2895 }
2896 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2897 region_offset);
2898 p->region_offset = 0;
2899 } else {
2900 p->phys_offset = phys_offset;
2901 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2902 (phys_offset & IO_MEM_ROMD))
2903 phys_offset += TARGET_PAGE_SIZE;
2904 }
2905 } else {
2906 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2907 p->phys_offset = phys_offset;
2908 p->region_offset = region_offset;
2909 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2910 (phys_offset & IO_MEM_ROMD)) {
2911 phys_offset += TARGET_PAGE_SIZE;
2912 } else {
2913 target_phys_addr_t start_addr2, end_addr2;
2914 int need_subpage = 0;
2915
2916 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2917 end_addr2, need_subpage);
2918
2919 if (need_subpage) {
2920 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2921 &p->phys_offset, IO_MEM_UNASSIGNED,
2922 addr & TARGET_PAGE_MASK);
2923 subpage_register(subpage, start_addr2, end_addr2,
2924 phys_offset, region_offset);
2925 p->region_offset = 0;
2926 }
2927 }
2928 }
2929 region_offset += TARGET_PAGE_SIZE;
2930 }
2931
2932 /* since each CPU stores ram addresses in its TLB cache, we must
2933 reset the modified entries */
2934#ifndef VBOX
2935 /* XXX: slow ! */
2936 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2937 tlb_flush(env, 1);
2938 }
2939#else
2940    /* We have one thread per CPU, so one of the other EMTs might be executing
2941       code right now, and flushing the TLB may crash it. */
2942 env = first_cpu;
2943 if (EMRemIsLockOwner(env->pVM))
2944 tlb_flush(env, 1);
2945 else
2946 ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request,
2947 CPU_INTERRUPT_EXTERNAL_FLUSH_TLB);
2948#endif
2949}
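/* Usage sketch (illustrative, not part of the original file): callers of
 * this QEMU vintage usually go through the cpu_register_physical_memory()
 * wrapper, which passes region_offset = 0.  For RAM the phys_offset is a
 * ram offset obtained from qemu_ram_alloc():
 *
 *     ram_addr_t ram_off = qemu_ram_alloc(NULL, "example.ram", 0x100000);
 *     cpu_register_physical_memory(0x10000000, 0x100000, ram_off | IO_MEM_RAM);
 *
 * For MMIO the phys_offset is the token returned by cpu_register_io_memory()
 * (see the sketch after that function below).  Registrations that do not
 * cover whole pages are split up by the subpage machinery above. */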
2950
2951/* XXX: temporary until new memory mapping API */
2952ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2953{
2954 PhysPageDesc *p;
2955
2956 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2957 if (!p)
2958 return IO_MEM_UNASSIGNED;
2959 return p->phys_offset;
2960}
2961
2962#ifndef VBOX
2963
2964void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2965{
2966 if (kvm_enabled())
2967 kvm_coalesce_mmio_region(addr, size);
2968}
2969
2970void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2971{
2972 if (kvm_enabled())
2973 kvm_uncoalesce_mmio_region(addr, size);
2974}
2975
2976void qemu_flush_coalesced_mmio_buffer(void)
2977{
2978 if (kvm_enabled())
2979 kvm_flush_coalesced_mmio_buffer();
2980}
2981
2982#if defined(__linux__) && !defined(TARGET_S390X)
2983
2984#include <sys/vfs.h>
2985
2986#define HUGETLBFS_MAGIC 0x958458f6
2987
2988static size_t gethugepagesize(const char *path)
2989{
2990 struct statfs fs;
2991 int ret;
2992
2993 do {
2994 ret = statfs(path, &fs);
2995 } while (ret != 0 && errno == EINTR);
2996
2997 if (ret != 0) {
2998 perror(path);
2999 return 0;
3000 }
3001
3002 if (fs.f_type != HUGETLBFS_MAGIC)
3003 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
3004
3005 return (size_t)fs.f_bsize;
3006}
3007
3008static void *file_ram_alloc(RAMBlock *block,
3009 ram_addr_t memory,
3010 const char *path)
3011{
3012 char *filename;
3013 void *area;
3014 int fd;
3015#ifdef MAP_POPULATE
3016 int flags;
3017#endif
3018 size_t hpagesize;
3019
3020 hpagesize = gethugepagesize(path);
3021 if (!hpagesize) {
3022 return NULL;
3023 }
3024
3025 if (memory < hpagesize) {
3026 return NULL;
3027 }
3028
3029 if (kvm_enabled() && !kvm_has_sync_mmu()) {
3030 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
3031 return NULL;
3032 }
3033
3034 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
3035 return NULL;
3036 }
3037
3038 fd = mkstemp(filename);
3039 if (fd < 0) {
3040 perror("unable to create backing store for hugepages");
3041 free(filename);
3042 return NULL;
3043 }
3044 unlink(filename);
3045 free(filename);
3046
3047 memory = (memory+hpagesize-1) & ~(hpagesize-1);
3048
3049 /*
3050 * ftruncate is not supported by hugetlbfs in older
3051 * hosts, so don't bother bailing out on errors.
3052 * If anything goes wrong with it under other filesystems,
3053 * mmap will fail.
3054 */
3055 if (ftruncate(fd, memory))
3056 perror("ftruncate");
3057
3058#ifdef MAP_POPULATE
3059 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
3060 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
3061 * to sidestep this quirk.
3062 */
3063 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
3064 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
3065#else
3066 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
3067#endif
3068 if (area == MAP_FAILED) {
3069 perror("file_ram_alloc: can't mmap RAM pages");
3070 close(fd);
3071 return (NULL);
3072 }
3073 block->fd = fd;
3074 return area;
3075}
3076#endif
3077
3078static ram_addr_t find_ram_offset(ram_addr_t size)
3079{
3080 RAMBlock *block, *next_block;
3081 ram_addr_t offset = 0, mingap = ULONG_MAX;
3082
3083 if (QLIST_EMPTY(&ram_list.blocks))
3084 return 0;
3085
3086 QLIST_FOREACH(block, &ram_list.blocks, next) {
3087 ram_addr_t end, next = ULONG_MAX;
3088
3089 end = block->offset + block->length;
3090
3091 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
3092 if (next_block->offset >= end) {
3093 next = MIN(next, next_block->offset);
3094 }
3095 }
3096 if (next - end >= size && next - end < mingap) {
3097 offset = end;
3098 mingap = next - end;
3099 }
3100 }
3101 return offset;
3102}
3103
3104static ram_addr_t last_ram_offset(void)
3105{
3106 RAMBlock *block;
3107 ram_addr_t last = 0;
3108
3109 QLIST_FOREACH(block, &ram_list.blocks, next)
3110 last = MAX(last, block->offset + block->length);
3111
3112 return last;
3113}
3114
3115ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
3116 ram_addr_t size, void *host)
3117{
3118 RAMBlock *new_block, *block;
3119
3120 size = TARGET_PAGE_ALIGN(size);
3121 new_block = qemu_mallocz(sizeof(*new_block));
3122
3123 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
3124 char *id = dev->parent_bus->info->get_dev_path(dev);
3125 if (id) {
3126 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
3127 qemu_free(id);
3128 }
3129 }
3130 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
3131
3132 QLIST_FOREACH(block, &ram_list.blocks, next) {
3133 if (!strcmp(block->idstr, new_block->idstr)) {
3134 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
3135 new_block->idstr);
3136 abort();
3137 }
3138 }
3139
3140 new_block->host = host;
3141
3142 new_block->offset = find_ram_offset(size);
3143 new_block->length = size;
3144
3145 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
3146
3147 ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
3148 last_ram_offset() >> TARGET_PAGE_BITS);
3149 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
3150 0xff, size >> TARGET_PAGE_BITS);
3151
3152 if (kvm_enabled())
3153 kvm_setup_guest_memory(new_block->host, size);
3154
3155 return new_block->offset;
3156}
3157
3158ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
3159{
3160 RAMBlock *new_block, *block;
3161
3162 size = TARGET_PAGE_ALIGN(size);
3163 new_block = qemu_mallocz(sizeof(*new_block));
3164
3165 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
3166 char *id = dev->parent_bus->info->get_dev_path(dev);
3167 if (id) {
3168 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
3169 qemu_free(id);
3170 }
3171 }
3172 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
3173
3174 QLIST_FOREACH(block, &ram_list.blocks, next) {
3175 if (!strcmp(block->idstr, new_block->idstr)) {
3176 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
3177 new_block->idstr);
3178 abort();
3179 }
3180 }
3181
3182 if (mem_path) {
3183#if defined (__linux__) && !defined(TARGET_S390X)
3184 new_block->host = file_ram_alloc(new_block, size, mem_path);
3185 if (!new_block->host) {
3186 new_block->host = qemu_vmalloc(size);
3187#ifdef MADV_MERGEABLE
3188 madvise(new_block->host, size, MADV_MERGEABLE);
3189#endif
3190 }
3191#else
3192 fprintf(stderr, "-mem-path option unsupported\n");
3193 exit(1);
3194#endif
3195 } else {
3196#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3197 /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
3198 new_block->host = mmap((void*)0x1000000, size,
3199 PROT_EXEC|PROT_READ|PROT_WRITE,
3200 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
3201#else
3202 new_block->host = qemu_vmalloc(size);
3203#endif
3204#ifdef MADV_MERGEABLE
3205 madvise(new_block->host, size, MADV_MERGEABLE);
3206#endif
3207 }
3208 new_block->offset = find_ram_offset(size);
3209 new_block->length = size;
3210
3211 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
3212
3213 ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
3214 last_ram_offset() >> TARGET_PAGE_BITS);
3215 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
3216 0xff, size >> TARGET_PAGE_BITS);
3217
3218 if (kvm_enabled())
3219 kvm_setup_guest_memory(new_block->host, size);
3220
3221 return new_block->offset;
3222}
3223
3224void qemu_ram_free(ram_addr_t addr)
3225{
3226 RAMBlock *block;
3227
3228 QLIST_FOREACH(block, &ram_list.blocks, next) {
3229 if (addr == block->offset) {
3230 QLIST_REMOVE(block, next);
3231 if (mem_path) {
3232#if defined (__linux__) && !defined(TARGET_S390X)
3233 if (block->fd) {
3234 munmap(block->host, block->length);
3235 close(block->fd);
3236 } else {
3237 qemu_vfree(block->host);
3238 }
3239#endif
3240 } else {
3241#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3242 munmap(block->host, block->length);
3243#else
3244 qemu_vfree(block->host);
3245#endif
3246 }
3247 qemu_free(block);
3248 return;
3249 }
3250 }
3251
3252}
3253
3254/* Return a host pointer to ram allocated with qemu_ram_alloc.
3255 With the exception of the softmmu code in this file, this should
3256 only be used for local memory (e.g. video ram) that the device owns,
3257 and knows it isn't going to access beyond the end of the block.
3258
3259 It should not be used for general purpose DMA.
3260 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3261 */
3262void *qemu_get_ram_ptr(ram_addr_t addr)
3263{
3264 RAMBlock *block;
3265
3266 QLIST_FOREACH(block, &ram_list.blocks, next) {
3267 if (addr - block->offset < block->length) {
3268 QLIST_REMOVE(block, next);
3269 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3270 return block->host + (addr - block->offset);
3271 }
3272 }
3273
3274 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3275 abort();
3276
3277 return NULL;
3278}
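/* Usage sketch (illustrative; the fields are placeholders): a device that
 * allocated its memory with qemu_ram_alloc() and only touches it locally
 * may cache the host pointer, e.g. for video RAM:
 *
 *     s->vram_offset = qemu_ram_alloc(NULL, "example.vram", vram_size);
 *     s->vram_ptr    = qemu_get_ram_ptr(s->vram_offset);
 *
 * Guest-visible DMA should still go through cpu_physical_memory_rw() or
 * cpu_physical_memory_map(), as the comment above says. */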
3279
3280/* Some of the softmmu routines need to translate from a host pointer
3281 (typically a TLB entry) back to a ram offset. */
3282ram_addr_t qemu_ram_addr_from_host(void *ptr)
3283{
3284 RAMBlock *block;
3285 uint8_t *host = ptr;
3286
3287 QLIST_FOREACH(block, &ram_list.blocks, next) {
3288 if (host - block->host < block->length) {
3289 return block->offset + (host - block->host);
3290 }
3291 }
3292
3293 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3294 abort();
3295
3296 return 0;
3297}
3298
3299#endif /* !VBOX */
3300
3301static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
3302{
3303#ifdef DEBUG_UNASSIGNED
3304 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3305#endif
3306#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3307 do_unassigned_access(addr, 0, 0, 0, 1);
3308#endif
3309 return 0;
3310}
3311
3312static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
3313{
3314#ifdef DEBUG_UNASSIGNED
3315 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3316#endif
3317#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3318 do_unassigned_access(addr, 0, 0, 0, 2);
3319#endif
3320 return 0;
3321}
3322
3323static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
3324{
3325#ifdef DEBUG_UNASSIGNED
3326 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3327#endif
3328#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3329 do_unassigned_access(addr, 0, 0, 0, 4);
3330#endif
3331 return 0;
3332}
3333
3334static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
3335{
3336#ifdef DEBUG_UNASSIGNED
3337 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3338#endif
3339#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3340 do_unassigned_access(addr, 1, 0, 0, 1);
3341#endif
3342}
3343
3344static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
3345{
3346#ifdef DEBUG_UNASSIGNED
3347 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3348#endif
3349#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3350 do_unassigned_access(addr, 1, 0, 0, 2);
3351#endif
3352}
3353
3354static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
3355{
3356#ifdef DEBUG_UNASSIGNED
3357 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3358#endif
3359#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3360 do_unassigned_access(addr, 1, 0, 0, 4);
3361#endif
3362}
3363
3364static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
3365 unassigned_mem_readb,
3366 unassigned_mem_readw,
3367 unassigned_mem_readl,
3368};
3369
3370static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
3371 unassigned_mem_writeb,
3372 unassigned_mem_writew,
3373 unassigned_mem_writel,
3374};
3375
3376static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
3377 uint32_t val)
3378{
3379 int dirty_flags;
3380 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3381 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3382#if !defined(CONFIG_USER_ONLY)
3383 tb_invalidate_phys_page_fast(ram_addr, 1);
3384 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3385#endif
3386 }
3387#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
3388 remR3PhysWriteU8(ram_addr, val);
3389#else
3390 stb_p(qemu_get_ram_ptr(ram_addr), val);
3391#endif
3392 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3393 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3394 /* we remove the notdirty callback only if the code has been
3395 flushed */
3396 if (dirty_flags == 0xff)
3397 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3398}
3399
3400static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
3401 uint32_t val)
3402{
3403 int dirty_flags;
3404 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3405 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3406#if !defined(CONFIG_USER_ONLY)
3407 tb_invalidate_phys_page_fast(ram_addr, 2);
3408 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3409#endif
3410 }
3411#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
3412 remR3PhysWriteU16(ram_addr, val);
3413#else
3414 stw_p(qemu_get_ram_ptr(ram_addr), val);
3415#endif
3416 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3417 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3418 /* we remove the notdirty callback only if the code has been
3419 flushed */
3420 if (dirty_flags == 0xff)
3421 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3422}
3423
3424static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
3425 uint32_t val)
3426{
3427 int dirty_flags;
3428 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3429 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3430#if !defined(CONFIG_USER_ONLY)
3431 tb_invalidate_phys_page_fast(ram_addr, 4);
3432 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3433#endif
3434 }
3435#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
3436 remR3PhysWriteU32(ram_addr, val);
3437#else
3438 stl_p(qemu_get_ram_ptr(ram_addr), val);
3439#endif
3440 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3441 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3442 /* we remove the notdirty callback only if the code has been
3443 flushed */
3444 if (dirty_flags == 0xff)
3445 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3446}
3447
3448static CPUReadMemoryFunc * const error_mem_read[3] = {
3449 NULL, /* never used */
3450 NULL, /* never used */
3451 NULL, /* never used */
3452};
3453
3454static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
3455 notdirty_mem_writeb,
3456 notdirty_mem_writew,
3457 notdirty_mem_writel,
3458};
3459
3460/* Generate a debug exception if a watchpoint has been hit. */
3461static void check_watchpoint(int offset, int len_mask, int flags)
3462{
3463 CPUState *env = cpu_single_env;
3464 target_ulong pc, cs_base;
3465 TranslationBlock *tb;
3466 target_ulong vaddr;
3467 CPUWatchpoint *wp;
3468 int cpu_flags;
3469
3470 if (env->watchpoint_hit) {
3471 /* We re-entered the check after replacing the TB. Now raise
3472         * the debug interrupt so that it will trigger after the
3473 * current instruction. */
3474 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3475 return;
3476 }
3477 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
3478 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
3479 if ((vaddr == (wp->vaddr & len_mask) ||
3480 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
3481 wp->flags |= BP_WATCHPOINT_HIT;
3482 if (!env->watchpoint_hit) {
3483 env->watchpoint_hit = wp;
3484 tb = tb_find_pc(env->mem_io_pc);
3485 if (!tb) {
3486 cpu_abort(env, "check_watchpoint: could not find TB for "
3487 "pc=%p", (void *)env->mem_io_pc);
3488 }
3489 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
3490 tb_phys_invalidate(tb, -1);
3491 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3492 env->exception_index = EXCP_DEBUG;
3493 } else {
3494 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3495 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3496 }
3497 cpu_resume_from_signal(env, NULL);
3498 }
3499 } else {
3500 wp->flags &= ~BP_WATCHPOINT_HIT;
3501 }
3502 }
3503}
3504
3505/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3506 so these check for a hit then pass through to the normal out-of-line
3507 phys routines. */
3508static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
3509{
3510 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
3511 return ldub_phys(addr);
3512}
3513
3514static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
3515{
3516 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
3517 return lduw_phys(addr);
3518}
3519
3520static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
3521{
3522 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
3523 return ldl_phys(addr);
3524}
3525
3526static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
3527 uint32_t val)
3528{
3529 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
3530 stb_phys(addr, val);
3531}
3532
3533static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
3534 uint32_t val)
3535{
3536 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
3537 stw_phys(addr, val);
3538}
3539
3540static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
3541 uint32_t val)
3542{
3543 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
3544 stl_phys(addr, val);
3545}
3546
3547static CPUReadMemoryFunc * const watch_mem_read[3] = {
3548 watch_mem_readb,
3549 watch_mem_readw,
3550 watch_mem_readl,
3551};
3552
3553static CPUWriteMemoryFunc * const watch_mem_write[3] = {
3554 watch_mem_writeb,
3555 watch_mem_writew,
3556 watch_mem_writel,
3557};
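/* Usage sketch (illustrative): the TLB trick above is armed by inserting a
 * watchpoint; a debugger stub of this vintage would do roughly:
 *
 *     CPUWatchpoint *wp;
 *     cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE | BP_GDB, &wp);
 *
 * Subsequent guest stores to that page are then routed through
 * watch_mem_write and check_watchpoint() above.  The flag names come from
 * cpu-all.h of this era; treat them as assumptions. */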
3558
3559static inline uint32_t subpage_readlen (subpage_t *mmio,
3560 target_phys_addr_t addr,
3561 unsigned int len)
3562{
3563 unsigned int idx = SUBPAGE_IDX(addr);
3564#if defined(DEBUG_SUBPAGE)
3565 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3566 mmio, len, addr, idx);
3567#endif
3568
3569 addr += mmio->region_offset[idx];
3570 idx = mmio->sub_io_index[idx];
3571 return io_mem_read[idx][len](io_mem_opaque[idx], addr);
3572}
3573
3574static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
3575 uint32_t value, unsigned int len)
3576{
3577 unsigned int idx = SUBPAGE_IDX(addr);
3578#if defined(DEBUG_SUBPAGE)
3579 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
3580 __func__, mmio, len, addr, idx, value);
3581#endif
3582
3583 addr += mmio->region_offset[idx];
3584 idx = mmio->sub_io_index[idx];
3585 io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
3586}
3587
3588static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
3589{
3590 return subpage_readlen(opaque, addr, 0);
3591}
3592
3593static void subpage_writeb (void *opaque, target_phys_addr_t addr,
3594 uint32_t value)
3595{
3596 subpage_writelen(opaque, addr, value, 0);
3597}
3598
3599static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
3600{
3601 return subpage_readlen(opaque, addr, 1);
3602}
3603
3604static void subpage_writew (void *opaque, target_phys_addr_t addr,
3605 uint32_t value)
3606{
3607 subpage_writelen(opaque, addr, value, 1);
3608}
3609
3610static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
3611{
3612 return subpage_readlen(opaque, addr, 2);
3613}
3614
3615static void subpage_writel (void *opaque, target_phys_addr_t addr,
3616 uint32_t value)
3617{
3618 subpage_writelen(opaque, addr, value, 2);
3619}
3620
3621static CPUReadMemoryFunc * const subpage_read[] = {
3622 &subpage_readb,
3623 &subpage_readw,
3624 &subpage_readl,
3625};
3626
3627static CPUWriteMemoryFunc * const subpage_write[] = {
3628 &subpage_writeb,
3629 &subpage_writew,
3630 &subpage_writel,
3631};
3632
3633static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3634 ram_addr_t memory, ram_addr_t region_offset)
3635{
3636 int idx, eidx;
3637
3638 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3639 return -1;
3640 idx = SUBPAGE_IDX(start);
3641 eidx = SUBPAGE_IDX(end);
3642#if defined(DEBUG_SUBPAGE)
3643 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
3644 mmio, start, end, idx, eidx, memory);
3645#endif
3646 memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3647 for (; idx <= eidx; idx++) {
3648 mmio->sub_io_index[idx] = memory;
3649 mmio->region_offset[idx] = region_offset;
3650 }
3651
3652 return 0;
3653}
3654
3655static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3656 ram_addr_t orig_memory,
3657 ram_addr_t region_offset)
3658{
3659 subpage_t *mmio;
3660 int subpage_memory;
3661
3662 mmio = qemu_mallocz(sizeof(subpage_t));
3663
3664 mmio->base = base;
3665 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
3666#if defined(DEBUG_SUBPAGE)
3667 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3668 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3669#endif
3670 *phys = subpage_memory | IO_MEM_SUBPAGE;
3671 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
3672
3673 return mmio;
3674}
3675
3676static int get_free_io_mem_idx(void)
3677{
3678 int i;
3679
3680 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3681 if (!io_mem_used[i]) {
3682 io_mem_used[i] = 1;
3683 return i;
3684 }
3685 fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
3686 return -1;
3687}
3688
3689/* mem_read and mem_write are arrays of functions containing the
3690 function to access byte (index 0), word (index 1) and dword (index
3691 2). Functions can be omitted with a NULL function pointer.
3692   If io_index is non-zero, the corresponding io zone is
3693   modified. If it is zero, a new io zone is allocated. The return
3694   value can be used with cpu_register_physical_memory(); -1 is
3695   returned on error. */
3696static int cpu_register_io_memory_fixed(int io_index,
3697 CPUReadMemoryFunc * const *mem_read,
3698 CPUWriteMemoryFunc * const *mem_write,
3699 void *opaque)
3700{
3701 int i;
3702
3703 if (io_index <= 0) {
3704 io_index = get_free_io_mem_idx();
3705 if (io_index == -1)
3706 return io_index;
3707 } else {
3708 io_index >>= IO_MEM_SHIFT;
3709 if (io_index >= IO_MEM_NB_ENTRIES)
3710 return -1;
3711 }
3712
3713 for (i = 0; i < 3; ++i) {
3714 io_mem_read[io_index][i]
3715 = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
3716 }
3717 for (i = 0; i < 3; ++i) {
3718 io_mem_write[io_index][i]
3719 = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
3720 }
3721 io_mem_opaque[io_index] = opaque;
3722
3723 return (io_index << IO_MEM_SHIFT);
3724}
3725
3726int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3727 CPUWriteMemoryFunc * const *mem_write,
3728 void *opaque)
3729{
3730 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
3731}
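/* Usage sketch (illustrative; the my_dev_* handler names are placeholders):
 * a device model builds byte/word/dword handler tables, registers them, and
 * maps the returned token with cpu_register_physical_memory():
 *
 *     static CPUReadMemoryFunc * const my_dev_read[3] = {
 *         my_dev_readb, my_dev_readw, my_dev_readl,
 *     };
 *     static CPUWriteMemoryFunc * const my_dev_write[3] = {
 *         my_dev_writeb, my_dev_writew, my_dev_writel,
 *     };
 *
 *     int io = cpu_register_io_memory(my_dev_read, my_dev_write, s);
 *     cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
 */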
3732
3733void cpu_unregister_io_memory(int io_table_address)
3734{
3735 int i;
3736 int io_index = io_table_address >> IO_MEM_SHIFT;
3737
3738    for (i = 0; i < 3; i++) {
3739 io_mem_read[io_index][i] = unassigned_mem_read[i];
3740 io_mem_write[io_index][i] = unassigned_mem_write[i];
3741 }
3742 io_mem_opaque[io_index] = NULL;
3743 io_mem_used[io_index] = 0;
3744}
3745
3746static void io_mem_init(void)
3747{
3748 int i;
3749
3750 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
3751 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
3752 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
3753 for (i=0; i<5; i++)
3754 io_mem_used[i] = 1;
3755
3756 io_mem_watch = cpu_register_io_memory(watch_mem_read,
3757 watch_mem_write, NULL);
3758}
3759
3760#endif /* !defined(CONFIG_USER_ONLY) */
3761
3762/* physical memory access (slow version, mainly for debug) */
3763#if defined(CONFIG_USER_ONLY)
3764int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3765 uint8_t *buf, int len, int is_write)
3766{
3767 int l, flags;
3768 target_ulong page;
3769 void * p;
3770
3771 while (len > 0) {
3772 page = addr & TARGET_PAGE_MASK;
3773 l = (page + TARGET_PAGE_SIZE) - addr;
3774 if (l > len)
3775 l = len;
3776 flags = page_get_flags(page);
3777 if (!(flags & PAGE_VALID))
3778 return -1;
3779 if (is_write) {
3780 if (!(flags & PAGE_WRITE))
3781 return -1;
3782 /* XXX: this code should not depend on lock_user */
3783 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3784 return -1;
3785 memcpy(p, buf, l);
3786 unlock_user(p, addr, l);
3787 } else {
3788 if (!(flags & PAGE_READ))
3789 return -1;
3790 /* XXX: this code should not depend on lock_user */
3791 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3792 return -1;
3793 memcpy(buf, p, l);
3794 unlock_user(p, addr, 0);
3795 }
3796 len -= l;
3797 buf += l;
3798 addr += l;
3799 }
3800 return 0;
3801}
3802
3803#else
3804void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3805 int len, int is_write)
3806{
3807 int l, io_index;
3808 uint8_t *ptr;
3809 uint32_t val;
3810 target_phys_addr_t page;
3811 ram_addr_t pd;
3812 PhysPageDesc *p;
3813
3814 while (len > 0) {
3815 page = addr & TARGET_PAGE_MASK;
3816 l = (page + TARGET_PAGE_SIZE) - addr;
3817 if (l > len)
3818 l = len;
3819 p = phys_page_find(page >> TARGET_PAGE_BITS);
3820 if (!p) {
3821 pd = IO_MEM_UNASSIGNED;
3822 } else {
3823 pd = p->phys_offset;
3824 }
3825
3826 if (is_write) {
3827 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3828 target_phys_addr_t addr1 = addr;
3829 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3830 if (p)
3831 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3832 /* XXX: could force cpu_single_env to NULL to avoid
3833 potential bugs */
3834 if (l >= 4 && ((addr1 & 3) == 0)) {
3835 /* 32 bit write access */
3836#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3837 val = ldl_p(buf);
3838#else
3839 val = *(const uint32_t *)buf;
3840#endif
3841 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3842 l = 4;
3843 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3844 /* 16 bit write access */
3845#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3846 val = lduw_p(buf);
3847#else
3848 val = *(const uint16_t *)buf;
3849#endif
3850 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3851 l = 2;
3852 } else {
3853 /* 8 bit write access */
3854#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3855 val = ldub_p(buf);
3856#else
3857 val = *(const uint8_t *)buf;
3858#endif
3859 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3860 l = 1;
3861 }
3862 } else {
3863 ram_addr_t addr1;
3864 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3865 /* RAM case */
3866#ifdef VBOX
3867 remR3PhysWrite(addr1, buf, l); NOREF(ptr);
3868#else
3869 ptr = qemu_get_ram_ptr(addr1);
3870 memcpy(ptr, buf, l);
3871#endif
3872 if (!cpu_physical_memory_is_dirty(addr1)) {
3873 /* invalidate code */
3874 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3875 /* set dirty bit */
3876 cpu_physical_memory_set_dirty_flags(
3877 addr1, (0xff & ~CODE_DIRTY_FLAG));
3878 }
3879 }
3880 } else {
3881 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3882 !(pd & IO_MEM_ROMD)) {
3883 target_phys_addr_t addr1 = addr;
3884 /* I/O case */
3885 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3886 if (p)
3887 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3888 if (l >= 4 && ((addr1 & 3) == 0)) {
3889 /* 32 bit read access */
3890 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3891#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3892 stl_p(buf, val);
3893#else
3894 *(uint32_t *)buf = val;
3895#endif
3896 l = 4;
3897 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3898 /* 16 bit read access */
3899 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3900#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3901 stw_p(buf, val);
3902#else
3903 *(uint16_t *)buf = val;
3904#endif
3905 l = 2;
3906 } else {
3907 /* 8 bit read access */
3908 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3909#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3910 stb_p(buf, val);
3911#else
3912 *(uint8_t *)buf = val;
3913#endif
3914 l = 1;
3915 }
3916 } else {
3917 /* RAM case */
3918#ifdef VBOX
3919 remR3PhysRead((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), buf, l); NOREF(ptr);
3920#else
3921 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3922 (addr & ~TARGET_PAGE_MASK);
3923 memcpy(buf, ptr, l);
3924#endif
3925 }
3926 }
3927 len -= l;
3928 buf += l;
3929 addr += l;
3930 }
3931}
3932
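/* Usage sketch (illustrative; "gpa" is a placeholder guest-physical
 * address): copying a small buffer to and from guest memory, with is_write
 * selecting the direction:
 *
 *     uint8_t buf[16];
 *     cpu_physical_memory_rw(gpa, buf, sizeof(buf), 0);   reads into buf
 *     memset(buf, 0, sizeof(buf));
 *     cpu_physical_memory_rw(gpa, buf, sizeof(buf), 1);   writes buf back
 */
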
3933#ifndef VBOX
3934
3935/* used for ROM loading : can write in RAM and ROM */
3936void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3937 const uint8_t *buf, int len)
3938{
3939 int l;
3940 uint8_t *ptr;
3941 target_phys_addr_t page;
3942 ram_addr_t pd;
3943 PhysPageDesc *p;
3944
3945 while (len > 0) {
3946 page = addr & TARGET_PAGE_MASK;
3947 l = (page + TARGET_PAGE_SIZE) - addr;
3948 if (l > len)
3949 l = len;
3950 p = phys_page_find(page >> TARGET_PAGE_BITS);
3951 if (!p) {
3952 pd = IO_MEM_UNASSIGNED;
3953 } else {
3954 pd = p->phys_offset;
3955 }
3956
3957 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3958 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3959 !(pd & IO_MEM_ROMD)) {
3960 /* do nothing */
3961 } else {
3962 ram_addr_t addr1;
3963 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3964 /* ROM/RAM case */
3965 ptr = qemu_get_ram_ptr(addr1);
3966 memcpy(ptr, buf, l);
3967 }
3968 len -= l;
3969 buf += l;
3970 addr += l;
3971 }
3972}
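
/*
 * Sketch of the intended call pattern: firmware/option-ROM loaders copy an
 * image into a ROM-backed region that cpu_physical_memory_rw() would leave
 * untouched.  The base address and image buffer below are assumptions for
 * illustration only.
 *
 *   static const uint8_t oprom_image[64 * 1024];  // hypothetical ROM image
 *   cpu_physical_memory_write_rom(0x000e0000, oprom_image, sizeof(oprom_image));
 */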
3973
3974typedef struct {
3975 void *buffer;
3976 target_phys_addr_t addr;
3977 target_phys_addr_t len;
3978} BounceBuffer;
3979
3980static BounceBuffer bounce;
3981
3982typedef struct MapClient {
3983 void *opaque;
3984 void (*callback)(void *opaque);
3985 QLIST_ENTRY(MapClient) link;
3986} MapClient;
3987
3988static QLIST_HEAD(map_client_list, MapClient) map_client_list
3989 = QLIST_HEAD_INITIALIZER(map_client_list);
3990
3991void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3992{
3993 MapClient *client = qemu_malloc(sizeof(*client));
3994
3995 client->opaque = opaque;
3996 client->callback = callback;
3997 QLIST_INSERT_HEAD(&map_client_list, client, link);
3998 return client;
3999}
4000
4001void cpu_unregister_map_client(void *_client)
4002{
4003 MapClient *client = (MapClient *)_client;
4004
4005 QLIST_REMOVE(client, link);
4006 qemu_free(client);
4007}
4008
4009static void cpu_notify_map_clients(void)
4010{
4011 MapClient *client;
4012
4013 while (!QLIST_EMPTY(&map_client_list)) {
4014 client = QLIST_FIRST(&map_client_list);
4015 client->callback(client->opaque);
4016 cpu_unregister_map_client(client);
4017 }
4018}
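
/*
 * Sketch of the retry protocol: a caller whose cpu_physical_memory_map()
 * attempt failed because the single bounce buffer was busy registers a
 * callback and retries once cpu_notify_map_clients() fires (after the buffer
 * is released in cpu_physical_memory_unmap()).  Note that the client is
 * unregistered automatically after its callback runs, so the returned handle
 * must not be reused.  The names below are hypothetical.
 *
 *   static void my_dma_retry(void *opaque)
 *   {
 *       MyDMAState *s = opaque;   // hypothetical device state
 *       my_dma_run(s);            // will call cpu_physical_memory_map() again
 *   }
 *
 *   s->map_client = cpu_register_map_client(s, my_dma_retry);
 */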
4019
4020/* Map a physical memory region into a host virtual address.
4021 * May map a subset of the requested range, given by and returned in *plen.
4022 * May return NULL if resources needed to perform the mapping are exhausted.
4023 * Use only for reads OR writes - not for read-modify-write operations.
4024 * Use cpu_register_map_client() to know when retrying the map operation is
4025 * likely to succeed.
4026 */
4027void *cpu_physical_memory_map(target_phys_addr_t addr,
4028 target_phys_addr_t *plen,
4029 int is_write)
4030{
4031 target_phys_addr_t len = *plen;
4032 target_phys_addr_t done = 0;
4033 int l;
4034 uint8_t *ret = NULL;
4035 uint8_t *ptr;
4036 target_phys_addr_t page;
4037 ram_addr_t pd;
4038 PhysPageDesc *p;
4039 ram_addr_t addr1;
4040
4041 while (len > 0) {
4042 page = addr & TARGET_PAGE_MASK;
4043 l = (page + TARGET_PAGE_SIZE) - addr;
4044 if (l > len)
4045 l = len;
4046 p = phys_page_find(page >> TARGET_PAGE_BITS);
4047 if (!p) {
4048 pd = IO_MEM_UNASSIGNED;
4049 } else {
4050 pd = p->phys_offset;
4051 }
4052
4053 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4054 if (done || bounce.buffer) {
4055 break;
4056 }
4057 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
4058 bounce.addr = addr;
4059 bounce.len = l;
4060 if (!is_write) {
4061 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
4062 }
4063 ptr = bounce.buffer;
4064 } else {
4065 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4066 ptr = qemu_get_ram_ptr(addr1);
4067 }
4068 if (!done) {
4069 ret = ptr;
4070 } else if (ret + done != ptr) {
4071 break;
4072 }
4073
4074 len -= l;
4075 addr += l;
4076 done += l;
4077 }
4078 *plen = done;
4079 return ret;
4080}
4081
4082/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
4083 * Will also mark the memory as dirty if is_write == 1. access_len gives
4084 * the amount of memory that was actually read or written by the caller.
4085 */
4086void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
4087 int is_write, target_phys_addr_t access_len)
4088{
4089 if (buffer != bounce.buffer) {
4090 if (is_write) {
4091 ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
4092 while (access_len) {
4093 unsigned l;
4094 l = TARGET_PAGE_SIZE;
4095 if (l > access_len)
4096 l = access_len;
4097 if (!cpu_physical_memory_is_dirty(addr1)) {
4098 /* invalidate code */
4099 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
4100 /* set dirty bit */
4101 cpu_physical_memory_set_dirty_flags(
4102 addr1, (0xff & ~CODE_DIRTY_FLAG));
4103 }
4104 addr1 += l;
4105 access_len -= l;
4106 }
4107 }
4108 return;
4109 }
4110 if (is_write) {
4111 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
4112 }
4113 qemu_vfree(bounce.buffer);
4114 bounce.buffer = NULL;
4115 cpu_notify_map_clients();
4116}
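
/*
 * Typical map/use/unmap cycle following the contract documented above.  The
 * loop copes with *plen being shrunk (e.g. when the transfer crosses into
 * MMIO and the bounce buffer limits it to one page).  fill_buffer(),
 * dma_addr and dma_len are placeholders for the caller's own logic.
 *
 *   target_phys_addr_t addr = dma_addr, remaining = dma_len;
 *   while (remaining) {
 *       target_phys_addr_t plen = remaining;
 *       void *host = cpu_physical_memory_map(addr, &plen, 1);   // is_write=1
 *       if (!host)
 *           break;              // bounce buffer busy: register a map client
 *       fill_buffer(host, plen);
 *       cpu_physical_memory_unmap(host, plen, 1, plen);
 *       addr += plen;
 *       remaining -= plen;
 *   }
 */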
4117
4118#endif /* !VBOX */
4119
4120/* warning: addr must be aligned */
4121uint32_t ldl_phys(target_phys_addr_t addr)
4122{
4123 int io_index;
4124 uint8_t *ptr;
4125 uint32_t val;
4126 ram_addr_t pd;
4127 PhysPageDesc *p;
4128
4129 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4130 if (!p) {
4131 pd = IO_MEM_UNASSIGNED;
4132 } else {
4133 pd = p->phys_offset;
4134 }
4135
4136 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4137 !(pd & IO_MEM_ROMD)) {
4138 /* I/O case */
4139 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4140 if (p)
4141 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4142 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
4143 } else {
4144 /* RAM case */
4145#ifndef VBOX
4146 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4147 (addr & ~TARGET_PAGE_MASK);
4148 val = ldl_p(ptr);
4149#else
4150 val = remR3PhysReadU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
4151#endif
4152 }
4153 return val;
4154}
4155
4156/* warning: addr must be aligned */
4157uint64_t ldq_phys(target_phys_addr_t addr)
4158{
4159 int io_index;
4160 uint8_t *ptr;
4161 uint64_t val;
4162 ram_addr_t pd;
4163 PhysPageDesc *p;
4164
4165 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4166 if (!p) {
4167 pd = IO_MEM_UNASSIGNED;
4168 } else {
4169 pd = p->phys_offset;
4170 }
4171
4172 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4173 !(pd & IO_MEM_ROMD)) {
4174 /* I/O case */
4175 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4176 if (p)
4177 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4178#ifdef TARGET_WORDS_BIGENDIAN
4179 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
4180 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
4181#else
4182 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
4183 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
4184#endif
4185 } else {
4186 /* RAM case */
4187#ifndef VBOX
4188 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4189 (addr & ~TARGET_PAGE_MASK);
4190 val = ldq_p(ptr);
4191#else
4192 val = remR3PhysReadU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
4193#endif
4194 }
4195 return val;
4196}
4197
4198/* XXX: optimize */
4199uint32_t ldub_phys(target_phys_addr_t addr)
4200{
4201 uint8_t val;
4202 cpu_physical_memory_read(addr, &val, 1);
4203 return val;
4204}
4205
4206/* warning: addr must be aligned */
4207uint32_t lduw_phys(target_phys_addr_t addr)
4208{
4209 int io_index;
4210#ifndef VBOX
4211 uint8_t *ptr;
4212#endif
4213 uint64_t val;
4214 ram_addr_t pd;
4215 PhysPageDesc *p;
4216
4217 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4218 if (!p) {
4219 pd = IO_MEM_UNASSIGNED;
4220 } else {
4221 pd = p->phys_offset;
4222 }
4223
4224 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4225 !(pd & IO_MEM_ROMD)) {
4226 /* I/O case */
4227 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4228 if (p)
4229 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4230 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
4231 } else {
4232 /* RAM case */
4233#ifndef VBOX
4234 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4235 (addr & ~TARGET_PAGE_MASK);
4236 val = lduw_p(ptr);
4237#else
4238 val = remR3PhysReadU16((pd & TARGET_PAGE_MASK) | (addr & ~TARGET_PAGE_MASK));
4239#endif
4240 }
4241 return val;
4242}
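
/*
 * The fixed-size loads above each fetch one quantity of the given width; the
 * multi-byte variants require naturally aligned guest-physical addresses
 * (see the warnings).  The addresses below are arbitrary, for illustration
 * only.
 *
 *   uint32_t w  = ldl_phys(0x1000);   // 4-byte aligned
 *   uint64_t q  = ldq_phys(0x1008);   // 8-byte aligned
 *   uint32_t hw = lduw_phys(0x1010);  // 2-byte aligned
 *   uint32_t b  = ldub_phys(0x1013);  // no alignment constraint
 */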
4243
4244/* warning: addr must be aligned. The ram page is not marked as dirty
4245 and the code inside is not invalidated. It is useful if the dirty
4246 bits are used to track modified PTEs */
4247void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
4248{
4249 int io_index;
4250 uint8_t *ptr;
4251 ram_addr_t pd;
4252 PhysPageDesc *p;
4253
4254 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4255 if (!p) {
4256 pd = IO_MEM_UNASSIGNED;
4257 } else {
4258 pd = p->phys_offset;
4259 }
4260
4261 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4262 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4263 if (p)
4264 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4265 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4266 } else {
4267#ifndef VBOX
4268 ram_addr_t addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4269 ptr = qemu_get_ram_ptr(addr1);
4270 stl_p(ptr, val);
4271#else
4272 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
4273#endif
4274
4275#ifndef VBOX
4276 if (unlikely(in_migration)) {
4277 if (!cpu_physical_memory_is_dirty(addr1)) {
4278 /* invalidate code */
4279 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4280 /* set dirty bit */
4281 cpu_physical_memory_set_dirty_flags(
4282 addr1, (0xff & ~CODE_DIRTY_FLAG));
4283 }
4284 }
4285#endif /* !VBOX */
4286 }
4287}
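
/*
 * The _notdirty stores exist for guest page-table maintenance: when an MMU
 * helper sets the accessed/dirty bits of a PTE it must not mark the page
 * dirty or invalidate translated code, otherwise every page walk would
 * trash the TB cache.  Sketch along the lines of the x86 MMU fault handler
 * (pte_addr and pte are placeholders):
 *
 *   pte |= PG_ACCESSED_MASK;            // x86 PTE bookkeeping
 *   stl_phys_notdirty(pte_addr, pte);   // no dirty flag, no TB invalidation
 */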
4288
4289void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
4290{
4291 int io_index;
4292 uint8_t *ptr;
4293 ram_addr_t pd;
4294 PhysPageDesc *p;
4295
4296 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4297 if (!p) {
4298 pd = IO_MEM_UNASSIGNED;
4299 } else {
4300 pd = p->phys_offset;
4301 }
4302
4303 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4304 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4305 if (p)
4306 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4307#ifdef TARGET_WORDS_BIGENDIAN
4308 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
4309 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
4310#else
4311 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4312 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
4313#endif
4314 } else {
4315#ifndef VBOX
4316 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4317 (addr & ~TARGET_PAGE_MASK);
4318 stq_p(ptr, val);
4319#else
4320 remR3PhysWriteU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
4321#endif
4322 }
4323}
4324
4325/* warning: addr must be aligned */
4326void stl_phys(target_phys_addr_t addr, uint32_t val)
4327{
4328 int io_index;
4329 uint8_t *ptr;
4330 ram_addr_t pd;
4331 PhysPageDesc *p;
4332
4333 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4334 if (!p) {
4335 pd = IO_MEM_UNASSIGNED;
4336 } else {
4337 pd = p->phys_offset;
4338 }
4339
4340 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4341 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4342 if (p)
4343 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4344 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4345 } else {
4346 ram_addr_t addr1;
4347 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4348 /* RAM case */
4349#ifndef VBOX
4350 ptr = qemu_get_ram_ptr(addr1);
4351 stl_p(ptr, val);
4352#else
4353 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
4354#endif
4355 if (!cpu_physical_memory_is_dirty(addr1)) {
4356 /* invalidate code */
4357 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4358 /* set dirty bit */
4359 cpu_physical_memory_set_dirty_flags(addr1,
4360 (0xff & ~CODE_DIRTY_FLAG));
4361 }
4362 }
4363}
4364
4365/* XXX: optimize */
4366void stb_phys(target_phys_addr_t addr, uint32_t val)
4367{
4368 uint8_t v = val;
4369 cpu_physical_memory_write(addr, &v, 1);
4370}
4371
4372/* warning: addr must be aligned */
4373void stw_phys(target_phys_addr_t addr, uint32_t val)
4374{
4375 int io_index;
4376 uint8_t *ptr;
4377 ram_addr_t pd;
4378 PhysPageDesc *p;
4379
4380 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4381 if (!p) {
4382 pd = IO_MEM_UNASSIGNED;
4383 } else {
4384 pd = p->phys_offset;
4385 }
4386
4387 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4388 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4389 if (p)
4390 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4391 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
4392 } else {
4393 ram_addr_t addr1;
4394 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4395 /* RAM case */
4396#ifndef VBOX
4397 ptr = qemu_get_ram_ptr(addr1);
4398 stw_p(ptr, val);
4399#else
4400 remR3PhysWriteU16(addr1, val); NOREF(ptr);
4401#endif
4402 if (!cpu_physical_memory_is_dirty(addr1)) {
4403 /* invalidate code */
4404 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4405 /* set dirty bit */
4406 cpu_physical_memory_set_dirty_flags(addr1,
4407 (0xff & ~CODE_DIRTY_FLAG));
4408 }
4409 }
4410}
4411
4412/* XXX: optimize */
4413void stq_phys(target_phys_addr_t addr, uint64_t val)
4414{
4415 val = tswap64(val);
4416 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
4417}
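
/*
 * The plain stores mirror the loads and, unlike the _notdirty variants
 * above, invalidate any translated code on the target page and set its
 * dirty flags.  "addr" is a placeholder for any suitably aligned
 * guest-physical address.
 *
 *   stb_phys(addr, 0x55);
 *   stw_phys(addr & ~1, 0xaa55);
 *   stl_phys(addr & ~3, 0xdeadbeef);
 *   stq_phys(addr & ~7, 0x0123456789abcdefULL);
 */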
4418
4419#ifndef VBOX
4420/* virtual memory access for debug (includes writing to ROM) */
4421int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
4422 uint8_t *buf, int len, int is_write)
4423{
4424 int l;
4425 target_phys_addr_t phys_addr;
4426 target_ulong page;
4427
4428 while (len > 0) {
4429 page = addr & TARGET_PAGE_MASK;
4430 phys_addr = cpu_get_phys_page_debug(env, page);
4431 /* if no physical page mapped, return an error */
4432 if (phys_addr == -1)
4433 return -1;
4434 l = (page + TARGET_PAGE_SIZE) - addr;
4435 if (l > len)
4436 l = len;
4437 phys_addr += (addr & ~TARGET_PAGE_MASK);
4438 if (is_write)
4439 cpu_physical_memory_write_rom(phys_addr, buf, l);
4440 else
4441 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
4442 len -= l;
4443 buf += l;
4444 addr += l;
4445 }
4446 return 0;
4447}
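
/*
 * This is the accessor the gdb stub relies on: it takes a guest *virtual*
 * address, resolving each page via cpu_get_phys_page_debug(), and returns
 * -1 if any page in the range is unmapped.  Sketch for dumping code bytes
 * at the current program counter (env->eip is the x86 field, shown here
 * only as an assumption; other targets use their own PC member):
 *
 *   uint8_t insn[16];
 *   if (cpu_memory_rw_debug(env, env->eip, insn, sizeof(insn), 0) == 0) {
 *       // insn[] now holds up to 16 bytes of guest code
 *   }
 */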
4448#endif /* !VBOX */
4449#endif
4450
4451/* in deterministic execution mode, instructions doing device I/O
4452 must be at the end of the TB */
4453void cpu_io_recompile(CPUState *env, void *retaddr)
4454{
4455 TranslationBlock *tb;
4456 uint32_t n, cflags;
4457 target_ulong pc, cs_base;
4458 uint64_t flags;
4459
4460 tb = tb_find_pc((uintptr_t)retaddr);
4461 if (!tb) {
4462 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4463 retaddr);
4464 }
4465 n = env->icount_decr.u16.low + tb->icount;
4466 cpu_restore_state(tb, env, (uintptr_t)retaddr, NULL);
4467 /* Calculate how many instructions had been executed before the fault
4468 occurred. */
4469 n = n - env->icount_decr.u16.low;
4470 /* Generate a new TB ending on the I/O insn. */
4471 n++;
4472 /* On MIPS and SH, delay slot instructions can only be restarted if
4473 they were already the first instruction in the TB. If this is not
4474 the first instruction in a TB then re-execute the preceding
4475 branch. */
4476#if defined(TARGET_MIPS)
4477 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4478 env->active_tc.PC -= 4;
4479 env->icount_decr.u16.low++;
4480 env->hflags &= ~MIPS_HFLAG_BMASK;
4481 }
4482#elif defined(TARGET_SH4)
4483 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4484 && n > 1) {
4485 env->pc -= 2;
4486 env->icount_decr.u16.low++;
4487 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4488 }
4489#endif
4490 /* This should never happen. */
4491 if (n > CF_COUNT_MASK)
4492 cpu_abort(env, "TB too big during recompile");
4493
4494 cflags = n | CF_LAST_IO;
4495 pc = tb->pc;
4496 cs_base = tb->cs_base;
4497 flags = tb->flags;
4498 tb_phys_invalidate(tb, -1);
4499 /* FIXME: In theory this could raise an exception. In practice
4500 we have already translated the block once so it's probably ok. */
4501 tb_gen_code(env, pc, cs_base, flags, cflags);
4502 /** @todo If env->pc != tb->pc (i.e. the faulting instruction was not
4503 the first in the TB) then we end up generating a whole new TB and
4504 repeating the fault, which is horribly inefficient.
4505 Better would be to execute just this insn uncached, or generate a
4506 second new TB. */
4507 cpu_resume_from_signal(env, NULL);
4508}
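
/*
 * Illustrative arithmetic for the accounting above: suppose the subtraction
 * leaves n == 2, i.e. two guest instructions completed before the faulting
 * I/O access.  n++ then counts the I/O instruction itself, so tb_gen_code()
 * is asked for a block with cflags == (3 | CF_LAST_IO): three instructions,
 * with the I/O access guaranteed to be the last thing the new TB executes.
 */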
4509
4510#if !defined(CONFIG_USER_ONLY)
4511
4512#ifndef VBOX
4513void dump_exec_info(FILE *f,
4514 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
4515{
4516 int i, target_code_size, max_target_code_size;
4517 int direct_jmp_count, direct_jmp2_count, cross_page;
4518 TranslationBlock *tb;
4519
4520 target_code_size = 0;
4521 max_target_code_size = 0;
4522 cross_page = 0;
4523 direct_jmp_count = 0;
4524 direct_jmp2_count = 0;
4525 for(i = 0; i < nb_tbs; i++) {
4526 tb = &tbs[i];
4527 target_code_size += tb->size;
4528 if (tb->size > max_target_code_size)
4529 max_target_code_size = tb->size;
4530 if (tb->page_addr[1] != -1)
4531 cross_page++;
4532 if (tb->tb_next_offset[0] != 0xffff) {
4533 direct_jmp_count++;
4534 if (tb->tb_next_offset[1] != 0xffff) {
4535 direct_jmp2_count++;
4536 }
4537 }
4538 }
4539    /* XXX: avoid using doubles? */
4540 cpu_fprintf(f, "Translation buffer state:\n");
4541 cpu_fprintf(f, "gen code size %ld/%ld\n",
4542 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4543 cpu_fprintf(f, "TB count %d/%d\n",
4544 nb_tbs, code_gen_max_blocks);
4545 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
4546 nb_tbs ? target_code_size / nb_tbs : 0,
4547 max_target_code_size);
4548 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
4549 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4550 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
4551 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4552 cross_page,
4553 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4554 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
4555 direct_jmp_count,
4556 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4557 direct_jmp2_count,
4558 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
4559 cpu_fprintf(f, "\nStatistics:\n");
4560 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4561 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4562 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
4563 tcg_dump_info(f, cpu_fprintf);
4564}
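
/*
 * A sketch of how this can be driven from a debug hook: fprintf already has
 * the signature expected for cpu_fprintf, and stderr is just a convenient
 * sink.
 *
 *   dump_exec_info(stderr, fprintf);
 */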
4565#endif /* !VBOX */
4566
4567#define MMUSUFFIX _cmmu
4568#define GETPC() NULL
4569#define env cpu_single_env
4570#define SOFTMMU_CODE_ACCESS
4571
4572#define SHIFT 0
4573#include "softmmu_template.h"
4574
4575#define SHIFT 1
4576#include "softmmu_template.h"
4577
4578#define SHIFT 2
4579#include "softmmu_template.h"
4580
4581#define SHIFT 3
4582#include "softmmu_template.h"
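
/*
 * Each inclusion of softmmu_template.h instantiates the slow-path helpers
 * for one access size (DATA_SIZE == 1 << SHIFT), so SHIFT 0..3 yields the
 * 1-, 2-, 4- and 8-byte code-fetch variants (MMUSUFFIX _cmmu with
 * SOFTMMU_CODE_ACCESS).  They read guest instructions through the TLB during
 * translation, which is why GETPC() is NULL and env is aliased to
 * cpu_single_env here.
 */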
4583
4584#undef env
4585
4586#endif