VirtualBox

source: vbox/trunk/src/recompiler/exec.c@36171

Last change on this file since 36171 was 36171, checked in by vboxsync, 14 years ago

rem: Merged in changes from the branches/stable_0_10 (r7249).

  • Property svn:eol-style set to native
File size: 121.9 KB
1/*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
19 */
20
21/*
22 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29
30#include "config.h"
31#ifndef VBOX
32#ifdef _WIN32
33#define WIN32_LEAN_AND_MEAN
34#include <windows.h>
35#else
36#include <sys/types.h>
37#include <sys/mman.h>
38#endif
39#include <stdlib.h>
40#include <stdio.h>
41#include <stdarg.h>
42#include <string.h>
43#include <errno.h>
44#include <unistd.h>
45#include <inttypes.h>
46#else /* VBOX */
47# include <stdlib.h>
48# include <stdio.h>
49# include <iprt/alloc.h>
50# include <iprt/string.h>
51# include <iprt/param.h>
52# include <VBox/vmm/pgm.h> /* PGM_DYNAMIC_RAM_ALLOC */
53#endif /* VBOX */
54
55#include "cpu.h"
56#include "exec-all.h"
57#include "qemu-common.h"
58#include "tcg.h"
59#ifndef VBOX
60#include "hw/hw.h"
61#endif
62#include "osdep.h"
63#include "kvm.h"
64#if defined(CONFIG_USER_ONLY)
65#include <qemu.h>
66#endif
67
68//#define DEBUG_TB_INVALIDATE
69//#define DEBUG_FLUSH
70//#define DEBUG_TLB
71//#define DEBUG_UNASSIGNED
72
73/* make various TB consistency checks */
74//#define DEBUG_TB_CHECK
75//#define DEBUG_TLB_CHECK
76
77//#define DEBUG_IOPORT
78//#define DEBUG_SUBPAGE
79
80#if !defined(CONFIG_USER_ONLY)
81/* TB consistency checks only implemented for usermode emulation. */
82#undef DEBUG_TB_CHECK
83#endif
84
85#define SMC_BITMAP_USE_THRESHOLD 10
86
87#define MMAP_AREA_START 0x00000000
88#define MMAP_AREA_END 0xa8000000
89
90#if defined(TARGET_SPARC64)
91#define TARGET_PHYS_ADDR_SPACE_BITS 41
92#elif defined(TARGET_SPARC)
93#define TARGET_PHYS_ADDR_SPACE_BITS 36
94#elif defined(TARGET_ALPHA)
95#define TARGET_PHYS_ADDR_SPACE_BITS 42
96#define TARGET_VIRT_ADDR_SPACE_BITS 42
97#elif defined(TARGET_PPC64)
98#define TARGET_PHYS_ADDR_SPACE_BITS 42
99#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
100#define TARGET_PHYS_ADDR_SPACE_BITS 42
101#elif defined(TARGET_I386) && !defined(USE_KQEMU)
102#define TARGET_PHYS_ADDR_SPACE_BITS 36
103#else
104/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
105#define TARGET_PHYS_ADDR_SPACE_BITS 32
106#endif
107
108static TranslationBlock *tbs;
109int code_gen_max_blocks;
110TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
111static int nb_tbs;
112/* any access to the tbs or the page table must use this lock */
113spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
114
115#ifndef VBOX
116#if defined(__arm__) || defined(__sparc_v9__)
117/* The prologue must be reachable with a direct jump. ARM and Sparc64
118 have limited branch ranges (possibly also PPC) so place it in a
119 section close to the code segment. */
120#define code_gen_section \
121 __attribute__((__section__(".gen_code"))) \
122 __attribute__((aligned (32)))
123#else
124#define code_gen_section \
125 __attribute__((aligned (32)))
126#endif
127
128uint8_t code_gen_prologue[1024] code_gen_section;
129#else /* VBOX */
130extern uint8_t* code_gen_prologue;
131#endif /* VBOX */
132static uint8_t *code_gen_buffer;
133static unsigned long code_gen_buffer_size;
134/* threshold to flush the translated code buffer */
135static unsigned long code_gen_buffer_max_size;
136uint8_t *code_gen_ptr;
137
138#ifndef VBOX
139#if !defined(CONFIG_USER_ONLY)
140ram_addr_t phys_ram_size;
141int phys_ram_fd;
142uint8_t *phys_ram_base;
143uint8_t *phys_ram_dirty;
144static int in_migration;
145static ram_addr_t phys_ram_alloc_offset = 0;
146#endif
147#else /* VBOX */
148RTGCPHYS phys_ram_size;
149/* we have memory ranges (the high PC-BIOS mapping) which
150 cause some pages to fall outside the dirty map here. */
151RTGCPHYS phys_ram_dirty_size;
152uint8_t *phys_ram_dirty;
153#endif /* VBOX */
154
155CPUState *first_cpu;
156/* current CPU in the current thread. It is only valid inside
157 cpu_exec() */
158CPUState *cpu_single_env;
159/* 0 = Do not count executed instructions.
160 1 = Precise instruction counting.
161 2 = Adaptive rate instruction counting. */
162int use_icount = 0;
163/* Current instruction counter. While executing translated code this may
164 include some instructions that have not yet been executed. */
165int64_t qemu_icount;
166
167typedef struct PageDesc {
168 /* list of TBs intersecting this ram page */
169 TranslationBlock *first_tb;
170 /* in order to optimize self modifying code, we count the number
171 of lookups we do to a given page to use a bitmap */
172 unsigned int code_write_count;
173 uint8_t *code_bitmap;
174#if defined(CONFIG_USER_ONLY)
175 unsigned long flags;
176#endif
177} PageDesc;
178
179typedef struct PhysPageDesc {
180 /* offset in host memory of the page + io_index in the low bits */
181 ram_addr_t phys_offset;
182 ram_addr_t region_offset;
183} PhysPageDesc;
184
185#define L2_BITS 10
186#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
187/* XXX: this is a temporary hack for alpha target.
188 * In the future, this is to be replaced by a multi-level table
189 * to actually be able to handle the complete 64-bit address space.
190 */
191#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
192#else
193#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
194#endif
195#ifdef VBOX
196#define L0_BITS (TARGET_PHYS_ADDR_SPACE_BITS - 32)
197#endif
198
199#ifdef VBOX
200#define L0_SIZE (1 << L0_BITS)
201#endif
202#define L1_SIZE (1 << L1_BITS)
203#define L2_SIZE (1 << L2_BITS)
204
205unsigned long qemu_real_host_page_size;
206unsigned long qemu_host_page_bits;
207unsigned long qemu_host_page_size;
208unsigned long qemu_host_page_mask;
209
210/* XXX: for system emulation, it could just be an array */
211#ifndef VBOX
212static PageDesc *l1_map[L1_SIZE];
213static PhysPageDesc **l1_phys_map;
214#else
215static unsigned l0_map_max_used = 0;
216static PageDesc **l0_map[L0_SIZE];
217static void **l0_phys_map[L0_SIZE];
218#endif
219
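/* An illustrative sketch of how a page index is split across the map levels
   declared above (assuming the defaults for a 32-bit target: TARGET_PAGE_BITS
   == 12, L1_BITS == L2_BITS == 10); this is not part of the lookup code
   itself, which follows in page_l1_map()/page_find_alloc():

     target_ulong index = addr >> TARGET_PAGE_BITS;          // page number
     unsigned i1 = index >> L2_BITS;                          // slot in l1_map
     unsigned i2 = index & (L2_SIZE - 1);                     // PageDesc within that chunk
     PageDesc *pd = l1_map[i1] ? &l1_map[i1][i2] : NULL;

   The VBox build adds one more level, l0_map, indexed by
   index >> (L1_BITS + L2_BITS), so guest-physical addresses above 4GB can be
   reached without enlarging the level 1 tables. */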
220#if !defined(CONFIG_USER_ONLY)
221static void io_mem_init(void);
222
223/* io memory support */
224CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
225CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
226void *io_mem_opaque[IO_MEM_NB_ENTRIES];
227char io_mem_used[IO_MEM_NB_ENTRIES];
228static int io_mem_watch;
229#endif
230
231#ifndef VBOX
232/* log support */
233static const char *logfilename = "/tmp/qemu.log";
234#endif /* !VBOX */
235FILE *logfile;
236int loglevel;
237#ifndef VBOX
238static int log_append = 0;
239#endif
240
241/* statistics */
242#ifndef VBOX
243static int tlb_flush_count;
244static int tb_flush_count;
245static int tb_phys_invalidate_count;
246#else /* VBOX - Resettable U32 stats, see VBoxRecompiler.c. */
247uint32_t tlb_flush_count;
248uint32_t tb_flush_count;
249uint32_t tb_phys_invalidate_count;
250#endif /* VBOX */
251
252#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
253typedef struct subpage_t {
254 target_phys_addr_t base;
255 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
256 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
257 void *opaque[TARGET_PAGE_SIZE][2][4];
258 ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
259} subpage_t;
260
261#ifndef VBOX
262#ifdef _WIN32
263static void map_exec(void *addr, long size)
264{
265 DWORD old_protect;
266 VirtualProtect(addr, size,
267 PAGE_EXECUTE_READWRITE, &old_protect);
268
269}
270#else
271static void map_exec(void *addr, long size)
272{
273 unsigned long start, end, page_size;
274
275 page_size = getpagesize();
276 start = (unsigned long)addr;
277 start &= ~(page_size - 1);
278
279 end = (unsigned long)addr + size;
280 end += page_size - 1;
281 end &= ~(page_size - 1);
282
283 mprotect((void *)start, end - start,
284 PROT_READ | PROT_WRITE | PROT_EXEC);
285}
286#endif
287#else /* VBOX */
288static void map_exec(void *addr, long size)
289{
290 RTMemProtect(addr, size,
291 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
292}
293#endif /* VBOX */
294
295static void page_init(void)
296{
297 /* NOTE: we can always suppose that qemu_host_page_size >=
298 TARGET_PAGE_SIZE */
299#ifdef VBOX
300 RTMemProtect(code_gen_buffer, sizeof(code_gen_buffer),
301 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
302 qemu_real_host_page_size = PAGE_SIZE;
303#else /* !VBOX */
304#ifdef _WIN32
305 {
306 SYSTEM_INFO system_info;
307
308 GetSystemInfo(&system_info);
309 qemu_real_host_page_size = system_info.dwPageSize;
310 }
311#else
312 qemu_real_host_page_size = getpagesize();
313#endif
314#endif /* !VBOX */
315 if (qemu_host_page_size == 0)
316 qemu_host_page_size = qemu_real_host_page_size;
317 if (qemu_host_page_size < TARGET_PAGE_SIZE)
318 qemu_host_page_size = TARGET_PAGE_SIZE;
319 qemu_host_page_bits = 0;
320#ifndef VBOX
321 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
322#else
323 while ((1 << qemu_host_page_bits) < (int)qemu_host_page_size)
324#endif
325 qemu_host_page_bits++;
326 qemu_host_page_mask = ~(qemu_host_page_size - 1);
327#ifndef VBOX
328 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
329 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
330#endif
331
332#ifdef VBOX
333 /* We use other means to set the reserved bit on our pages */
334#else /* !VBOX */
335#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
336 {
337 long long startaddr, endaddr;
338 FILE *f;
339 int n;
340
341 mmap_lock();
342 last_brk = (unsigned long)sbrk(0);
343 f = fopen("/proc/self/maps", "r");
344 if (f) {
345 do {
346 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
347 if (n == 2) {
348 startaddr = MIN(startaddr,
349 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
350 endaddr = MIN(endaddr,
351 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
352 page_set_flags(startaddr & TARGET_PAGE_MASK,
353 TARGET_PAGE_ALIGN(endaddr),
354 PAGE_RESERVED);
355 }
356 } while (!feof(f));
357 fclose(f);
358 }
359 mmap_unlock();
360 }
361#endif
362#endif /* !VBOX */
363}
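/* A worked example of the host page size setup in page_init() above (the 4KiB
   host page is an assumption; other hosts differ):

     qemu_real_host_page_size == 4096
     qemu_host_page_size      == 4096       // max(real host page, TARGET_PAGE_SIZE)
     qemu_host_page_bits      == 12         // smallest n with (1 << n) >= 4096
     qemu_host_page_mask      == ~(4096 - 1)

   so (addr & qemu_host_page_mask) rounds an address down to the start of its
   host page, which is what tb_alloc_page() relies on further down. */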
364
365static inline PageDesc **page_l1_map(target_ulong index)
366{
367#ifndef VBOX
368#if TARGET_LONG_BITS > 32
369 /* Host memory outside guest VM. For 32-bit targets we have already
370 excluded high addresses. */
371 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
372 return NULL;
373#endif
374 return &l1_map[index >> L2_BITS];
375#else /* VBOX */
376 PageDesc **l1_map;
377 AssertMsgReturn(index < (target_ulong)L2_SIZE * L1_SIZE * L0_SIZE,
378 ("index=%RGp >= %RGp; L1_SIZE=%#x L2_SIZE=%#x L0_SIZE=%#x\n",
379 (RTGCPHYS)index, (RTGCPHYS)L2_SIZE * L1_SIZE, L1_SIZE, L2_SIZE, L0_SIZE),
380 NULL);
381 l1_map = l0_map[index >> (L1_BITS + L2_BITS)];
382 if (RT_UNLIKELY(!l1_map))
383 {
384 unsigned i0 = index >> (L1_BITS + L2_BITS);
385 l0_map[i0] = l1_map = qemu_mallocz(sizeof(PageDesc *) * L1_SIZE);
386 if (RT_UNLIKELY(!l1_map))
387 return NULL;
388 if (i0 >= l0_map_max_used)
389 l0_map_max_used = i0 + 1;
390 }
391 return &l1_map[(index >> L2_BITS) & (L1_SIZE - 1)];
392#endif /* VBOX */
393}
394
395static inline PageDesc *page_find_alloc(target_ulong index)
396{
397 PageDesc **lp, *p;
398 lp = page_l1_map(index);
399 if (!lp)
400 return NULL;
401
402 p = *lp;
403 if (!p) {
404 /* allocate if not found */
405#if defined(CONFIG_USER_ONLY)
406 size_t len = sizeof(PageDesc) * L2_SIZE;
407 /* Don't use qemu_malloc because it may recurse. */
408 p = mmap(0, len, PROT_READ | PROT_WRITE,
409 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
410 *lp = p;
411 if (h2g_valid(p)) {
412 unsigned long addr = h2g(p);
413 page_set_flags(addr & TARGET_PAGE_MASK,
414 TARGET_PAGE_ALIGN(addr + len),
415 PAGE_RESERVED);
416 }
417#else
418 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
419 *lp = p;
420#endif
421 }
422 return p + (index & (L2_SIZE - 1));
423}
424
425static inline PageDesc *page_find(target_ulong index)
426{
427 PageDesc **lp, *p;
428 lp = page_l1_map(index);
429 if (!lp)
430 return NULL;
431
432 p = *lp;
433 if (!p)
434 return 0;
435 return p + (index & (L2_SIZE - 1));
436}
437
438static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
439{
440 void **lp, **p;
441 PhysPageDesc *pd;
442
443#ifndef VBOX
444 p = (void **)l1_phys_map;
445#if TARGET_PHYS_ADDR_SPACE_BITS > 32
446
447#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
448#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
449#endif
450 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
451 p = *lp;
452 if (!p) {
453 /* allocate if not found */
454 if (!alloc)
455 return NULL;
456 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
457 memset(p, 0, sizeof(void *) * L1_SIZE);
458 *lp = p;
459 }
460#endif
461#else /* VBOX */
462 /* level 0 lookup and lazy allocation of level 1 map. */
463 if (RT_UNLIKELY(index >= (target_phys_addr_t)L2_SIZE * L1_SIZE * L0_SIZE))
464 return NULL;
465 p = l0_phys_map[index >> (L1_BITS + L2_BITS)];
466 if (RT_UNLIKELY(!p)) {
467 if (!alloc)
468 return NULL;
469 p = qemu_vmalloc(sizeof(void **) * L1_SIZE);
470 memset(p, 0, sizeof(void **) * L1_SIZE);
471 l0_phys_map[index >> (L1_BITS + L2_BITS)] = p;
472 }
473
474 /* level 1 lookup and lazy allocation of level 2 map. */
475#endif /* VBOX */
476 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
477 pd = *lp;
478 if (!pd) {
479 int i;
480 /* allocate if not found */
481 if (!alloc)
482 return NULL;
483 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
484 *lp = pd;
485 for (i = 0; i < L2_SIZE; i++) {
486 pd[i].phys_offset = IO_MEM_UNASSIGNED;
487 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
488 }
489 }
490 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
491}
492
493static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
494{
495 return phys_page_find_alloc(index, 0);
496}
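/* Minimal usage sketch (an assumption about typical callers, mirroring
   breakpoint_invalidate() further down): a guest-physical address resolves to
   a PhysPageDesc whose phys_offset carries the io_index in its low bits, as
   the PhysPageDesc comment above states.

     PhysPageDesc *p = phys_page_find(paddr >> TARGET_PAGE_BITS);
     ram_addr_t pd = p ? p->phys_offset : IO_MEM_UNASSIGNED;
     ram_addr_t io_index = pd & ~TARGET_PAGE_MASK;    // I/O handler index (IO_MEM_RAM for plain RAM)
     ram_addr_t ram_off  = pd & TARGET_PAGE_MASK;     // page-aligned offset into guest RAM
*/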
497
498#if !defined(CONFIG_USER_ONLY)
499static void tlb_protect_code(ram_addr_t ram_addr);
500static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
501 target_ulong vaddr);
502#define mmap_lock() do { } while(0)
503#define mmap_unlock() do { } while(0)
504#endif
505
506#ifdef VBOX /* We don't need such a huge codegen buffer size, as we execute
507 most of the code in raw or hwacc mode. */
508#define DEFAULT_CODE_GEN_BUFFER_SIZE (8 * 1024 * 1024)
509#else /* !VBOX */
510#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
511#endif /* !VBOX */
512
513#if defined(CONFIG_USER_ONLY)
514/* Currently it is not recommended to allocate big chunks of data in
515 user mode. It will change when a dedicated libc is used */
516#define USE_STATIC_CODE_GEN_BUFFER
517#endif
518
519/* VBox allocates codegen buffer dynamically */
520#ifndef VBOX
521#ifdef USE_STATIC_CODE_GEN_BUFFER
522static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
523#endif
524#endif
525
526static void code_gen_alloc(unsigned long tb_size)
527{
528#ifdef USE_STATIC_CODE_GEN_BUFFER
529 code_gen_buffer = static_code_gen_buffer;
530 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
531 map_exec(code_gen_buffer, code_gen_buffer_size);
532#else
533#ifdef VBOX
534 /* We cannot use phys_ram_size here, as it's 0 now;
535 * it only gets initialized once the RAM registration callback
536 * (REMR3NotifyPhysRamRegister()) is called.
537 */
538 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
539#else
540 code_gen_buffer_size = tb_size;
541 if (code_gen_buffer_size == 0) {
542#if defined(CONFIG_USER_ONLY)
543 /* in user mode, phys_ram_size is not meaningful */
544 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
545#else
546 /* XXX: needs adjustments */
547 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
548#endif
549 }
550 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
551 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
552#endif /* VBOX */
553 /* The code gen buffer location may have constraints depending on
554 the host cpu and OS */
555#ifdef VBOX
556 code_gen_buffer = RTMemExecAlloc(code_gen_buffer_size);
557
558 if (!code_gen_buffer) {
559 LogRel(("REM: failed to allocate codegen buffer %lld\n",
560 code_gen_buffer_size));
561 return;
562 }
563#else /* !VBOX */
564#if defined(__linux__)
565 {
566 int flags;
567 void *start = NULL;
568
569 flags = MAP_PRIVATE | MAP_ANONYMOUS;
570#if defined(__x86_64__)
571 flags |= MAP_32BIT;
572 /* Cannot map more than that */
573 if (code_gen_buffer_size > (800 * 1024 * 1024))
574 code_gen_buffer_size = (800 * 1024 * 1024);
575#elif defined(__sparc_v9__)
576 // Map the buffer below 2G, so we can use direct calls and branches
577 flags |= MAP_FIXED;
578 start = (void *) 0x60000000UL;
579 if (code_gen_buffer_size > (512 * 1024 * 1024))
580 code_gen_buffer_size = (512 * 1024 * 1024);
581#elif defined(__arm__)
582 /* Map the buffer below 32M, so we can use direct calls and branches */
583 flags |= MAP_FIXED;
584 start = (void *) 0x01000000UL;
585 if (code_gen_buffer_size > 16 * 1024 * 1024)
586 code_gen_buffer_size = 16 * 1024 * 1024;
587#endif
588 code_gen_buffer = mmap(start, code_gen_buffer_size,
589 PROT_WRITE | PROT_READ | PROT_EXEC,
590 flags, -1, 0);
591 if (code_gen_buffer == MAP_FAILED) {
592 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
593 exit(1);
594 }
595 }
596#elif defined(__FreeBSD__)
597 {
598 int flags;
599 void *addr = NULL;
600 flags = MAP_PRIVATE | MAP_ANONYMOUS;
601#if defined(__x86_64__)
602 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
603 * 0x40000000 is free */
604 flags |= MAP_FIXED;
605 addr = (void *)0x40000000;
606 /* Cannot map more than that */
607 if (code_gen_buffer_size > (800 * 1024 * 1024))
608 code_gen_buffer_size = (800 * 1024 * 1024);
609#endif
610 code_gen_buffer = mmap(addr, code_gen_buffer_size,
611 PROT_WRITE | PROT_READ | PROT_EXEC,
612 flags, -1, 0);
613 if (code_gen_buffer == MAP_FAILED) {
614 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
615 exit(1);
616 }
617 }
618#else
619 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
620 map_exec(code_gen_buffer, code_gen_buffer_size);
621#endif
622#endif /* !VBOX */
623#endif /* !USE_STATIC_CODE_GEN_BUFFER */
624#ifndef VBOX
625 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
626#else
627 map_exec(code_gen_prologue, _1K);
628#endif
629 code_gen_buffer_max_size = code_gen_buffer_size -
630 code_gen_max_block_size();
631 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
632 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
633}
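/* The sizing arithmetic above, worked through for the 8MB VBox default
   (illustrative numbers only):

     code_gen_buffer_max_size = 8MB - code_gen_max_block_size()
     code_gen_max_blocks      = 8MB / CODE_GEN_AVG_BLOCK_SIZE

   tb_alloc() below returns NULL once either limit is reached, which makes
   tb_gen_code() perform a full tb_flush() and start over. */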
634
635/* Must be called before using the QEMU cpus. 'tb_size' is the size
636 (in bytes) allocated to the translation buffer. Zero means default
637 size. */
638void cpu_exec_init_all(unsigned long tb_size)
639{
640 cpu_gen_init();
641 code_gen_alloc(tb_size);
642 code_gen_ptr = code_gen_buffer;
643 page_init();
644#if !defined(CONFIG_USER_ONLY)
645 io_mem_init();
646#endif
647}
648
649#ifndef VBOX
650#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
651
652#define CPU_COMMON_SAVE_VERSION 1
653
654static void cpu_common_save(QEMUFile *f, void *opaque)
655{
656 CPUState *env = opaque;
657
658 qemu_put_be32s(f, &env->halted);
659 qemu_put_be32s(f, &env->interrupt_request);
660}
661
662static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
663{
664 CPUState *env = opaque;
665
666 if (version_id != CPU_COMMON_SAVE_VERSION)
667 return -EINVAL;
668
669 qemu_get_be32s(f, &env->halted);
670 qemu_get_be32s(f, &env->interrupt_request);
671 env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
672 tlb_flush(env, 1);
673
674 return 0;
675}
676#endif
677#endif /* !VBOX */
678
679void cpu_exec_init(CPUState *env)
680{
681 CPUState **penv;
682 int cpu_index;
683
684 env->next_cpu = NULL;
685 penv = &first_cpu;
686 cpu_index = 0;
687 while (*penv != NULL) {
688 penv = (CPUState **)&(*penv)->next_cpu;
689 cpu_index++;
690 }
691 env->cpu_index = cpu_index;
692 TAILQ_INIT(&env->breakpoints);
693 TAILQ_INIT(&env->watchpoints);
694 *penv = env;
695#ifndef VBOX
696#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
697 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
698 cpu_common_save, cpu_common_load, env);
699 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
700 cpu_save, cpu_load, env);
701#endif
702#endif /* !VBOX */
703}
704
705static inline void invalidate_page_bitmap(PageDesc *p)
706{
707 if (p->code_bitmap) {
708 qemu_free(p->code_bitmap);
709 p->code_bitmap = NULL;
710 }
711 p->code_write_count = 0;
712}
713
714/* set to NULL all the 'first_tb' fields in all PageDescs */
715static void page_flush_tb(void)
716{
717 int i, j;
718 PageDesc *p;
719#ifdef VBOX
720 int k;
721#endif
722
723#ifdef VBOX
724 k = l0_map_max_used;
725 while (k-- > 0) {
726 PageDesc **l1_map = l0_map[k];
727 if (l1_map) {
728#endif
729 for(i = 0; i < L1_SIZE; i++) {
730 p = l1_map[i];
731 if (p) {
732 for(j = 0; j < L2_SIZE; j++) {
733 p->first_tb = NULL;
734 invalidate_page_bitmap(p);
735 p++;
736 }
737 }
738 }
739#ifdef VBOX
740 }
741 }
742#endif
743}
744
745/* flush all the translation blocks */
746/* XXX: tb_flush is currently not thread safe */
747void tb_flush(CPUState *env1)
748{
749 CPUState *env;
750#ifdef VBOX
751 STAM_PROFILE_START(&env1->StatTbFlush, a);
752#endif
753#if defined(DEBUG_FLUSH)
754 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
755 (unsigned long)(code_gen_ptr - code_gen_buffer),
756 nb_tbs, nb_tbs > 0 ?
757 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
758#endif
759 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
760 cpu_abort(env1, "Internal error: code buffer overflow\n");
761
762 nb_tbs = 0;
763
764 for(env = first_cpu; env != NULL; env = env->next_cpu) {
765 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
766 }
767
768 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
769 page_flush_tb();
770
771 code_gen_ptr = code_gen_buffer;
772 /* XXX: flush processor icache at this point if cache flush is
773 expensive */
774 tb_flush_count++;
775#ifdef VBOX
776 STAM_PROFILE_STOP(&env1->StatTbFlush, a);
777#endif
778}
779
780#ifdef DEBUG_TB_CHECK
781
782static void tb_invalidate_check(target_ulong address)
783{
784 TranslationBlock *tb;
785 int i;
786 address &= TARGET_PAGE_MASK;
787 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
788 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
789 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
790 address >= tb->pc + tb->size)) {
791 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
792 address, (long)tb->pc, tb->size);
793 }
794 }
795 }
796}
797
798/* verify that all the pages have correct rights for code */
799static void tb_page_check(void)
800{
801 TranslationBlock *tb;
802 int i, flags1, flags2;
803
804 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
805 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
806 flags1 = page_get_flags(tb->pc);
807 flags2 = page_get_flags(tb->pc + tb->size - 1);
808 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
809 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
810 (long)tb->pc, tb->size, flags1, flags2);
811 }
812 }
813 }
814}
815
816static void tb_jmp_check(TranslationBlock *tb)
817{
818 TranslationBlock *tb1;
819 unsigned int n1;
820
821 /* suppress any remaining jumps to this TB */
822 tb1 = tb->jmp_first;
823 for(;;) {
824 n1 = (long)tb1 & 3;
825 tb1 = (TranslationBlock *)((long)tb1 & ~3);
826 if (n1 == 2)
827 break;
828 tb1 = tb1->jmp_next[n1];
829 }
830 /* check end of list */
831 if (tb1 != tb) {
832 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
833 }
834}
835
836#endif
837
838/* invalidate one TB */
839static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
840 int next_offset)
841{
842 TranslationBlock *tb1;
843 for(;;) {
844 tb1 = *ptb;
845 if (tb1 == tb) {
846 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
847 break;
848 }
849 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
850 }
851}
852
853static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
854{
855 TranslationBlock *tb1;
856 unsigned int n1;
857
858 for(;;) {
859 tb1 = *ptb;
860 n1 = (long)tb1 & 3;
861 tb1 = (TranslationBlock *)((long)tb1 & ~3);
862 if (tb1 == tb) {
863 *ptb = tb1->page_next[n1];
864 break;
865 }
866 ptb = &tb1->page_next[n1];
867 }
868}
869
870static inline void tb_jmp_remove(TranslationBlock *tb, int n)
871{
872 TranslationBlock *tb1, **ptb;
873 unsigned int n1;
874
875 ptb = &tb->jmp_next[n];
876 tb1 = *ptb;
877 if (tb1) {
878 /* find tb(n) in circular list */
879 for(;;) {
880 tb1 = *ptb;
881 n1 = (long)tb1 & 3;
882 tb1 = (TranslationBlock *)((long)tb1 & ~3);
883 if (n1 == n && tb1 == tb)
884 break;
885 if (n1 == 2) {
886 ptb = &tb1->jmp_first;
887 } else {
888 ptb = &tb1->jmp_next[n1];
889 }
890 }
891 /* now we can suppress tb(n) from the list */
892 *ptb = tb->jmp_next[n];
893
894 tb->jmp_next[n] = NULL;
895 }
896}
897
898/* reset the jump entry 'n' of a TB so that it is not chained to
899 another TB */
900static inline void tb_reset_jump(TranslationBlock *tb, int n)
901{
902 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
903}
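/* A sketch of the pointer-tagging convention the jump-list code above relies
   on (a reading of the code, stated as an assumption): the low two bits of a
   stored TranslationBlock pointer name the jmp_next[] slot of the pointee
   that continues the circular list, and the value 2 terminates it.

     TranslationBlock *tagged = tb->jmp_first;
     unsigned n = (long)tagged & 3;                                   // 0/1: slot index, 2: end of list
     TranslationBlock *next = (TranslationBlock *)((long)tagged & ~3);
     if (n != 2)
         tagged = next->jmp_next[n];                                  // step along the circle
*/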
904
905void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
906{
907 CPUState *env;
908 PageDesc *p;
909 unsigned int h, n1;
910 target_phys_addr_t phys_pc;
911 TranslationBlock *tb1, *tb2;
912
913 /* remove the TB from the hash list */
914 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
915 h = tb_phys_hash_func(phys_pc);
916 tb_remove(&tb_phys_hash[h], tb,
917 offsetof(TranslationBlock, phys_hash_next));
918
919 /* remove the TB from the page list */
920 if (tb->page_addr[0] != page_addr) {
921 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
922 tb_page_remove(&p->first_tb, tb);
923 invalidate_page_bitmap(p);
924 }
925 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
926 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
927 tb_page_remove(&p->first_tb, tb);
928 invalidate_page_bitmap(p);
929 }
930
931 tb_invalidated_flag = 1;
932
933 /* remove the TB from the hash list */
934 h = tb_jmp_cache_hash_func(tb->pc);
935 for(env = first_cpu; env != NULL; env = env->next_cpu) {
936 if (env->tb_jmp_cache[h] == tb)
937 env->tb_jmp_cache[h] = NULL;
938 }
939
940 /* suppress this TB from the two jump lists */
941 tb_jmp_remove(tb, 0);
942 tb_jmp_remove(tb, 1);
943
944 /* suppress any remaining jumps to this TB */
945 tb1 = tb->jmp_first;
946 for(;;) {
947 n1 = (long)tb1 & 3;
948 if (n1 == 2)
949 break;
950 tb1 = (TranslationBlock *)((long)tb1 & ~3);
951 tb2 = tb1->jmp_next[n1];
952 tb_reset_jump(tb1, n1);
953 tb1->jmp_next[n1] = NULL;
954 tb1 = tb2;
955 }
956 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
957
958 tb_phys_invalidate_count++;
959}
960
961
962#ifdef VBOX
963
964void tb_invalidate_virt(CPUState *env, uint32_t eip)
965{
966# if 1
967 tb_flush(env);
968# else
969 uint8_t *cs_base, *pc;
970 unsigned int flags, h, phys_pc;
971 TranslationBlock *tb, **ptb;
972
973 flags = env->hflags;
974 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
975 cs_base = env->segs[R_CS].base;
976 pc = cs_base + eip;
977
978 tb = tb_find(&ptb, (unsigned long)pc, (unsigned long)cs_base,
979 flags);
980
981 if(tb)
982 {
983# ifdef DEBUG
984 printf("invalidating TB (%08X) at %08X\n", tb, eip);
985# endif
986 tb_invalidate(tb);
987 //Note: this will leak TBs, but the whole cache will be flushed
988 // when it happens too often
989 tb->pc = 0;
990 tb->cs_base = 0;
991 tb->flags = 0;
992 }
993# endif
994}
995
996# ifdef VBOX_STRICT
997/**
998 * Gets the page offset.
999 */
1000unsigned long get_phys_page_offset(target_ulong addr)
1001{
1002 PhysPageDesc *p = phys_page_find(addr >> TARGET_PAGE_BITS);
1003 return p ? p->phys_offset : 0;
1004}
1005# endif /* VBOX_STRICT */
1006
1007#endif /* VBOX */
1008
1009static inline void set_bits(uint8_t *tab, int start, int len)
1010{
1011 int end, mask, end1;
1012
1013 end = start + len;
1014 tab += start >> 3;
1015 mask = 0xff << (start & 7);
1016 if ((start & ~7) == (end & ~7)) {
1017 if (start < end) {
1018 mask &= ~(0xff << (end & 7));
1019 *tab |= mask;
1020 }
1021 } else {
1022 *tab++ |= mask;
1023 start = (start + 8) & ~7;
1024 end1 = end & ~7;
1025 while (start < end1) {
1026 *tab++ = 0xff;
1027 start += 8;
1028 }
1029 if (start < end) {
1030 mask = ~(0xff << (end & 7));
1031 *tab |= mask;
1032 }
1033 }
1034}
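/* Usage sketch for set_bits() with illustrative values: the SMC code bitmap
   holds one bit per byte of the page, so marking a TB that covers bytes 5..12
   sets bits 5..12 across the first two bitmap bytes.

     uint8_t bitmap[TARGET_PAGE_SIZE / 8];
     memset(bitmap, 0, sizeof(bitmap));
     set_bits(bitmap, 5, 8);                  // bits 5..12 become 1
     int covered = bitmap[0] & (1 << 6);      // non-zero: byte 6 of the page holds translated code
*/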
1035
1036static void build_page_bitmap(PageDesc *p)
1037{
1038 int n, tb_start, tb_end;
1039 TranslationBlock *tb;
1040
1041 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
1042
1043 tb = p->first_tb;
1044 while (tb != NULL) {
1045 n = (long)tb & 3;
1046 tb = (TranslationBlock *)((long)tb & ~3);
1047 /* NOTE: this is subtle as a TB may span two physical pages */
1048 if (n == 0) {
1049 /* NOTE: tb_end may be after the end of the page, but
1050 it is not a problem */
1051 tb_start = tb->pc & ~TARGET_PAGE_MASK;
1052 tb_end = tb_start + tb->size;
1053 if (tb_end > TARGET_PAGE_SIZE)
1054 tb_end = TARGET_PAGE_SIZE;
1055 } else {
1056 tb_start = 0;
1057 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1058 }
1059 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
1060 tb = tb->page_next[n];
1061 }
1062}
1063
1064TranslationBlock *tb_gen_code(CPUState *env,
1065 target_ulong pc, target_ulong cs_base,
1066 int flags, int cflags)
1067{
1068 TranslationBlock *tb;
1069 uint8_t *tc_ptr;
1070 target_ulong phys_pc, phys_page2, virt_page2;
1071 int code_gen_size;
1072
1073 phys_pc = get_phys_addr_code(env, pc);
1074 tb = tb_alloc(pc);
1075 if (!tb) {
1076 /* flush must be done */
1077 tb_flush(env);
1078 /* cannot fail at this point */
1079 tb = tb_alloc(pc);
1080 /* Don't forget to invalidate previous TB info. */
1081 tb_invalidated_flag = 1;
1082 }
1083 tc_ptr = code_gen_ptr;
1084 tb->tc_ptr = tc_ptr;
1085 tb->cs_base = cs_base;
1086 tb->flags = flags;
1087 tb->cflags = cflags;
1088 cpu_gen_code(env, tb, &code_gen_size);
1089 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
1090
1091 /* check next page if needed */
1092 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1093 phys_page2 = -1;
1094 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1095 phys_page2 = get_phys_addr_code(env, virt_page2);
1096 }
1097 tb_link_phys(tb, phys_pc, phys_page2);
1098 return tb;
1099}
1100
1101/* invalidate all TBs which intersect with the target physical page
1102 starting in range [start;end[. NOTE: start and end must refer to
1103 the same physical page. 'is_cpu_write_access' should be true if called
1104 from a real cpu write access: the virtual CPU will exit the current
1105 TB if code is modified inside this TB. */
1106void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
1107 int is_cpu_write_access)
1108{
1109 TranslationBlock *tb, *tb_next, *saved_tb;
1110 CPUState *env = cpu_single_env;
1111 target_ulong tb_start, tb_end;
1112 PageDesc *p;
1113 int n;
1114#ifdef TARGET_HAS_PRECISE_SMC
1115 int current_tb_not_found = is_cpu_write_access;
1116 TranslationBlock *current_tb = NULL;
1117 int current_tb_modified = 0;
1118 target_ulong current_pc = 0;
1119 target_ulong current_cs_base = 0;
1120 int current_flags = 0;
1121#endif /* TARGET_HAS_PRECISE_SMC */
1122
1123 p = page_find(start >> TARGET_PAGE_BITS);
1124 if (!p)
1125 return;
1126 if (!p->code_bitmap &&
1127 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1128 is_cpu_write_access) {
1129 /* build code bitmap */
1130 build_page_bitmap(p);
1131 }
1132
1133 /* we remove all the TBs in the range [start, end[ */
1134 /* XXX: see if in some cases it could be faster to invalidate all the code */
1135 tb = p->first_tb;
1136 while (tb != NULL) {
1137 n = (long)tb & 3;
1138 tb = (TranslationBlock *)((long)tb & ~3);
1139 tb_next = tb->page_next[n];
1140 /* NOTE: this is subtle as a TB may span two physical pages */
1141 if (n == 0) {
1142 /* NOTE: tb_end may be after the end of the page, but
1143 it is not a problem */
1144 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1145 tb_end = tb_start + tb->size;
1146 } else {
1147 tb_start = tb->page_addr[1];
1148 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1149 }
1150 if (!(tb_end <= start || tb_start >= end)) {
1151#ifdef TARGET_HAS_PRECISE_SMC
1152 if (current_tb_not_found) {
1153 current_tb_not_found = 0;
1154 current_tb = NULL;
1155 if (env->mem_io_pc) {
1156 /* now we have a real cpu fault */
1157 current_tb = tb_find_pc(env->mem_io_pc);
1158 }
1159 }
1160 if (current_tb == tb &&
1161 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1162 /* If we are modifying the current TB, we must stop
1163 its execution. We could be more precise by checking
1164 that the modification is after the current PC, but it
1165 would require a specialized function to partially
1166 restore the CPU state */
1167
1168 current_tb_modified = 1;
1169 cpu_restore_state(current_tb, env,
1170 env->mem_io_pc, NULL);
1171 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1172 &current_flags);
1173 }
1174#endif /* TARGET_HAS_PRECISE_SMC */
1175 /* we need to do that to handle the case where a signal
1176 occurs while doing tb_phys_invalidate() */
1177 saved_tb = NULL;
1178 if (env) {
1179 saved_tb = env->current_tb;
1180 env->current_tb = NULL;
1181 }
1182 tb_phys_invalidate(tb, -1);
1183 if (env) {
1184 env->current_tb = saved_tb;
1185 if (env->interrupt_request && env->current_tb)
1186 cpu_interrupt(env, env->interrupt_request);
1187 }
1188 }
1189 tb = tb_next;
1190 }
1191#if !defined(CONFIG_USER_ONLY)
1192 /* if no code remaining, no need to continue to use slow writes */
1193 if (!p->first_tb) {
1194 invalidate_page_bitmap(p);
1195 if (is_cpu_write_access) {
1196 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1197 }
1198 }
1199#endif
1200#ifdef TARGET_HAS_PRECISE_SMC
1201 if (current_tb_modified) {
1202 /* we generate a block containing just the instruction
1203 modifying the memory. It will ensure that it cannot modify
1204 itself */
1205 env->current_tb = NULL;
1206 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1207 cpu_resume_from_signal(env, NULL);
1208 }
1209#endif
1210}
1211
1212/* len must be <= 8 and start must be a multiple of len */
1213static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1214{
1215 PageDesc *p;
1216 int offset, b;
1217#if 0
1218 if (1) {
1219 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1220 cpu_single_env->mem_io_vaddr, len,
1221 cpu_single_env->eip,
1222 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1223 }
1224#endif
1225 p = page_find(start >> TARGET_PAGE_BITS);
1226 if (!p)
1227 return;
1228 if (p->code_bitmap) {
1229 offset = start & ~TARGET_PAGE_MASK;
1230 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1231 if (b & ((1 << len) - 1))
1232 goto do_invalidate;
1233 } else {
1234 do_invalidate:
1235 tb_invalidate_phys_page_range(start, start + len, 1);
1236 }
1237}
1238
1239#if !defined(CONFIG_SOFTMMU)
1240static void tb_invalidate_phys_page(target_phys_addr_t addr,
1241 unsigned long pc, void *puc)
1242{
1243 TranslationBlock *tb;
1244 PageDesc *p;
1245 int n;
1246#ifdef TARGET_HAS_PRECISE_SMC
1247 TranslationBlock *current_tb = NULL;
1248 CPUState *env = cpu_single_env;
1249 int current_tb_modified = 0;
1250 target_ulong current_pc = 0;
1251 target_ulong current_cs_base = 0;
1252 int current_flags = 0;
1253#endif
1254
1255 addr &= TARGET_PAGE_MASK;
1256 p = page_find(addr >> TARGET_PAGE_BITS);
1257 if (!p)
1258 return;
1259 tb = p->first_tb;
1260#ifdef TARGET_HAS_PRECISE_SMC
1261 if (tb && pc != 0) {
1262 current_tb = tb_find_pc(pc);
1263 }
1264#endif
1265 while (tb != NULL) {
1266 n = (long)tb & 3;
1267 tb = (TranslationBlock *)((long)tb & ~3);
1268#ifdef TARGET_HAS_PRECISE_SMC
1269 if (current_tb == tb &&
1270 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1271 /* If we are modifying the current TB, we must stop
1272 its execution. We could be more precise by checking
1273 that the modification is after the current PC, but it
1274 would require a specialized function to partially
1275 restore the CPU state */
1276
1277 current_tb_modified = 1;
1278 cpu_restore_state(current_tb, env, pc, puc);
1279 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1280 &current_flags);
1281 }
1282#endif /* TARGET_HAS_PRECISE_SMC */
1283 tb_phys_invalidate(tb, addr);
1284 tb = tb->page_next[n];
1285 }
1286 p->first_tb = NULL;
1287#ifdef TARGET_HAS_PRECISE_SMC
1288 if (current_tb_modified) {
1289 /* we generate a block containing just the instruction
1290 modifying the memory. It will ensure that it cannot modify
1291 itself */
1292 env->current_tb = NULL;
1293 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1294 cpu_resume_from_signal(env, puc);
1295 }
1296#endif
1297}
1298#endif
1299
1300/* add the tb in the target page and protect it if necessary */
1301static inline void tb_alloc_page(TranslationBlock *tb,
1302 unsigned int n, target_ulong page_addr)
1303{
1304 PageDesc *p;
1305 TranslationBlock *last_first_tb;
1306
1307 tb->page_addr[n] = page_addr;
1308 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1309 tb->page_next[n] = p->first_tb;
1310 last_first_tb = p->first_tb;
1311 p->first_tb = (TranslationBlock *)((long)tb | n);
1312 invalidate_page_bitmap(p);
1313
1314#if defined(TARGET_HAS_SMC) || 1
1315
1316#if defined(CONFIG_USER_ONLY)
1317 if (p->flags & PAGE_WRITE) {
1318 target_ulong addr;
1319 PageDesc *p2;
1320 int prot;
1321
1322 /* force the host page as non writable (writes will have a
1323 page fault + mprotect overhead) */
1324 page_addr &= qemu_host_page_mask;
1325 prot = 0;
1326 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1327 addr += TARGET_PAGE_SIZE) {
1328
1329 p2 = page_find (addr >> TARGET_PAGE_BITS);
1330 if (!p2)
1331 continue;
1332 prot |= p2->flags;
1333 p2->flags &= ~PAGE_WRITE;
1334 page_get_flags(addr);
1335 }
1336 mprotect(g2h(page_addr), qemu_host_page_size,
1337 (prot & PAGE_BITS) & ~PAGE_WRITE);
1338#ifdef DEBUG_TB_INVALIDATE
1339 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1340 page_addr);
1341#endif
1342 }
1343#else
1344 /* if some code is already present, then the pages are already
1345 protected. So we handle the case where only the first TB is
1346 allocated in a physical page */
1347 if (!last_first_tb) {
1348 tlb_protect_code(page_addr);
1349 }
1350#endif
1351
1352#endif /* TARGET_HAS_SMC */
1353}
1354
1355/* Allocate a new translation block. Flush the translation buffer if
1356 too many translation blocks or too much generated code. */
1357TranslationBlock *tb_alloc(target_ulong pc)
1358{
1359 TranslationBlock *tb;
1360
1361 if (nb_tbs >= code_gen_max_blocks ||
1362#ifndef VBOX
1363 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1364#else
1365 (code_gen_ptr - code_gen_buffer) >= (int)code_gen_buffer_max_size)
1366#endif
1367 return NULL;
1368 tb = &tbs[nb_tbs++];
1369 tb->pc = pc;
1370 tb->cflags = 0;
1371 return tb;
1372}
1373
1374void tb_free(TranslationBlock *tb)
1375{
1376 /* In practice this is mostly used for single-use temporary TBs.
1377 Ignore the hard cases and just back up if this TB happens to
1378 be the last one generated. */
1379 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1380 code_gen_ptr = tb->tc_ptr;
1381 nb_tbs--;
1382 }
1383}
1384
1385/* add a new TB and link it to the physical page tables. phys_page2 is
1386 (-1) to indicate that only one page contains the TB. */
1387void tb_link_phys(TranslationBlock *tb,
1388 target_ulong phys_pc, target_ulong phys_page2)
1389{
1390 unsigned int h;
1391 TranslationBlock **ptb;
1392
1393 /* Grab the mmap lock to stop another thread invalidating this TB
1394 before we are done. */
1395 mmap_lock();
1396 /* add in the physical hash table */
1397 h = tb_phys_hash_func(phys_pc);
1398 ptb = &tb_phys_hash[h];
1399 tb->phys_hash_next = *ptb;
1400 *ptb = tb;
1401
1402 /* add in the page list */
1403 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1404 if (phys_page2 != -1)
1405 tb_alloc_page(tb, 1, phys_page2);
1406 else
1407 tb->page_addr[1] = -1;
1408
1409 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1410 tb->jmp_next[0] = NULL;
1411 tb->jmp_next[1] = NULL;
1412
1413 /* init original jump addresses */
1414 if (tb->tb_next_offset[0] != 0xffff)
1415 tb_reset_jump(tb, 0);
1416 if (tb->tb_next_offset[1] != 0xffff)
1417 tb_reset_jump(tb, 1);
1418
1419#ifdef DEBUG_TB_CHECK
1420 tb_page_check();
1421#endif
1422 mmap_unlock();
1423}
1424
1425/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1426 tb[1].tc_ptr. Return NULL if not found */
1427TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1428{
1429 int m_min, m_max, m;
1430 unsigned long v;
1431 TranslationBlock *tb;
1432
1433 if (nb_tbs <= 0)
1434 return NULL;
1435 if (tc_ptr < (unsigned long)code_gen_buffer ||
1436 tc_ptr >= (unsigned long)code_gen_ptr)
1437 return NULL;
1438 /* binary search (cf Knuth) */
1439 m_min = 0;
1440 m_max = nb_tbs - 1;
1441 while (m_min <= m_max) {
1442 m = (m_min + m_max) >> 1;
1443 tb = &tbs[m];
1444 v = (unsigned long)tb->tc_ptr;
1445 if (v == tc_ptr)
1446 return tb;
1447 else if (tc_ptr < v) {
1448 m_max = m - 1;
1449 } else {
1450 m_min = m + 1;
1451 }
1452 }
1453 return &tbs[m_max];
1454}
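/* Usage sketch (the helper shown is hypothetical): given a host address
   inside the generated code, tb_find_pc() returns the TB whose translated
   code contains it -- tc_ptr values are ascending because code_gen_ptr only
   grows between flushes, so a binary search over tbs[] is enough.

     static void handle_fault(CPUState *env, unsigned long host_pc)   // host_pc: e.g. PC from a signal context
     {
         TranslationBlock *tb = tb_find_pc(host_pc);
         if (tb)
             cpu_restore_state(tb, env, host_pc, NULL);
     }
*/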
1455
1456static void tb_reset_jump_recursive(TranslationBlock *tb);
1457
1458static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1459{
1460 TranslationBlock *tb1, *tb_next, **ptb;
1461 unsigned int n1;
1462
1463 tb1 = tb->jmp_next[n];
1464 if (tb1 != NULL) {
1465 /* find head of list */
1466 for(;;) {
1467 n1 = (long)tb1 & 3;
1468 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1469 if (n1 == 2)
1470 break;
1471 tb1 = tb1->jmp_next[n1];
1472 }
1473 /* we are now sure that tb jumps to tb1 */
1474 tb_next = tb1;
1475
1476 /* remove tb from the jmp_first list */
1477 ptb = &tb_next->jmp_first;
1478 for(;;) {
1479 tb1 = *ptb;
1480 n1 = (long)tb1 & 3;
1481 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1482 if (n1 == n && tb1 == tb)
1483 break;
1484 ptb = &tb1->jmp_next[n1];
1485 }
1486 *ptb = tb->jmp_next[n];
1487 tb->jmp_next[n] = NULL;
1488
1489 /* suppress the jump to next tb in generated code */
1490 tb_reset_jump(tb, n);
1491
1492 /* suppress jumps in the tb on which we could have jumped */
1493 tb_reset_jump_recursive(tb_next);
1494 }
1495}
1496
1497static void tb_reset_jump_recursive(TranslationBlock *tb)
1498{
1499 tb_reset_jump_recursive2(tb, 0);
1500 tb_reset_jump_recursive2(tb, 1);
1501}
1502
1503#if defined(TARGET_HAS_ICE)
1504static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1505{
1506 target_phys_addr_t addr;
1507 target_ulong pd;
1508 ram_addr_t ram_addr;
1509 PhysPageDesc *p;
1510
1511 addr = cpu_get_phys_page_debug(env, pc);
1512 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1513 if (!p) {
1514 pd = IO_MEM_UNASSIGNED;
1515 } else {
1516 pd = p->phys_offset;
1517 }
1518 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1519 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1520}
1521#endif
1522
1523/* Add a watchpoint. */
1524int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1525 int flags, CPUWatchpoint **watchpoint)
1526{
1527 target_ulong len_mask = ~(len - 1);
1528 CPUWatchpoint *wp;
1529
1530 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1531 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1532 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1533 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1534#ifndef VBOX
1535 return -EINVAL;
1536#else
1537 return VERR_INVALID_PARAMETER;
1538#endif
1539 }
1540 wp = qemu_malloc(sizeof(*wp));
1541
1542 wp->vaddr = addr;
1543 wp->len_mask = len_mask;
1544 wp->flags = flags;
1545
1546 /* keep all GDB-injected watchpoints in front */
1547 if (flags & BP_GDB)
1548 TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1549 else
1550 TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1551
1552 tlb_flush_page(env, addr);
1553
1554 if (watchpoint)
1555 *watchpoint = wp;
1556 return 0;
1557}
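/* The len_mask sanity check above, worked through with hypothetical values:
   for a power-of-two len, ~(len - 1) clears exactly the low log2(len) bits,
   so (addr & ~len_mask) is non-zero precisely when addr is not len-aligned.

     target_ulong len = 4;
     target_ulong len_mask = ~(len - 1);            // ....fffc
     // addr == 0x1000:  (0x1000 & ~len_mask) == 0  -> accepted
     // addr == 0x1001:  (0x1001 & ~len_mask) == 1  -> rejected (-EINVAL / VERR_INVALID_PARAMETER)
*/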
1558
1559/* Remove a specific watchpoint. */
1560int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1561 int flags)
1562{
1563 target_ulong len_mask = ~(len - 1);
1564 CPUWatchpoint *wp;
1565
1566 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1567 if (addr == wp->vaddr && len_mask == wp->len_mask
1568 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1569 cpu_watchpoint_remove_by_ref(env, wp);
1570 return 0;
1571 }
1572 }
1573#ifndef VBOX
1574 return -ENOENT;
1575#else
1576 return VERR_NOT_FOUND;
1577#endif
1578}
1579
1580/* Remove a specific watchpoint by reference. */
1581void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1582{
1583 TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1584
1585 tlb_flush_page(env, watchpoint->vaddr);
1586
1587 qemu_free(watchpoint);
1588}
1589
1590/* Remove all matching watchpoints. */
1591void cpu_watchpoint_remove_all(CPUState *env, int mask)
1592{
1593 CPUWatchpoint *wp, *next;
1594
1595 TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1596 if (wp->flags & mask)
1597 cpu_watchpoint_remove_by_ref(env, wp);
1598 }
1599}
1600
1601/* Add a breakpoint. */
1602int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1603 CPUBreakpoint **breakpoint)
1604{
1605#if defined(TARGET_HAS_ICE)
1606 CPUBreakpoint *bp;
1607
1608 bp = qemu_malloc(sizeof(*bp));
1609
1610 bp->pc = pc;
1611 bp->flags = flags;
1612
1613 /* keep all GDB-injected breakpoints in front */
1614 if (flags & BP_GDB)
1615 TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1616 else
1617 TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1618
1619 breakpoint_invalidate(env, pc);
1620
1621 if (breakpoint)
1622 *breakpoint = bp;
1623 return 0;
1624#else
1625 return -ENOSYS;
1626#endif
1627}
1628
1629/* Remove a specific breakpoint. */
1630int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1631{
1632#if defined(TARGET_HAS_ICE)
1633 CPUBreakpoint *bp;
1634
1635 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1636 if (bp->pc == pc && bp->flags == flags) {
1637 cpu_breakpoint_remove_by_ref(env, bp);
1638 return 0;
1639 }
1640 }
1641# ifndef VBOX
1642 return -ENOENT;
1643# else
1644 return VERR_NOT_FOUND;
1645# endif
1646#else
1647 return -ENOSYS;
1648#endif
1649}
1650
1651/* Remove a specific breakpoint by reference. */
1652void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1653{
1654#if defined(TARGET_HAS_ICE)
1655 TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1656
1657 breakpoint_invalidate(env, breakpoint->pc);
1658
1659 qemu_free(breakpoint);
1660#endif
1661}
1662
1663/* Remove all matching breakpoints. */
1664void cpu_breakpoint_remove_all(CPUState *env, int mask)
1665{
1666#if defined(TARGET_HAS_ICE)
1667 CPUBreakpoint *bp, *next;
1668
1669 TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1670 if (bp->flags & mask)
1671 cpu_breakpoint_remove_by_ref(env, bp);
1672 }
1673#endif
1674}
1675
1676/* enable or disable single step mode. EXCP_DEBUG is returned by the
1677 CPU loop after each instruction */
1678void cpu_single_step(CPUState *env, int enabled)
1679{
1680#if defined(TARGET_HAS_ICE)
1681 if (env->singlestep_enabled != enabled) {
1682 env->singlestep_enabled = enabled;
1683 /* must flush all the translated code to avoid inconsistencies */
1684 /* XXX: only flush what is necessary */
1685 tb_flush(env);
1686 }
1687#endif
1688}
1689
1690#ifndef VBOX
1691/* enable or disable low-level logging */
1692void cpu_set_log(int log_flags)
1693{
1694 loglevel = log_flags;
1695 if (loglevel && !logfile) {
1696 logfile = fopen(logfilename, log_append ? "a" : "w");
1697 if (!logfile) {
1698 perror(logfilename);
1699 _exit(1);
1700 }
1701#if !defined(CONFIG_SOFTMMU)
1702 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1703 {
1704 static char logfile_buf[4096];
1705 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1706 }
1707#else
1708 setvbuf(logfile, NULL, _IOLBF, 0);
1709#endif
1710 log_append = 1;
1711 }
1712 if (!loglevel && logfile) {
1713 fclose(logfile);
1714 logfile = NULL;
1715 }
1716}
1717
1718void cpu_set_log_filename(const char *filename)
1719{
1720 logfilename = strdup(filename);
1721 if (logfile) {
1722 fclose(logfile);
1723 logfile = NULL;
1724 }
1725 cpu_set_log(loglevel);
1726}
1727#endif /* !VBOX */
1728
1729/* mask must never be zero, except for A20 change call */
1730void cpu_interrupt(CPUState *env, int mask)
1731{
1732#if !defined(USE_NPTL)
1733 TranslationBlock *tb;
1734 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1735#endif
1736 int old_mask;
1737
1738 if (mask & CPU_INTERRUPT_EXIT) {
1739 env->exit_request = 1;
1740 mask &= ~CPU_INTERRUPT_EXIT;
1741 }
1742
1743 old_mask = env->interrupt_request;
1744#ifdef VBOX
1745 VM_ASSERT_EMT(env->pVM);
1746 ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, mask);
1747#else /* !VBOX */
1748 /* FIXME: This is probably not threadsafe. A different thread could
1749 be in the middle of a read-modify-write operation. */
1750 env->interrupt_request |= mask;
1751#endif /* !VBOX */
1752#if defined(USE_NPTL)
1753 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1754 problem and hope the cpu will stop of its own accord. For userspace
1755 emulation this often isn't actually as bad as it sounds. Often
1756 signals are used primarily to interrupt blocking syscalls. */
1757#else
1758 if (use_icount) {
1759 env->icount_decr.u16.high = 0xffff;
1760#ifndef CONFIG_USER_ONLY
1761 if (!can_do_io(env)
1762 && (mask & ~old_mask) != 0) {
1763 cpu_abort(env, "Raised interrupt while not in I/O function");
1764 }
1765#endif
1766 } else {
1767 tb = env->current_tb;
1768 /* if the cpu is currently executing code, we must unlink it and
1769 all the potentially executing TB */
1770 if (tb && !testandset(&interrupt_lock)) {
1771 env->current_tb = NULL;
1772 tb_reset_jump_recursive(tb);
1773 resetlock(&interrupt_lock);
1774 }
1775 }
1776#endif
1777}
1778
1779void cpu_reset_interrupt(CPUState *env, int mask)
1780{
1781#ifdef VBOX
1782 /*
1783 * Note: the current implementation can be executed by another thread without problems; make sure this remains true
1784 * for future changes!
1785 */
1786 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~mask);
1787#else /* !VBOX */
1788 env->interrupt_request &= ~mask;
1789#endif /* !VBOX */
1790}
1791
1792#ifndef VBOX
1793const CPULogItem cpu_log_items[] = {
1794 { CPU_LOG_TB_OUT_ASM, "out_asm",
1795 "show generated host assembly code for each compiled TB" },
1796 { CPU_LOG_TB_IN_ASM, "in_asm",
1797 "show target assembly code for each compiled TB" },
1798 { CPU_LOG_TB_OP, "op",
1799 "show micro ops for each compiled TB" },
1800 { CPU_LOG_TB_OP_OPT, "op_opt",
1801 "show micro ops "
1802#ifdef TARGET_I386
1803 "before eflags optimization and "
1804#endif
1805 "after liveness analysis" },
1806 { CPU_LOG_INT, "int",
1807 "show interrupts/exceptions in short format" },
1808 { CPU_LOG_EXEC, "exec",
1809 "show trace before each executed TB (lots of logs)" },
1810 { CPU_LOG_TB_CPU, "cpu",
1811 "show CPU state before block translation" },
1812#ifdef TARGET_I386
1813 { CPU_LOG_PCALL, "pcall",
1814 "show protected mode far calls/returns/exceptions" },
1815 { CPU_LOG_RESET, "cpu_reset",
1816 "show CPU state before CPU resets" },
1817#endif
1818#ifdef DEBUG_IOPORT
1819 { CPU_LOG_IOPORT, "ioport",
1820 "show all i/o ports accesses" },
1821#endif
1822 { 0, NULL, NULL },
1823};
1824
1825static int cmp1(const char *s1, int n, const char *s2)
1826{
1827 if (strlen(s2) != n)
1828 return 0;
1829 return memcmp(s1, s2, n) == 0;
1830}
1831
1832/* takes a comma-separated list of log masks. Returns 0 on error. */
1833int cpu_str_to_log_mask(const char *str)
1834{
1835 const CPULogItem *item;
1836 int mask;
1837 const char *p, *p1;
1838
1839 p = str;
1840 mask = 0;
1841 for(;;) {
1842 p1 = strchr(p, ',');
1843 if (!p1)
1844 p1 = p + strlen(p);
1845 if(cmp1(p,p1-p,"all")) {
1846 for(item = cpu_log_items; item->mask != 0; item++) {
1847 mask |= item->mask;
1848 }
1849 } else {
1850 for(item = cpu_log_items; item->mask != 0; item++) {
1851 if (cmp1(p, p1 - p, item->name))
1852 goto found;
1853 }
1854 return 0;
1855 }
1856 found:
1857 mask |= item->mask;
1858 if (*p1 != ',')
1859 break;
1860 p = p1 + 1;
1861 }
1862 return mask;
1863}
1864#endif /* !VBOX */
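/* Usage sketch for the log mask parser above (the item names come from
   cpu_log_items[]; the error handling shown is an assumption):

     int mask = cpu_str_to_log_mask("in_asm,int");    // CPU_LOG_TB_IN_ASM | CPU_LOG_INT
     if (!mask)
         fprintf(stderr, "unknown log item\n");
     else
         cpu_set_log(mask);
*/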
1865
1866#ifndef VBOX /* VBOX: we have our own routine. */
1867void cpu_abort(CPUState *env, const char *fmt, ...)
1868{
1869 va_list ap;
1870 va_list ap2;
1871
1872 va_start(ap, fmt);
1873 va_copy(ap2, ap);
1874 fprintf(stderr, "qemu: fatal: ");
1875 vfprintf(stderr, fmt, ap);
1876 fprintf(stderr, "\n");
1877#ifdef TARGET_I386
1878 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1879#else
1880 cpu_dump_state(env, stderr, fprintf, 0);
1881#endif
1882 if (qemu_log_enabled()) {
1883 qemu_log("qemu: fatal: ");
1884 qemu_log_vprintf(fmt, ap2);
1885 qemu_log("\n");
1886#ifdef TARGET_I386
1887 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1888#else
1889 log_cpu_state(env, 0);
1890#endif
1891 qemu_log_flush();
1892 qemu_log_close();
1893 }
1894 va_end(ap2);
1895 va_end(ap);
1896 abort();
1897}
1898#endif /* !VBOX */
1899
1900#ifndef VBOX
1901CPUState *cpu_copy(CPUState *env)
1902{
1903 CPUState *new_env = cpu_init(env->cpu_model_str);
1904 CPUState *next_cpu = new_env->next_cpu;
1905 int cpu_index = new_env->cpu_index;
1906#if defined(TARGET_HAS_ICE)
1907 CPUBreakpoint *bp;
1908 CPUWatchpoint *wp;
1909#endif
1910
1911 memcpy(new_env, env, sizeof(CPUState));
1912
1913 /* Preserve chaining and index. */
1914 new_env->next_cpu = next_cpu;
1915 new_env->cpu_index = cpu_index;
1916
1917 /* Clone all break/watchpoints.
1918 Note: Once we support ptrace with hw-debug register access, make sure
1919 BP_CPU break/watchpoints are handled correctly on clone. */
1920 TAILQ_INIT(&env->breakpoints);
1921 TAILQ_INIT(&env->watchpoints);
1922#if defined(TARGET_HAS_ICE)
1923 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1924 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1925 }
1926 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1927 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1928 wp->flags, NULL);
1929 }
1930#endif
1931
1932 return new_env;
1933}
1934#endif /* !VBOX */
1935
1936#if !defined(CONFIG_USER_ONLY)
1937
1938static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1939{
1940 unsigned int i;
1941
1942 /* Discard jump cache entries for any tb which might potentially
1943 overlap the flushed page. */
1944 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1945 memset (&env->tb_jmp_cache[i], 0,
1946 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1947
1948 i = tb_jmp_cache_hash_page(addr);
1949 memset (&env->tb_jmp_cache[i], 0,
1950 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1951
1952#ifdef VBOX
1953 /* inform raw mode about TLB page flush */
1954 remR3FlushPage(env, addr);
1955#endif /* VBOX */
1956}
1957
1958#ifdef VBOX
1959static CPUTLBEntry s_cputlb_empty_entry = {
1960 .addr_read = -1,
1961 .addr_write = -1,
1962 .addr_code = -1,
1963 .addend = -1,
1964};
1965#endif /* VBOX */
1966
1967/* NOTE: if flush_global is true, also flush global entries (not
1968 implemented yet) */
1969void tlb_flush(CPUState *env, int flush_global)
1970{
1971 int i;
1972
1973#if defined(DEBUG_TLB)
1974 printf("tlb_flush:\n");
1975#endif
1976 /* must reset current TB so that interrupts cannot modify the
1977 links while we are modifying them */
1978 env->current_tb = NULL;
1979
1980 for(i = 0; i < CPU_TLB_SIZE; i++) {
1981#ifdef VBOX
1982 int mmu_idx;
1983 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1984 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1985 }
1986#else /* !VBOX */
1987 env->tlb_table[0][i].addr_read = -1;
1988 env->tlb_table[0][i].addr_write = -1;
1989 env->tlb_table[0][i].addr_code = -1;
1990 env->tlb_table[1][i].addr_read = -1;
1991 env->tlb_table[1][i].addr_write = -1;
1992 env->tlb_table[1][i].addr_code = -1;
1993#if (NB_MMU_MODES >= 3)
1994 env->tlb_table[2][i].addr_read = -1;
1995 env->tlb_table[2][i].addr_write = -1;
1996 env->tlb_table[2][i].addr_code = -1;
1997#if (NB_MMU_MODES == 4)
1998 env->tlb_table[3][i].addr_read = -1;
1999 env->tlb_table[3][i].addr_write = -1;
2000 env->tlb_table[3][i].addr_code = -1;
2001#endif
2002#endif
2003#endif /* !VBOX */
2004 }
2005
2006 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
2007
2008#ifdef VBOX
2009 /* inform raw mode about TLB flush */
2010 remR3FlushTLB(env, flush_global);
2011#endif
2012#ifdef USE_KQEMU
2013 if (env->kqemu_enabled) {
2014 kqemu_flush(env, flush_global);
2015 }
2016#endif
2017 tlb_flush_count++;
2018}
2019
2020static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
2021{
2022 if (addr == (tlb_entry->addr_read &
2023 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
2024 addr == (tlb_entry->addr_write &
2025 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
2026 addr == (tlb_entry->addr_code &
2027 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
2028 tlb_entry->addr_read = -1;
2029 tlb_entry->addr_write = -1;
2030 tlb_entry->addr_code = -1;
2031 }
2032}
2033
2034void tlb_flush_page(CPUState *env, target_ulong addr)
2035{
2036 int i;
2037
2038#if defined(DEBUG_TLB)
2039 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
2040#endif
2041 /* must reset current TB so that interrupts cannot modify the
2042 links while we are modifying them */
2043 env->current_tb = NULL;
2044
2045 addr &= TARGET_PAGE_MASK;
2046 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2047 tlb_flush_entry(&env->tlb_table[0][i], addr);
2048 tlb_flush_entry(&env->tlb_table[1][i], addr);
2049#if (NB_MMU_MODES >= 3)
2050 tlb_flush_entry(&env->tlb_table[2][i], addr);
2051#if (NB_MMU_MODES == 4)
2052 tlb_flush_entry(&env->tlb_table[3][i], addr);
2053#endif
2054#endif
2055
2056 tlb_flush_jmp_cache(env, addr);
2057
2058#ifdef USE_KQEMU
2059 if (env->kqemu_enabled) {
2060 kqemu_flush_page(env, addr);
2061 }
2062#endif
2063}
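/* Usage sketch (illustrative only, not part of the original sources): how a
   target helper might invalidate a single stale mapping after the guest
   rewrote a PTE, or fall back to a full flush when a global paging control
   changed. */
#if 0
static void example_invalidate_mapping(CPUState *env, target_ulong vaddr,
                                       int global_paging_change)
{
    if (global_paging_change)
        tlb_flush(env, 1 /* flush_global */);
    else
        tlb_flush_page(env, vaddr);
}
#endif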
2064
2065/* update the TLBs so that writes to code in the virtual page 'addr'
2066 can be detected */
2067static void tlb_protect_code(ram_addr_t ram_addr)
2068{
2069 cpu_physical_memory_reset_dirty(ram_addr,
2070 ram_addr + TARGET_PAGE_SIZE,
2071 CODE_DIRTY_FLAG);
2072#if defined(VBOX) && defined(REM_MONITOR_CODE_PAGES)
2073 /** @todo Retest this? This function has changed... */
2074 remR3ProtectCode(cpu_single_env, ram_addr);
2075#endif
2076}
2077
2078/* update the TLB so that writes in physical page 'phys_addr' are no longer
2079 tested for self-modifying code */
2080static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
2081 target_ulong vaddr)
2082{
2083#ifdef VBOX
2084 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2085#endif
2086 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
2087}
2088
2089static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
2090 unsigned long start, unsigned long length)
2091{
2092 unsigned long addr;
2093
2094#ifdef VBOX
2095 if (start & 3)
2096 return;
2097#endif
2098 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2099 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2100 if ((addr - start) < length) {
2101 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
2102 }
2103 }
2104}
2105
2106void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
2107 int dirty_flags)
2108{
2109 CPUState *env;
2110 unsigned long length, start1;
2111 int i, mask, len;
2112 uint8_t *p;
2113
2114 start &= TARGET_PAGE_MASK;
2115 end = TARGET_PAGE_ALIGN(end);
2116
2117 length = end - start;
2118 if (length == 0)
2119 return;
2120 len = length >> TARGET_PAGE_BITS;
2121#ifdef USE_KQEMU
2122 /* XXX: should not depend on cpu context */
2123 env = first_cpu;
2124 if (env->kqemu_enabled) {
2125 ram_addr_t addr;
2126 addr = start;
2127 for(i = 0; i < len; i++) {
2128 kqemu_set_notdirty(env, addr);
2129 addr += TARGET_PAGE_SIZE;
2130 }
2131 }
2132#endif
2133 mask = ~dirty_flags;
2134 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
2135#ifdef VBOX
2136 if (RT_LIKELY((start >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2137#endif
2138 for(i = 0; i < len; i++)
2139 p[i] &= mask;
2140
2141 /* we modify the TLB cache so that the dirty bit will be set again
2142 when accessing the range */
2143#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2144 start1 = start;
2145#elif !defined(VBOX)
2146 start1 = start + (unsigned long)phys_ram_base;
2147#else
2148 start1 = (unsigned long)remR3TlbGCPhys2Ptr(first_cpu, start, 1 /*fWritable*/); /** @todo page replacing (sharing or read only) may cause trouble, fix interface/whatever. */
2149#endif
2150 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2151 for(i = 0; i < CPU_TLB_SIZE; i++)
2152 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
2153 for(i = 0; i < CPU_TLB_SIZE; i++)
2154 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
2155#if (NB_MMU_MODES >= 3)
2156 for(i = 0; i < CPU_TLB_SIZE; i++)
2157 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
2158#if (NB_MMU_MODES == 4)
2159 for(i = 0; i < CPU_TLB_SIZE; i++)
2160 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
2161#endif
2162#endif
2163 }
2164}
2165
2166#ifndef VBOX
2167int cpu_physical_memory_set_dirty_tracking(int enable)
2168{
2169 in_migration = enable;
2170 return 0;
2171}
2172
2173int cpu_physical_memory_get_dirty_tracking(void)
2174{
2175 return in_migration;
2176}
2177#endif /* !VBOX */
2178
2179void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
2180{
2181 if (kvm_enabled())
2182 kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
2183}
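/* Usage sketch (illustrative only): how migration-style code might enable
   dirty tracking and later pull the KVM dirty log for a RAM range before
   scanning phys_ram_dirty. cpu_physical_memory_set_dirty_tracking() is not
   built in the VBOX configuration. */
#if 0
static void example_dirty_tracking(ram_addr_t ram_size)
{
    cpu_physical_memory_set_dirty_tracking(1);
    /* ... let the guest run for a while ... */
    cpu_physical_sync_dirty_bitmap(0, ram_size);
}
#endif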
2184
2185#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2186DECLINLINE(void) tlb_update_dirty(CPUTLBEntry *tlb_entry, target_phys_addr_t phys_addend)
2187#else
2188static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2189#endif
2190{
2191 ram_addr_t ram_addr;
2192
2193 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2194 /* RAM case */
2195#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2196 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2197#elif !defined(VBOX)
2198 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
2199 tlb_entry->addend - (unsigned long)phys_ram_base;
2200#else
2201 Assert(phys_addend != -1);
2202 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + phys_addend;
2203#endif
2204 if (!cpu_physical_memory_is_dirty(ram_addr)) {
2205 tlb_entry->addr_write |= TLB_NOTDIRTY;
2206 }
2207 }
2208}
2209
2210/* update the TLB according to the current state of the dirty bits */
2211void cpu_tlb_update_dirty(CPUState *env)
2212{
2213 int i;
2214#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2215 for(i = 0; i < CPU_TLB_SIZE; i++)
2216 tlb_update_dirty(&env->tlb_table[0][i], env->phys_addends[0][i]);
2217 for(i = 0; i < CPU_TLB_SIZE; i++)
2218 tlb_update_dirty(&env->tlb_table[1][i], env->phys_addends[1][i]);
2219# if (NB_MMU_MODES >= 3)
2220 for(i = 0; i < CPU_TLB_SIZE; i++)
2221 tlb_update_dirty(&env->tlb_table[2][i], env->phys_addends[2][i]);
2222# if (NB_MMU_MODES == 4)
2223 for(i = 0; i < CPU_TLB_SIZE; i++)
2224 tlb_update_dirty(&env->tlb_table[3][i], env->phys_addends[3][i]);
2225# endif
2226# endif
2227#else /* VBOX */
2228 for(i = 0; i < CPU_TLB_SIZE; i++)
2229 tlb_update_dirty(&env->tlb_table[0][i]);
2230 for(i = 0; i < CPU_TLB_SIZE; i++)
2231 tlb_update_dirty(&env->tlb_table[1][i]);
2232#if (NB_MMU_MODES >= 3)
2233 for(i = 0; i < CPU_TLB_SIZE; i++)
2234 tlb_update_dirty(&env->tlb_table[2][i]);
2235#if (NB_MMU_MODES == 4)
2236 for(i = 0; i < CPU_TLB_SIZE; i++)
2237 tlb_update_dirty(&env->tlb_table[3][i]);
2238#endif
2239#endif
2240#endif /* VBOX */
2241}
2242
2243static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2244{
2245 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2246 tlb_entry->addr_write = vaddr;
2247}
2248
2249/* update the TLB corresponding to virtual page vaddr
2250 so that it is no longer dirty */
2251static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
2252{
2253 int i;
2254
2255 vaddr &= TARGET_PAGE_MASK;
2256 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2257 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
2258 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
2259#if (NB_MMU_MODES >= 3)
2260 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
2261#if (NB_MMU_MODES == 4)
2262 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
2263#endif
2264#endif
2265}
2266
2267/* add a new TLB entry. At most one entry for a given virtual address
2268 is permitted. Return 0 if OK or 2 if the page could not be mapped
2269 (can only happen in non SOFTMMU mode for I/O pages or pages
2270 conflicting with the host address space). */
2271int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2272 target_phys_addr_t paddr, int prot,
2273 int mmu_idx, int is_softmmu)
2274{
2275 PhysPageDesc *p;
2276 unsigned long pd;
2277 unsigned int index;
2278 target_ulong address;
2279 target_ulong code_address;
2280 target_phys_addr_t addend;
2281 int ret;
2282 CPUTLBEntry *te;
2283 CPUWatchpoint *wp;
2284 target_phys_addr_t iotlb;
2285#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2286 int read_mods = 0, write_mods = 0, code_mods = 0;
2287#endif
2288
2289 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2290 if (!p) {
2291 pd = IO_MEM_UNASSIGNED;
2292 } else {
2293 pd = p->phys_offset;
2294 }
2295#if defined(DEBUG_TLB)
2296 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2297 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2298#endif
2299
2300 ret = 0;
2301 address = vaddr;
2302 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2303 /* IO memory case (romd handled later) */
2304 address |= TLB_MMIO;
2305 }
2306#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2307 addend = pd & TARGET_PAGE_MASK;
2308#elif !defined(VBOX)
2309 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
2310#else
2311 /** @todo this is racing the phys_page_find call above since it may register
2312 * a new chunk of memory... */
2313 addend = (unsigned long)remR3TlbGCPhys2Ptr(env,
2314 pd & TARGET_PAGE_MASK,
2315 !!(prot & PAGE_WRITE));
2316#endif
2317
2318 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2319 /* Normal RAM. */
2320 iotlb = pd & TARGET_PAGE_MASK;
2321 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2322 iotlb |= IO_MEM_NOTDIRTY;
2323 else
2324 iotlb |= IO_MEM_ROM;
2325 } else {
2326 /* IO handlers are currently passed a physical address.
2327 It would be nice to pass an offset from the base address
2328 of that region. This would avoid having to special case RAM,
2329 and avoid full address decoding in every device.
2330 We can't use the high bits of pd for this because
2331 IO_MEM_ROMD uses these as a ram address. */
2332 iotlb = (pd & ~TARGET_PAGE_MASK);
2333#ifndef VBOX
2334 if (p) {
2335#else
2336 if ( p->phys_offset
2337 && (pd & ~TARGET_PAGE_MASK) != env->pVM->rem.s.iMMIOMemType
2338 && (pd & ~TARGET_PAGE_MASK) != env->pVM->rem.s.iHandlerMemType) {
2339#endif
2340 iotlb += p->region_offset;
2341 } else {
2342 iotlb += paddr;
2343 }
2344 }
2345
2346 code_address = address;
2347
2348#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2349 if (addend & 0x3)
2350 {
2351 if (addend & 0x2)
2352 {
2353 /* catch write */
2354 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
2355 write_mods |= TLB_MMIO;
2356 }
2357 else if (addend & 0x1)
2358 {
2359 /* catch all */
2360 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
2361 {
2362 read_mods |= TLB_MMIO;
2363 write_mods |= TLB_MMIO;
2364 code_mods |= TLB_MMIO;
2365 }
2366 }
2367 if ((iotlb & ~TARGET_PAGE_MASK) == 0)
2368 iotlb = env->pVM->rem.s.iHandlerMemType + paddr;
2369 addend &= ~(target_ulong)0x3;
2370 }
2371#endif
2372
2373 /* Make accesses to pages with watchpoints go via the
2374 watchpoint trap routines. */
2375 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2376 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2377 iotlb = io_mem_watch + paddr;
2378 /* TODO: The memory case can be optimized by not trapping
2379 reads of pages with a write breakpoint. */
2380 address |= TLB_MMIO;
2381 }
2382 }
2383
2384 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2385 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2386 te = &env->tlb_table[mmu_idx][index];
2387 te->addend = addend - vaddr;
2388 if (prot & PAGE_READ) {
2389 te->addr_read = address;
2390 } else {
2391 te->addr_read = -1;
2392 }
2393
2394 if (prot & PAGE_EXEC) {
2395 te->addr_code = code_address;
2396 } else {
2397 te->addr_code = -1;
2398 }
2399 if (prot & PAGE_WRITE) {
2400 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2401 (pd & IO_MEM_ROMD)) {
2402 /* Write access calls the I/O callback. */
2403 te->addr_write = address | TLB_MMIO;
2404 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2405 !cpu_physical_memory_is_dirty(pd)) {
2406 te->addr_write = address | TLB_NOTDIRTY;
2407 } else {
2408 te->addr_write = address;
2409 }
2410 } else {
2411 te->addr_write = -1;
2412 }
2413
2414#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2415 if (prot & PAGE_READ)
2416 te->addr_read |= read_mods;
2417 if (prot & PAGE_EXEC)
2418 te->addr_code |= code_mods;
2419 if (prot & PAGE_WRITE)
2420 te->addr_write |= write_mods;
2421
2422 env->phys_addends[mmu_idx][index] = (pd & TARGET_PAGE_MASK)- vaddr;
2423#endif
2424
2425#ifdef VBOX
2426 /* inform raw mode about TLB page change */
2427 remR3FlushPage(env, vaddr);
2428#endif
2429 return ret;
2430}
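/* Usage sketch (illustrative only): how a target MMU fault handler might
   install a mapping after a successful page-table walk. 'vaddr', 'paddr' and
   'prot' are assumed to come from the target-specific walk. */
#if 0
static int example_install_mapping(CPUState *env, target_ulong vaddr,
                                   target_phys_addr_t paddr, int prot,
                                   int mmu_idx, int is_softmmu)
{
    /* both addresses are truncated to page boundaries; at most one TLB entry
       per virtual page is kept, so any previous entry is simply replaced */
    return tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
                             paddr & TARGET_PAGE_MASK, prot,
                             mmu_idx, is_softmmu);
}
#endif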
2431
2432#else
2433
2434void tlb_flush(CPUState *env, int flush_global)
2435{
2436}
2437
2438void tlb_flush_page(CPUState *env, target_ulong addr)
2439{
2440}
2441
2442int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2443 target_phys_addr_t paddr, int prot,
2444 int mmu_idx, int is_softmmu)
2445{
2446 return 0;
2447}
2448
2449#ifndef VBOX
2450/* dump memory mappings */
2451void page_dump(FILE *f)
2452{
2453 unsigned long start, end;
2454 int i, j, prot, prot1;
2455 PageDesc *p;
2456
2457 fprintf(f, "%-8s %-8s %-8s %s\n",
2458 "start", "end", "size", "prot");
2459 start = -1;
2460 end = -1;
2461 prot = 0;
2462 for(i = 0; i <= L1_SIZE; i++) {
2463 if (i < L1_SIZE)
2464 p = l1_map[i];
2465 else
2466 p = NULL;
2467 for(j = 0;j < L2_SIZE; j++) {
2468 if (!p)
2469 prot1 = 0;
2470 else
2471 prot1 = p[j].flags;
2472 if (prot1 != prot) {
2473 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2474 if (start != -1) {
2475 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2476 start, end, end - start,
2477 prot & PAGE_READ ? 'r' : '-',
2478 prot & PAGE_WRITE ? 'w' : '-',
2479 prot & PAGE_EXEC ? 'x' : '-');
2480 }
2481 if (prot1 != 0)
2482 start = end;
2483 else
2484 start = -1;
2485 prot = prot1;
2486 }
2487 if (!p)
2488 break;
2489 }
2490 }
2491}
2492#endif /* !VBOX */
2493
2494int page_get_flags(target_ulong address)
2495{
2496 PageDesc *p;
2497
2498 p = page_find(address >> TARGET_PAGE_BITS);
2499 if (!p)
2500 return 0;
2501 return p->flags;
2502}
2503
2504/* modify the flags of a page and invalidate the code if
2505 necessary. The flag PAGE_WRITE_ORG is positioned automatically
2506 depending on PAGE_WRITE */
2507void page_set_flags(target_ulong start, target_ulong end, int flags)
2508{
2509 PageDesc *p;
2510 target_ulong addr;
2511
2512 /* mmap_lock should already be held. */
2513 start = start & TARGET_PAGE_MASK;
2514 end = TARGET_PAGE_ALIGN(end);
2515 if (flags & PAGE_WRITE)
2516 flags |= PAGE_WRITE_ORG;
2517#ifdef VBOX
2518 AssertMsgFailed(("We shouldn't be here, and if we should, we must have an env to do the proper locking!\n"));
2519#endif
2520 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2521 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2522 /* We may be called for host regions that are outside guest
2523 address space. */
2524 if (!p)
2525 return;
2526 /* if the write protection is set, then we invalidate the code
2527 inside */
2528 if (!(p->flags & PAGE_WRITE) &&
2529 (flags & PAGE_WRITE) &&
2530 p->first_tb) {
2531 tb_invalidate_phys_page(addr, 0, NULL);
2532 }
2533 p->flags = flags;
2534 }
2535}
2536
2537int page_check_range(target_ulong start, target_ulong len, int flags)
2538{
2539 PageDesc *p;
2540 target_ulong end;
2541 target_ulong addr;
2542
2543 if (start + len < start)
2544 /* we've wrapped around */
2545 return -1;
2546
2547 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2548 start = start & TARGET_PAGE_MASK;
2549
2550 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2551 p = page_find(addr >> TARGET_PAGE_BITS);
2552 if( !p )
2553 return -1;
2554 if( !(p->flags & PAGE_VALID) )
2555 return -1;
2556
2557 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2558 return -1;
2559 if (flags & PAGE_WRITE) {
2560 if (!(p->flags & PAGE_WRITE_ORG))
2561 return -1;
2562 /* unprotect the page if it was put read-only because it
2563 contains translated code */
2564 if (!(p->flags & PAGE_WRITE)) {
2565 if (!page_unprotect(addr, 0, NULL))
2566 return -1;
2567 }
2568 return 0;
2569 }
2570 }
2571 return 0;
2572}
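/* Usage sketch (illustrative only): how user-mode emulation code might mark
   a freshly mapped guest range and later validate a write access to it.
   PAGE_WRITE_ORG is added internally by page_set_flags(). */
#if 0
static void example_track_guest_mapping(target_ulong start, target_ulong len)
{
    page_set_flags(start, start + len,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE);

    /* later, before touching the range on the guest's behalf */
    if (page_check_range(start, len, PAGE_WRITE) < 0) {
        /* not entirely writable from the guest's point of view */
    }
}
#endif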
2573
2574/* called from signal handler: invalidate the code and unprotect the
2575 page. Return TRUE if the fault was successfully handled. */
2576int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2577{
2578 unsigned int page_index, prot, pindex;
2579 PageDesc *p, *p1;
2580 target_ulong host_start, host_end, addr;
2581
2582 /* Technically this isn't safe inside a signal handler. However we
2583 know this only ever happens in a synchronous SEGV handler, so in
2584 practice it seems to be ok. */
2585 mmap_lock();
2586
2587 host_start = address & qemu_host_page_mask;
2588 page_index = host_start >> TARGET_PAGE_BITS;
2589 p1 = page_find(page_index);
2590 if (!p1) {
2591 mmap_unlock();
2592 return 0;
2593 }
2594 host_end = host_start + qemu_host_page_size;
2595 p = p1;
2596 prot = 0;
2597 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2598 prot |= p->flags;
2599 p++;
2600 }
2601 /* if the page was really writable, then we change its
2602 protection back to writable */
2603 if (prot & PAGE_WRITE_ORG) {
2604 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2605 if (!(p1[pindex].flags & PAGE_WRITE)) {
2606 mprotect((void *)g2h(host_start), qemu_host_page_size,
2607 (prot & PAGE_BITS) | PAGE_WRITE);
2608 p1[pindex].flags |= PAGE_WRITE;
2609 /* and since the content will be modified, we must invalidate
2610 the corresponding translated code. */
2611 tb_invalidate_phys_page(address, pc, puc);
2612#ifdef DEBUG_TB_CHECK
2613 tb_invalidate_check(address);
2614#endif
2615 mmap_unlock();
2616 return 1;
2617 }
2618 }
2619 mmap_unlock();
2620 return 0;
2621}
2622
2623static inline void tlb_set_dirty(CPUState *env,
2624 unsigned long addr, target_ulong vaddr)
2625{
2626}
2627#endif /* defined(CONFIG_USER_ONLY) */
2628
2629#if !defined(CONFIG_USER_ONLY)
2630
2631static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2632 ram_addr_t memory, ram_addr_t region_offset);
2633static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2634 ram_addr_t orig_memory, ram_addr_t region_offset);
2635#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2636 need_subpage) \
2637 do { \
2638 if (addr > start_addr) \
2639 start_addr2 = 0; \
2640 else { \
2641 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2642 if (start_addr2 > 0) \
2643 need_subpage = 1; \
2644 } \
2645 \
2646 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2647 end_addr2 = TARGET_PAGE_SIZE - 1; \
2648 else { \
2649 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2650 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2651 need_subpage = 1; \
2652 } \
2653 } while (0)
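/* Worked example (illustrative only): registering 0x100 bytes at guest
   physical 0x1234 with 4 KiB target pages. For the page at 0x1000 the macro
   yields start_addr2 = 0x234, end_addr2 = 0x333 and need_subpage = 1, i.e.
   the region covers only part of that page and a subpage must be used. */
#if 0
static void example_check_subpage(void)
{
    target_phys_addr_t addr = 0x1000;         /* page under examination */
    target_phys_addr_t start_addr = 0x1234;   /* start of registered region */
    ram_addr_t orig_size = 0x100;             /* size of registered region */
    target_phys_addr_t end_addr = start_addr + orig_size;
    target_phys_addr_t start_addr2, end_addr2;
    int need_subpage = 0;

    CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                  need_subpage);
    /* start_addr2 == 0x234, end_addr2 == 0x333, need_subpage == 1 */
}
#endif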
2654
2655/* register physical memory. 'size' must be a multiple of the target
2656 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2657 io memory page. The address used when calling the IO function is
2658 the offset from the start of the region, plus region_offset. Both
2659 start_addr and region_offset are rounded down to a page boundary
2660 before calculating this offset. This should not be a problem unless
2661 the low bits of start_addr and region_offset differ. */
2662void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2663 ram_addr_t size,
2664 ram_addr_t phys_offset,
2665 ram_addr_t region_offset)
2666{
2667 target_phys_addr_t addr, end_addr;
2668 PhysPageDesc *p;
2669 CPUState *env;
2670 ram_addr_t orig_size = size;
2671 void *subpage;
2672
2673#ifdef USE_KQEMU
2674 /* XXX: should not depend on cpu context */
2675 env = first_cpu;
2676 if (env->kqemu_enabled) {
2677 kqemu_set_phys_mem(start_addr, size, phys_offset);
2678 }
2679#endif
2680 if (kvm_enabled())
2681 kvm_set_phys_mem(start_addr, size, phys_offset);
2682
2683 if (phys_offset == IO_MEM_UNASSIGNED) {
2684 region_offset = start_addr;
2685 }
2686 region_offset &= TARGET_PAGE_MASK;
2687 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2688 end_addr = start_addr + (target_phys_addr_t)size;
2689 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2690 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2691 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2692 ram_addr_t orig_memory = p->phys_offset;
2693 target_phys_addr_t start_addr2, end_addr2;
2694 int need_subpage = 0;
2695
2696 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2697 need_subpage);
2698 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2699 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2700 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2701 &p->phys_offset, orig_memory,
2702 p->region_offset);
2703 } else {
2704 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2705 >> IO_MEM_SHIFT];
2706 }
2707 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2708 region_offset);
2709 p->region_offset = 0;
2710 } else {
2711 p->phys_offset = phys_offset;
2712 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2713 (phys_offset & IO_MEM_ROMD))
2714 phys_offset += TARGET_PAGE_SIZE;
2715 }
2716 } else {
2717 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2718 p->phys_offset = phys_offset;
2719 p->region_offset = region_offset;
2720 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2721 (phys_offset & IO_MEM_ROMD)) {
2722 phys_offset += TARGET_PAGE_SIZE;
2723 } else {
2724 target_phys_addr_t start_addr2, end_addr2;
2725 int need_subpage = 0;
2726
2727 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2728 end_addr2, need_subpage);
2729
2730 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2731 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2732 &p->phys_offset, IO_MEM_UNASSIGNED,
2733 addr & TARGET_PAGE_MASK);
2734 subpage_register(subpage, start_addr2, end_addr2,
2735 phys_offset, region_offset);
2736 p->region_offset = 0;
2737 }
2738 }
2739 }
2740 region_offset += TARGET_PAGE_SIZE;
2741 }
2742
2743 /* since each CPU stores ram addresses in its TLB cache, we must
2744 reset the modified entries */
2745 /* XXX: slow ! */
2746 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2747 tlb_flush(env, 1);
2748 }
2749}
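/* Usage sketch (illustrative only): registering a block of RAM at a guest
   physical address. qemu_ram_alloc() is the non-VBOX allocator defined
   further down; the VBOX build registers RAM through REM/PGM instead. */
#if 0
static void example_register_ram(void)
{
    /* 64 KiB of RAM at guest physical 0x100000; a region_offset of 0 keeps
       the handler offset identical to the offset into the region */
    ram_addr_t ram_offset = qemu_ram_alloc(0x10000);
    cpu_register_physical_memory_offset(0x100000, 0x10000, ram_offset, 0);
}
#endif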
2750
2751/* XXX: temporary until new memory mapping API */
2752ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2753{
2754 PhysPageDesc *p;
2755
2756 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2757 if (!p)
2758 return IO_MEM_UNASSIGNED;
2759 return p->phys_offset;
2760}
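/* Usage sketch (illustrative only): classifying a guest physical address
   with the descriptor returned above, using the same RAM test the access
   paths later in this file apply. */
#if 0
static int example_is_ram_page(target_phys_addr_t addr)
{
    ram_addr_t pd = cpu_get_physical_page_desc(addr);
    return (pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM;
}
#endif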
2761
2762#ifndef VBOX
2763void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2764{
2765 if (kvm_enabled())
2766 kvm_coalesce_mmio_region(addr, size);
2767}
2768
2769void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2770{
2771 if (kvm_enabled())
2772 kvm_uncoalesce_mmio_region(addr, size);
2773}
2774
2775/* XXX: better than nothing */
2776ram_addr_t qemu_ram_alloc(ram_addr_t size)
2777{
2778 ram_addr_t addr;
2779 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2780 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2781 (uint64_t)size, (uint64_t)phys_ram_size);
2782 abort();
2783 }
2784 addr = phys_ram_alloc_offset;
2785 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2786 return addr;
2787}
2788
2789void qemu_ram_free(ram_addr_t addr)
2790{
2791}
2792#endif /* !VBOX */
2793
2794static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2795{
2796#ifdef DEBUG_UNASSIGNED
2797 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2798#endif
2799#if defined(TARGET_SPARC)
2800 do_unassigned_access(addr, 0, 0, 0, 1);
2801#endif
2802 return 0;
2803}
2804
2805static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2806{
2807#ifdef DEBUG_UNASSIGNED
2808 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2809#endif
2810#if defined(TARGET_SPARC)
2811 do_unassigned_access(addr, 0, 0, 0, 2);
2812#endif
2813 return 0;
2814}
2815
2816static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2817{
2818#ifdef DEBUG_UNASSIGNED
2819 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2820#endif
2821#if defined(TARGET_SPARC)
2822 do_unassigned_access(addr, 0, 0, 0, 4);
2823#endif
2824 return 0;
2825}
2826
2827static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2828{
2829#ifdef DEBUG_UNASSIGNED
2830 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2831#endif
2832#if defined(TARGET_SPARC)
2833 do_unassigned_access(addr, 1, 0, 0, 1);
2834#endif
2835}
2836
2837static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2838{
2839#ifdef DEBUG_UNASSIGNED
2840 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2841#endif
2842#if defined(TARGET_SPARC)
2843 do_unassigned_access(addr, 1, 0, 0, 2);
2844#endif
2845}
2846
2847static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2848{
2849#ifdef DEBUG_UNASSIGNED
2850 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2851#endif
2852#if defined(TARGET_SPARC)
2853 do_unassigned_access(addr, 1, 0, 0, 4);
2854#endif
2855}
2856
2857static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2858 unassigned_mem_readb,
2859 unassigned_mem_readw,
2860 unassigned_mem_readl,
2861};
2862
2863static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2864 unassigned_mem_writeb,
2865 unassigned_mem_writew,
2866 unassigned_mem_writel,
2867};
2868
2869static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2870 uint32_t val)
2871{
2872 int dirty_flags;
2873#ifdef VBOX
2874 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2875 dirty_flags = 0xff;
2876 else
2877#endif /* VBOX */
2878 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2879 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2880#if !defined(CONFIG_USER_ONLY)
2881 tb_invalidate_phys_page_fast(ram_addr, 1);
2882# ifdef VBOX
2883 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2884 dirty_flags = 0xff;
2885 else
2886# endif /* VBOX */
2887 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2888#endif
2889 }
2890#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2891 remR3PhysWriteU8(ram_addr, val);
2892#else
2893 stb_p(phys_ram_base + ram_addr, val);
2894#endif
2895#ifdef USE_KQEMU
2896 if (cpu_single_env->kqemu_enabled &&
2897 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2898 kqemu_modify_page(cpu_single_env, ram_addr);
2899#endif
2900 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2901#ifdef VBOX
2902 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2903#endif /* VBOX */
2904 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2905 /* we remove the notdirty callback only if the code has been
2906 flushed */
2907 if (dirty_flags == 0xff)
2908 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2909}
2910
2911static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2912 uint32_t val)
2913{
2914 int dirty_flags;
2915#ifdef VBOX
2916 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2917 dirty_flags = 0xff;
2918 else
2919#endif /* VBOX */
2920 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2921 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2922#if !defined(CONFIG_USER_ONLY)
2923 tb_invalidate_phys_page_fast(ram_addr, 2);
2924# ifdef VBOX
2925 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2926 dirty_flags = 0xff;
2927 else
2928# endif /* VBOX */
2929 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2930#endif
2931 }
2932#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2933 remR3PhysWriteU16(ram_addr, val);
2934#else
2935 stw_p(phys_ram_base + ram_addr, val);
2936#endif
2937#ifdef USE_KQEMU
2938 if (cpu_single_env->kqemu_enabled &&
2939 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2940 kqemu_modify_page(cpu_single_env, ram_addr);
2941#endif
2942 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2943#ifdef VBOX
2944 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2945#endif
2946 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2947 /* we remove the notdirty callback only if the code has been
2948 flushed */
2949 if (dirty_flags == 0xff)
2950 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2951}
2952
2953static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2954 uint32_t val)
2955{
2956 int dirty_flags;
2957#ifdef VBOX
2958 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2959 dirty_flags = 0xff;
2960 else
2961#endif /* VBOX */
2962 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2963 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2964#if !defined(CONFIG_USER_ONLY)
2965 tb_invalidate_phys_page_fast(ram_addr, 4);
2966# ifdef VBOX
2967 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2968 dirty_flags = 0xff;
2969 else
2970# endif /* VBOX */
2971 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2972#endif
2973 }
2974#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2975 remR3PhysWriteU32(ram_addr, val);
2976#else
2977 stl_p(phys_ram_base + ram_addr, val);
2978#endif
2979#ifdef USE_KQEMU
2980 if (cpu_single_env->kqemu_enabled &&
2981 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2982 kqemu_modify_page(cpu_single_env, ram_addr);
2983#endif
2984 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2985#ifdef VBOX
2986 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2987#endif
2988 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2989 /* we remove the notdirty callback only if the code has been
2990 flushed */
2991 if (dirty_flags == 0xff)
2992 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2993}
2994
2995static CPUReadMemoryFunc *error_mem_read[3] = {
2996 NULL, /* never used */
2997 NULL, /* never used */
2998 NULL, /* never used */
2999};
3000
3001static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
3002 notdirty_mem_writeb,
3003 notdirty_mem_writew,
3004 notdirty_mem_writel,
3005};
3006
3007/* Generate a debug exception if a watchpoint has been hit. */
3008static void check_watchpoint(int offset, int len_mask, int flags)
3009{
3010 CPUState *env = cpu_single_env;
3011 target_ulong pc, cs_base;
3012 TranslationBlock *tb;
3013 target_ulong vaddr;
3014 CPUWatchpoint *wp;
3015 int cpu_flags;
3016
3017 if (env->watchpoint_hit) {
3018 /* We re-entered the check after replacing the TB. Now raise
3019 * the debug interrupt so that it will trigger after the
3020 * current instruction. */
3021 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3022 return;
3023 }
3024 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
3025 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
3026 if ((vaddr == (wp->vaddr & len_mask) ||
3027 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
3028 wp->flags |= BP_WATCHPOINT_HIT;
3029 if (!env->watchpoint_hit) {
3030 env->watchpoint_hit = wp;
3031 tb = tb_find_pc(env->mem_io_pc);
3032 if (!tb) {
3033 cpu_abort(env, "check_watchpoint: could not find TB for "
3034 "pc=%p", (void *)env->mem_io_pc);
3035 }
3036 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
3037 tb_phys_invalidate(tb, -1);
3038 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3039 env->exception_index = EXCP_DEBUG;
3040 } else {
3041 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3042 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3043 }
3044 cpu_resume_from_signal(env, NULL);
3045 }
3046 } else {
3047 wp->flags &= ~BP_WATCHPOINT_HIT;
3048 }
3049 }
3050}
3051
3052/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3053 so these check for a hit then pass through to the normal out-of-line
3054 phys routines. */
3055static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
3056{
3057 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
3058 return ldub_phys(addr);
3059}
3060
3061static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
3062{
3063 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
3064 return lduw_phys(addr);
3065}
3066
3067static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
3068{
3069 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
3070 return ldl_phys(addr);
3071}
3072
3073static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
3074 uint32_t val)
3075{
3076 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
3077 stb_phys(addr, val);
3078}
3079
3080static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
3081 uint32_t val)
3082{
3083 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
3084 stw_phys(addr, val);
3085}
3086
3087static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
3088 uint32_t val)
3089{
3090 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
3091 stl_phys(addr, val);
3092}
3093
3094static CPUReadMemoryFunc *watch_mem_read[3] = {
3095 watch_mem_readb,
3096 watch_mem_readw,
3097 watch_mem_readl,
3098};
3099
3100static CPUWriteMemoryFunc *watch_mem_write[3] = {
3101 watch_mem_writeb,
3102 watch_mem_writew,
3103 watch_mem_writel,
3104};
3105
3106static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
3107 unsigned int len)
3108{
3109 uint32_t ret;
3110 unsigned int idx;
3111
3112 idx = SUBPAGE_IDX(addr);
3113#if defined(DEBUG_SUBPAGE)
3114 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3115 mmio, len, addr, idx);
3116#endif
3117 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
3118 addr + mmio->region_offset[idx][0][len]);
3119
3120 return ret;
3121}
3122
3123static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
3124 uint32_t value, unsigned int len)
3125{
3126 unsigned int idx;
3127
3128 idx = SUBPAGE_IDX(addr);
3129#if defined(DEBUG_SUBPAGE)
3130 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
3131 mmio, len, addr, idx, value);
3132#endif
3133 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
3134 addr + mmio->region_offset[idx][1][len],
3135 value);
3136}
3137
3138static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
3139{
3140#if defined(DEBUG_SUBPAGE)
3141 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3142#endif
3143
3144 return subpage_readlen(opaque, addr, 0);
3145}
3146
3147static void subpage_writeb (void *opaque, target_phys_addr_t addr,
3148 uint32_t value)
3149{
3150#if defined(DEBUG_SUBPAGE)
3151 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3152#endif
3153 subpage_writelen(opaque, addr, value, 0);
3154}
3155
3156static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
3157{
3158#if defined(DEBUG_SUBPAGE)
3159 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3160#endif
3161
3162 return subpage_readlen(opaque, addr, 1);
3163}
3164
3165static void subpage_writew (void *opaque, target_phys_addr_t addr,
3166 uint32_t value)
3167{
3168#if defined(DEBUG_SUBPAGE)
3169 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3170#endif
3171 subpage_writelen(opaque, addr, value, 1);
3172}
3173
3174static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
3175{
3176#if defined(DEBUG_SUBPAGE)
3177 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3178#endif
3179
3180 return subpage_readlen(opaque, addr, 2);
3181}
3182
3183static void subpage_writel (void *opaque,
3184 target_phys_addr_t addr, uint32_t value)
3185{
3186#if defined(DEBUG_SUBPAGE)
3187 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3188#endif
3189 subpage_writelen(opaque, addr, value, 2);
3190}
3191
3192static CPUReadMemoryFunc *subpage_read[] = {
3193 &subpage_readb,
3194 &subpage_readw,
3195 &subpage_readl,
3196};
3197
3198static CPUWriteMemoryFunc *subpage_write[] = {
3199 &subpage_writeb,
3200 &subpage_writew,
3201 &subpage_writel,
3202};
3203
3204static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3205 ram_addr_t memory, ram_addr_t region_offset)
3206{
3207 int idx, eidx;
3208 unsigned int i;
3209
3210 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3211 return -1;
3212 idx = SUBPAGE_IDX(start);
3213 eidx = SUBPAGE_IDX(end);
3214#if defined(DEBUG_SUBPAGE)
3215 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
3216 mmio, start, end, idx, eidx, memory);
3217#endif
3218 memory >>= IO_MEM_SHIFT;
3219 for (; idx <= eidx; idx++) {
3220 for (i = 0; i < 4; i++) {
3221 if (io_mem_read[memory][i]) {
3222 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
3223 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
3224 mmio->region_offset[idx][0][i] = region_offset;
3225 }
3226 if (io_mem_write[memory][i]) {
3227 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
3228 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
3229 mmio->region_offset[idx][1][i] = region_offset;
3230 }
3231 }
3232 }
3233
3234 return 0;
3235}
3236
3237static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3238 ram_addr_t orig_memory, ram_addr_t region_offset)
3239{
3240 subpage_t *mmio;
3241 int subpage_memory;
3242
3243 mmio = qemu_mallocz(sizeof(subpage_t));
3244
3245 mmio->base = base;
3246 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
3247#if defined(DEBUG_SUBPAGE)
3248 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3249 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3250#endif
3251 *phys = subpage_memory | IO_MEM_SUBPAGE;
3252 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
3253 region_offset);
3254
3255 return mmio;
3256}
3257
3258static int get_free_io_mem_idx(void)
3259{
3260 int i;
3261
3262 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3263 if (!io_mem_used[i]) {
3264 io_mem_used[i] = 1;
3265 return i;
3266 }
3267
3268 return -1;
3269}
3270
3271static void io_mem_init(void)
3272{
3273 int i;
3274
3275 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
3276 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3277 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
3278 for (i=0; i<5; i++)
3279 io_mem_used[i] = 1;
3280
3281 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
3282 watch_mem_write, NULL);
3283
3284#ifndef VBOX /* VBOX: we do this later when the RAM is allocated. */
3285 /* alloc dirty bits array */
3286 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3287 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
3288#endif /* !VBOX */
3289}
3290
3291/* mem_read and mem_write are arrays of functions containing the
3292 function to access byte (index 0), word (index 1) and dword (index
3293 2). Functions can be omitted with a NULL function pointer. The
3294 registered functions may be modified dynamically later.
3295 If io_index is non-zero, the corresponding io zone is
3296 modified. If it is zero, a new io zone is allocated. The return
3297 value can be used with cpu_register_physical_memory(). (-1) is
3298 returned on error. */
3299int cpu_register_io_memory(int io_index,
3300 CPUReadMemoryFunc **mem_read,
3301 CPUWriteMemoryFunc **mem_write,
3302 void *opaque)
3303{
3304 int i, subwidth = 0;
3305
3306 if (io_index <= 0) {
3307 io_index = get_free_io_mem_idx();
3308 if (io_index == -1)
3309 return io_index;
3310 } else {
3311 if (io_index >= IO_MEM_NB_ENTRIES)
3312 return -1;
3313 }
3314
3315 for(i = 0;i < 3; i++) {
3316 if (!mem_read[i] || !mem_write[i])
3317 subwidth = IO_MEM_SUBWIDTH;
3318 io_mem_read[io_index][i] = mem_read[i];
3319 io_mem_write[io_index][i] = mem_write[i];
3320 }
3321 io_mem_opaque[io_index] = opaque;
3322 return (io_index << IO_MEM_SHIFT) | subwidth;
3323}
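/* Usage sketch (illustrative only): a hypothetical device registering 32-bit
   MMIO handlers. The callback tables follow the same layout as
   unassigned_mem_read/unassigned_mem_write above; leaving the byte and word
   slots NULL makes the region be tagged IO_MEM_SUBWIDTH. */
#if 0
static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr)
{
    /* 'addr' is the offset from the start of the region plus region_offset */
    return 0;
}

static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* handle the store */
}

static CPUReadMemoryFunc *mydev_read[3] = { NULL, NULL, mydev_readl };
static CPUWriteMemoryFunc *mydev_write[3] = { NULL, NULL, mydev_writel };

static void example_register_mydev(void)
{
    /* io_index 0 requests a fresh slot; the return value (already shifted by
       IO_MEM_SHIFT) is passed as phys_offset when registering the memory */
    int io_index = cpu_register_io_memory(0, mydev_read, mydev_write, NULL);
    cpu_register_physical_memory_offset(0xd0000000, TARGET_PAGE_SIZE,
                                        io_index, 0);
}
#endif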
3324
3325void cpu_unregister_io_memory(int io_table_address)
3326{
3327 int i;
3328 int io_index = io_table_address >> IO_MEM_SHIFT;
3329
3330 for (i=0;i < 3; i++) {
3331 io_mem_read[io_index][i] = unassigned_mem_read[i];
3332 io_mem_write[io_index][i] = unassigned_mem_write[i];
3333 }
3334 io_mem_opaque[io_index] = NULL;
3335 io_mem_used[io_index] = 0;
3336}
3337
3338CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
3339{
3340 return io_mem_write[io_index >> IO_MEM_SHIFT];
3341}
3342
3343CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
3344{
3345 return io_mem_read[io_index >> IO_MEM_SHIFT];
3346}
3347
3348#endif /* !defined(CONFIG_USER_ONLY) */
3349
3350/* physical memory access (slow version, mainly for debug) */
3351#if defined(CONFIG_USER_ONLY)
3352void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3353 int len, int is_write)
3354{
3355 int l, flags;
3356 target_ulong page;
3357 void * p;
3358
3359 while (len > 0) {
3360 page = addr & TARGET_PAGE_MASK;
3361 l = (page + TARGET_PAGE_SIZE) - addr;
3362 if (l > len)
3363 l = len;
3364 flags = page_get_flags(page);
3365 if (!(flags & PAGE_VALID))
3366 return;
3367 if (is_write) {
3368 if (!(flags & PAGE_WRITE))
3369 return;
3370 /* XXX: this code should not depend on lock_user */
3371 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3372 /* FIXME - should this return an error rather than just fail? */
3373 return;
3374 memcpy(p, buf, l);
3375 unlock_user(p, addr, l);
3376 } else {
3377 if (!(flags & PAGE_READ))
3378 return;
3379 /* XXX: this code should not depend on lock_user */
3380 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3381 /* FIXME - should this return an error rather than just fail? */
3382 return;
3383 memcpy(buf, p, l);
3384 unlock_user(p, addr, 0);
3385 }
3386 len -= l;
3387 buf += l;
3388 addr += l;
3389 }
3390}
3391
3392#else
3393void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3394 int len, int is_write)
3395{
3396 int l, io_index;
3397 uint8_t *ptr;
3398 uint32_t val;
3399 target_phys_addr_t page;
3400 unsigned long pd;
3401 PhysPageDesc *p;
3402
3403 while (len > 0) {
3404 page = addr & TARGET_PAGE_MASK;
3405 l = (page + TARGET_PAGE_SIZE) - addr;
3406 if (l > len)
3407 l = len;
3408 p = phys_page_find(page >> TARGET_PAGE_BITS);
3409 if (!p) {
3410 pd = IO_MEM_UNASSIGNED;
3411 } else {
3412 pd = p->phys_offset;
3413 }
3414
3415 if (is_write) {
3416 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3417 target_phys_addr_t addr1 = addr;
3418 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3419 if (p)
3420 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3421 /* XXX: could force cpu_single_env to NULL to avoid
3422 potential bugs */
3423 if (l >= 4 && ((addr1 & 3) == 0)) {
3424 /* 32 bit write access */
3425#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3426 val = ldl_p(buf);
3427#else
3428 val = *(const uint32_t *)buf;
3429#endif
3430 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3431 l = 4;
3432 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3433 /* 16 bit write access */
3434#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3435 val = lduw_p(buf);
3436#else
3437 val = *(const uint16_t *)buf;
3438#endif
3439 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3440 l = 2;
3441 } else {
3442 /* 8 bit write access */
3443#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3444 val = ldub_p(buf);
3445#else
3446 val = *(const uint8_t *)buf;
3447#endif
3448 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3449 l = 1;
3450 }
3451 } else {
3452 unsigned long addr1;
3453 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3454 /* RAM case */
3455#ifdef VBOX
3456 remR3PhysWrite(addr1, buf, l); NOREF(ptr);
3457#else
3458 ptr = phys_ram_base + addr1;
3459 memcpy(ptr, buf, l);
3460#endif
3461 if (!cpu_physical_memory_is_dirty(addr1)) {
3462 /* invalidate code */
3463 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3464 /* set dirty bit */
3465#ifdef VBOX
3466 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
3467#endif
3468 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3469 (0xff & ~CODE_DIRTY_FLAG);
3470 }
3471 }
3472 } else {
3473 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3474 !(pd & IO_MEM_ROMD)) {
3475 target_phys_addr_t addr1 = addr;
3476 /* I/O case */
3477 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3478 if (p)
3479 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3480 if (l >= 4 && ((addr1 & 3) == 0)) {
3481 /* 32 bit read access */
3482 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3483#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3484 stl_p(buf, val);
3485#else
3486 *(uint32_t *)buf = val;
3487#endif
3488 l = 4;
3489 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3490 /* 16 bit read access */
3491 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3492#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3493 stw_p(buf, val);
3494#else
3495 *(uint16_t *)buf = val;
3496#endif
3497 l = 2;
3498 } else {
3499 /* 8 bit read access */
3500 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3501#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3502 stb_p(buf, val);
3503#else
3504 *(uint8_t *)buf = val;
3505#endif
3506 l = 1;
3507 }
3508 } else {
3509 /* RAM case */
3510#ifdef VBOX
3511 remR3PhysRead((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), buf, l); NOREF(ptr);
3512#else
3513 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3514 (addr & ~TARGET_PAGE_MASK);
3515 memcpy(buf, ptr, l);
3516#endif
3517 }
3518 }
3519 len -= l;
3520 buf += l;
3521 addr += l;
3522 }
3523}
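/* Usage sketch (illustrative only): reading and writing guest physical
   memory through the generic slow path above. Aligned 32-bit accesses could
   equally use ldl_phys()/stl_phys() defined later in this file. */
#if 0
static void example_phys_copy(void)
{
    uint8_t buf[16];
    cpu_physical_memory_rw(0x1000, buf, sizeof(buf), 0 /* read */);
    cpu_physical_memory_rw(0x2000, buf, sizeof(buf), 1 /* write */);
}
#endif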
3524
3525#ifndef VBOX
3526
3527/* used for ROM loading : can write in RAM and ROM */
3528void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3529 const uint8_t *buf, int len)
3530{
3531 int l;
3532 uint8_t *ptr;
3533 target_phys_addr_t page;
3534 unsigned long pd;
3535 PhysPageDesc *p;
3536
3537 while (len > 0) {
3538 page = addr & TARGET_PAGE_MASK;
3539 l = (page + TARGET_PAGE_SIZE) - addr;
3540 if (l > len)
3541 l = len;
3542 p = phys_page_find(page >> TARGET_PAGE_BITS);
3543 if (!p) {
3544 pd = IO_MEM_UNASSIGNED;
3545 } else {
3546 pd = p->phys_offset;
3547 }
3548
3549 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3550 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3551 !(pd & IO_MEM_ROMD)) {
3552 /* do nothing */
3553 } else {
3554 unsigned long addr1;
3555 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3556 /* ROM/RAM case */
3557 ptr = phys_ram_base + addr1;
3558 memcpy(ptr, buf, l);
3559 }
3560 len -= l;
3561 buf += l;
3562 addr += l;
3563 }
3564}
3565
3566typedef struct {
3567 void *buffer;
3568 target_phys_addr_t addr;
3569 target_phys_addr_t len;
3570} BounceBuffer;
3571
3572static BounceBuffer bounce;
3573
3574typedef struct MapClient {
3575 void *opaque;
3576 void (*callback)(void *opaque);
3577 LIST_ENTRY(MapClient) link;
3578} MapClient;
3579
3580static LIST_HEAD(map_client_list, MapClient) map_client_list
3581 = LIST_HEAD_INITIALIZER(map_client_list);
3582
3583void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3584{
3585 MapClient *client = qemu_malloc(sizeof(*client));
3586
3587 client->opaque = opaque;
3588 client->callback = callback;
3589 LIST_INSERT_HEAD(&map_client_list, client, link);
3590 return client;
3591}
3592
3593void cpu_unregister_map_client(void *_client)
3594{
3595 MapClient *client = (MapClient *)_client;
3596
3597 LIST_REMOVE(client, link);
3598}
3599
3600static void cpu_notify_map_clients(void)
3601{
3602 MapClient *client;
3603
3604 while (!LIST_EMPTY(&map_client_list)) {
3605 client = LIST_FIRST(&map_client_list);
3606 client->callback(client->opaque);
3607 LIST_REMOVE(client, link);
3608 }
3609}
3610
3611/* Map a physical memory region into a host virtual address.
3612 * May map a subset of the requested range, given by and returned in *plen.
3613 * May return NULL if resources needed to perform the mapping are exhausted.
3614 * Use only for reads OR writes - not for read-modify-write operations.
3615 * Use cpu_register_map_client() to know when retrying the map operation is
3616 * likely to succeed.
3617 */
3618void *cpu_physical_memory_map(target_phys_addr_t addr,
3619 target_phys_addr_t *plen,
3620 int is_write)
3621{
3622 target_phys_addr_t len = *plen;
3623 target_phys_addr_t done = 0;
3624 int l;
3625 uint8_t *ret = NULL;
3626 uint8_t *ptr;
3627 target_phys_addr_t page;
3628 unsigned long pd;
3629 PhysPageDesc *p;
3630 unsigned long addr1;
3631
3632 while (len > 0) {
3633 page = addr & TARGET_PAGE_MASK;
3634 l = (page + TARGET_PAGE_SIZE) - addr;
3635 if (l > len)
3636 l = len;
3637 p = phys_page_find(page >> TARGET_PAGE_BITS);
3638 if (!p) {
3639 pd = IO_MEM_UNASSIGNED;
3640 } else {
3641 pd = p->phys_offset;
3642 }
3643
3644 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3645 if (done || bounce.buffer) {
3646 break;
3647 }
3648 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3649 bounce.addr = addr;
3650 bounce.len = l;
3651 if (!is_write) {
3652 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3653 }
3654 ptr = bounce.buffer;
3655 } else {
3656 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3657 ptr = phys_ram_base + addr1;
3658 }
3659 if (!done) {
3660 ret = ptr;
3661 } else if (ret + done != ptr) {
3662 break;
3663 }
3664
3665 len -= l;
3666 addr += l;
3667 done += l;
3668 }
3669 *plen = done;
3670 return ret;
3671}
3672
3673/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3674 * Will also mark the memory as dirty if is_write == 1. access_len gives
3675 * the amount of memory that was actually read or written by the caller.
3676 */
3677void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3678 int is_write, target_phys_addr_t access_len)
3679{
3680 if (buffer != bounce.buffer) {
3681 if (is_write) {
3682 unsigned long addr1 = (uint8_t *)buffer - phys_ram_base;
3683 while (access_len) {
3684 unsigned l;
3685 l = TARGET_PAGE_SIZE;
3686 if (l > access_len)
3687 l = access_len;
3688 if (!cpu_physical_memory_is_dirty(addr1)) {
3689 /* invalidate code */
3690 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3691 /* set dirty bit */
3692 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3693 (0xff & ~CODE_DIRTY_FLAG);
3694 }
3695 addr1 += l;
3696 access_len -= l;
3697 }
3698 }
3699 return;
3700 }
3701 if (is_write) {
3702 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3703 }
3704 qemu_free(bounce.buffer);
3705 bounce.buffer = NULL;
3706 cpu_notify_map_clients();
3707}
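/* Usage sketch (illustrative only): the map/unmap pattern for a DMA-style
   store into guest memory. A real caller would use cpu_register_map_client()
   to retry when NULL is returned because the bounce buffer is busy; that
   part is omitted here. */
#if 0
static void example_dma_store(target_phys_addr_t addr, const uint8_t *data,
                              target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *host = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);
    if (host) {
        /* plen may be smaller than len if the range runs into MMIO */
        memcpy(host, data, plen);
        cpu_physical_memory_unmap(host, plen, 1 /* is_write */, plen);
    }
}
#endif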
3708
3709#endif /* !VBOX */
3710
3711/* warning: addr must be aligned */
3712uint32_t ldl_phys(target_phys_addr_t addr)
3713{
3714 int io_index;
3715 uint8_t *ptr;
3716 uint32_t val;
3717 unsigned long pd;
3718 PhysPageDesc *p;
3719
3720 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3721 if (!p) {
3722 pd = IO_MEM_UNASSIGNED;
3723 } else {
3724 pd = p->phys_offset;
3725 }
3726
3727 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3728 !(pd & IO_MEM_ROMD)) {
3729 /* I/O case */
3730 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3731 if (p)
3732 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3733 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3734 } else {
3735 /* RAM case */
3736#ifndef VBOX
3737 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3738 (addr & ~TARGET_PAGE_MASK);
3739 val = ldl_p(ptr);
3740#else
3741 val = remR3PhysReadU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
3742#endif
3743 }
3744 return val;
3745}
3746
3747/* warning: addr must be aligned */
3748uint64_t ldq_phys(target_phys_addr_t addr)
3749{
3750 int io_index;
3751 uint8_t *ptr;
3752 uint64_t val;
3753 unsigned long pd;
3754 PhysPageDesc *p;
3755
3756 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3757 if (!p) {
3758 pd = IO_MEM_UNASSIGNED;
3759 } else {
3760 pd = p->phys_offset;
3761 }
3762
3763 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3764 !(pd & IO_MEM_ROMD)) {
3765 /* I/O case */
3766 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3767 if (p)
3768 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3769#ifdef TARGET_WORDS_BIGENDIAN
3770 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3771 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3772#else
3773 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3774 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3775#endif
3776 } else {
3777 /* RAM case */
3778#ifndef VBOX
3779 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3780 (addr & ~TARGET_PAGE_MASK);
3781 val = ldq_p(ptr);
3782#else
3783 val = remR3PhysReadU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
3784#endif
3785 }
3786 return val;
3787}
3788
3789/* XXX: optimize */
3790uint32_t ldub_phys(target_phys_addr_t addr)
3791{
3792 uint8_t val;
3793 cpu_physical_memory_read(addr, &val, 1);
3794 return val;
3795}
3796
3797/* XXX: optimize */
3798uint32_t lduw_phys(target_phys_addr_t addr)
3799{
3800 uint16_t val;
3801 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3802 return tswap16(val);
3803}
3804
3805 /* warning: addr must be aligned. The ram page is not marked as dirty
3806 and the code inside is not invalidated. It is useful if the dirty
3807 bits are used to track modified PTEs */
3808void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3809{
3810 int io_index;
3811 uint8_t *ptr;
3812 unsigned long pd;
3813 PhysPageDesc *p;
3814
3815 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3816 if (!p) {
3817 pd = IO_MEM_UNASSIGNED;
3818 } else {
3819 pd = p->phys_offset;
3820 }
3821
3822 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3823 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3824 if (p)
3825 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3826 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3827 } else {
3828#ifndef VBOX
3829 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3830 ptr = phys_ram_base + addr1;
3831 stl_p(ptr, val);
3832#else
3833 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3834#endif
3835
3836#ifndef VBOX
3837 if (unlikely(in_migration)) {
3838 if (!cpu_physical_memory_is_dirty(addr1)) {
3839 /* invalidate code */
3840 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3841 /* set dirty bit */
3842 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3843 (0xff & ~CODE_DIRTY_FLAG);
3844 }
3845 }
3846#endif /* !VBOX */
3847 }
3848}
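/* Usage sketch (illustrative only): updating a guest page table entry in
   place, the situation the comment above refers to. stl_phys_notdirty()
   deliberately skips the dirty-bit/SMC bookkeeping; a normal guest-visible
   store would use stl_phys() instead. The bit value is a hypothetical
   'accessed' flag. */
#if 0
static void example_mark_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);
    stl_phys_notdirty(pte_addr, pte | 0x20 /* hypothetical accessed bit */);
}
#endif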
3849
3850void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3851{
3852 int io_index;
3853 uint8_t *ptr;
3854 unsigned long pd;
3855 PhysPageDesc *p;
3856
3857 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3858 if (!p) {
3859 pd = IO_MEM_UNASSIGNED;
3860 } else {
3861 pd = p->phys_offset;
3862 }
3863
3864 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3865 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3866 if (p)
3867 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3868#ifdef TARGET_WORDS_BIGENDIAN
3869 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3870 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3871#else
3872 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3873 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3874#endif
3875 } else {
3876#ifndef VBOX
3877 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3878 (addr & ~TARGET_PAGE_MASK);
3879 stq_p(ptr, val);
3880#else
3881 remR3PhysWriteU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3882#endif
3883 }
3884}
3885
3886/* warning: addr must be aligned */
3887void stl_phys(target_phys_addr_t addr, uint32_t val)
3888{
3889 int io_index;
3890 uint8_t *ptr;
3891 unsigned long pd;
3892 PhysPageDesc *p;
3893
3894 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3895 if (!p) {
3896 pd = IO_MEM_UNASSIGNED;
3897 } else {
3898 pd = p->phys_offset;
3899 }
3900
3901 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3902 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3903 if (p)
3904 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3905 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3906 } else {
3907 unsigned long addr1;
3908 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3909 /* RAM case */
3910#ifndef VBOX
3911 ptr = phys_ram_base + addr1;
3912 stl_p(ptr, val);
3913#else
3914 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3915#endif
3916 if (!cpu_physical_memory_is_dirty(addr1)) {
3917 /* invalidate code */
3918 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3919 /* set dirty bit */
3920#ifdef VBOX
3921 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
3922#endif
3923 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3924 (0xff & ~CODE_DIRTY_FLAG);
3925 }
3926 }
3927}
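
/* Contrast sketch (hypothetical helper): a device model patching a 32-bit
   field in guest RAM should go through stl_phys() rather than
   stl_phys_notdirty(), so that any translated code on the page is
   invalidated and the page's dirty byte is updated as done above. */
static void example_patch_guest_field(target_phys_addr_t field_addr, uint32_t new_val)
{
    stl_phys(field_addr, new_val);
}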
3928
3929/* XXX: optimize */
3930void stb_phys(target_phys_addr_t addr, uint32_t val)
3931{
3932 uint8_t v = val;
3933 cpu_physical_memory_write(addr, &v, 1);
3934}
3935
3936/* XXX: optimize */
3937void stw_phys(target_phys_addr_t addr, uint32_t val)
3938{
3939 uint16_t v = tswap16(val);
3940 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3941}
3942
3943/* XXX: optimize */
3944void stq_phys(target_phys_addr_t addr, uint64_t val)
3945{
3946 val = tswap64(val);
3947 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3948}
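
/* Sketch (hypothetical record layout and offsets): the stb/stw/stq helpers
   above let a device model fill a guest structure field by field without
   caring about host byte order, since stw_phys()/stq_phys() apply the tswap
   conversion internally before storing. */
static void example_write_guest_record(target_phys_addr_t base)
{
    stb_phys(base + 0, 0x01);                           /* 8-bit tag */
    stw_phys(base + 2, 0xbeef);                         /* 16-bit field, stored guest-endian */
    stq_phys(base + 8, 0x1122334455667788ULL);          /* 64-bit field, stored guest-endian */
}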
3949
3950#endif
3951
3952/* virtual memory access for debug */
3953int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3954 uint8_t *buf, int len, int is_write)
3955{
3956 int l;
3957 target_phys_addr_t phys_addr;
3958 target_ulong page;
3959
3960 while (len > 0) {
3961 page = addr & TARGET_PAGE_MASK;
3962 phys_addr = cpu_get_phys_page_debug(env, page);
3963 /* if no physical page mapped, return an error */
3964 if (phys_addr == -1)
3965 return -1;
3966 l = (page + TARGET_PAGE_SIZE) - addr;
3967 if (l > len)
3968 l = len;
3969 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3970 buf, l, is_write);
3971 len -= l;
3972 buf += l;
3973 addr += l;
3974 }
3975 return 0;
3976}
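
/* Usage sketch (hypothetical helper; the 16-byte length is arbitrary): this
   is the path a debugger front end takes to inspect guest memory by virtual
   address. Because each page is translated with cpu_get_phys_page_debug(),
   it works even for pages that currently have no TLB entry. */
static int example_read_guest_code(CPUState *env, target_ulong pc, uint8_t *buf)
{
    /* returns -1 if any page in the range is unmapped */
    return cpu_memory_rw_debug(env, pc, buf, 16, 0);
}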
3977
3978/* in deterministic execution mode, instructions doing device I/Os
3979 must be at the end of the TB */
3980void cpu_io_recompile(CPUState *env, void *retaddr)
3981{
3982 TranslationBlock *tb;
3983 uint32_t n, cflags;
3984 target_ulong pc, cs_base;
3985 uint64_t flags;
3986
3987 tb = tb_find_pc((unsigned long)retaddr);
3988 if (!tb) {
3989 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3990 retaddr);
3991 }
3992 n = env->icount_decr.u16.low + tb->icount;
3993 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3994 /* Calculate how many instructions had been executed before the fault
3995 occurred. */
3996 n = n - env->icount_decr.u16.low;
3997 /* Generate a new TB ending on the I/O insn. */
3998 n++;
3999 /* On MIPS and SH, delay slot instructions can only be restarted if
4000 they were already the first instruction in the TB. If this is not
4001 the first instruction in a TB then re-execute the preceding
4002 branch. */
4003#if defined(TARGET_MIPS)
4004 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4005 env->active_tc.PC -= 4;
4006 env->icount_decr.u16.low++;
4007 env->hflags &= ~MIPS_HFLAG_BMASK;
4008 }
4009#elif defined(TARGET_SH4)
4010 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4011 && n > 1) {
4012 env->pc -= 2;
4013 env->icount_decr.u16.low++;
4014 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4015 }
4016#endif
4017 /* This should never happen. */
4018 if (n > CF_COUNT_MASK)
4019 cpu_abort(env, "TB too big during recompile");
4020
4021 cflags = n | CF_LAST_IO;
4022 pc = tb->pc;
4023 cs_base = tb->cs_base;
4024 flags = tb->flags;
4025 tb_phys_invalidate(tb, -1);
4026 /* FIXME: In theory this could raise an exception. In practice
4027 we have already translated the block once so it's probably ok. */
4028 tb_gen_code(env, pc, cs_base, flags, cflags);
4029 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
4030 the first in the TB) then we end up generating a whole new TB and
4031 repeating the fault, which is horribly inefficient.
4032 Better would be to execute just this insn uncached, or generate a
4033 second new TB. */
4034 cpu_resume_from_signal(env, NULL);
4035}
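
/* Small sketch of the cflags encoding produced above (the helper is
   illustrative only): the low CF_COUNT_MASK bits carry the number of
   instructions the regenerated TB may execute, and CF_LAST_IO tells the
   translator that the final instruction is allowed to perform device I/O. */
static uint32_t example_make_io_cflags(uint32_t insns_to_execute)
{
    return (insns_to_execute & CF_COUNT_MASK) | CF_LAST_IO;
}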
4036
4037#ifndef VBOX
4038void dump_exec_info(FILE *f,
4039 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
4040{
4041 int i, target_code_size, max_target_code_size;
4042 int direct_jmp_count, direct_jmp2_count, cross_page;
4043 TranslationBlock *tb;
4044
4045 target_code_size = 0;
4046 max_target_code_size = 0;
4047 cross_page = 0;
4048 direct_jmp_count = 0;
4049 direct_jmp2_count = 0;
4050 for(i = 0; i < nb_tbs; i++) {
4051 tb = &tbs[i];
4052 target_code_size += tb->size;
4053 if (tb->size > max_target_code_size)
4054 max_target_code_size = tb->size;
4055 if (tb->page_addr[1] != -1)
4056 cross_page++;
4057 if (tb->tb_next_offset[0] != 0xffff) {
4058 direct_jmp_count++;
4059 if (tb->tb_next_offset[1] != 0xffff) {
4060 direct_jmp2_count++;
4061 }
4062 }
4063 }
4064 /* XXX: avoid using doubles ? */
4065 cpu_fprintf(f, "Translation buffer state:\n");
4066 cpu_fprintf(f, "gen code size %ld/%ld\n",
4067 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4068 cpu_fprintf(f, "TB count %d/%d\n",
4069 nb_tbs, code_gen_max_blocks);
4070 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
4071 nb_tbs ? target_code_size / nb_tbs : 0,
4072 max_target_code_size);
4073 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
4074 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4075 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
4076 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4077 cross_page,
4078 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4079 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
4080 direct_jmp_count,
4081 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4082 direct_jmp2_count,
4083 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
4084 cpu_fprintf(f, "\nStatistics:\n");
4085 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4086 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4087 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
4088 tcg_dump_info(f, cpu_fprintf);
4089}
4090#endif /* !VBOX */
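
#ifndef VBOX
/* Usage sketch: fprintf() matches the cpu_fprintf callback signature, so the
   statistics can be written directly to a stdio stream; in upstream QEMU this
   is what the monitor's "info jit" command does via its own printf wrapper. */
static void example_dump_jit_stats(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif /* !VBOX */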
4091
4092#if !defined(CONFIG_USER_ONLY)
4093
4094#define MMUSUFFIX _cmmu
4095#define GETPC() NULL
4096#define env cpu_single_env
4097#define SOFTMMU_CODE_ACCESS
4098
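/* Each inclusion below instantiates the softmmu template for one access size
   (1 << SHIFT bytes: byte, word, long, quad). With SOFTMMU_CODE_ACCESS and
   MMUSUFFIX _cmmu defined, the generated functions are the slow-path loads
   used for code fetches by the translator, not the ordinary data accessors. */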
4099#define SHIFT 0
4100#include "softmmu_template.h"
4101
4102#define SHIFT 1
4103#include "softmmu_template.h"
4104
4105#define SHIFT 2
4106#include "softmmu_template.h"
4107
4108#define SHIFT 3
4109#include "softmmu_template.h"
4110
4111#undef env
4112
4113#endif