Changeset 37689 in vbox for trunk/src/recompiler/exec.c
- Timestamp: Jun 29, 2011 4:01:23 PM
- File: trunk/src/recompiler/exec.c (1 edited)
trunk/src/recompiler/exec.c
r37675 r37689 57 57 #ifndef VBOX 58 58 #include "hw/hw.h" 59 #endif 59 #include "hw/qdev.h" 60 #endif /* !VBOX */ 60 61 #include "osdep.h" 61 62 #include "kvm.h" 63 #include "qemu-timer.h" 62 64 #if defined(CONFIG_USER_ONLY) 63 65 #include <qemu.h> 66 #include <signal.h> 67 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) 68 #include <sys/param.h> 69 #if __FreeBSD_version >= 700104 70 #define HAVE_KINFO_GETVMMAP 71 #define sigqueue sigqueue_freebsd /* avoid redefinition */ 72 #include <sys/time.h> 73 #include <sys/proc.h> 74 #include <machine/profile.h> 75 #define _KERNEL 76 #include <sys/user.h> 77 #undef _KERNEL 78 #undef sigqueue 79 #include <libutil.h> 80 #endif 81 #endif 64 82 #endif 65 83 … … 83 101 #define SMC_BITMAP_USE_THRESHOLD 10 84 102 85 #if defined(TARGET_SPARC64)86 #define TARGET_PHYS_ADDR_SPACE_BITS 4187 #elif defined(TARGET_SPARC)88 #define TARGET_PHYS_ADDR_SPACE_BITS 3689 #elif defined(TARGET_ALPHA)90 #define TARGET_PHYS_ADDR_SPACE_BITS 4291 #define TARGET_VIRT_ADDR_SPACE_BITS 4292 #elif defined(TARGET_PPC64)93 #define TARGET_PHYS_ADDR_SPACE_BITS 4294 #elif defined(TARGET_X86_64)95 #define TARGET_PHYS_ADDR_SPACE_BITS 4296 #elif defined(TARGET_I386)97 #define TARGET_PHYS_ADDR_SPACE_BITS 3698 #else99 #define TARGET_PHYS_ADDR_SPACE_BITS 32100 #endif101 102 103 static TranslationBlock *tbs; 103 int code_gen_max_blocks;104 static int code_gen_max_blocks; 104 105 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE]; 105 106 static int nb_tbs; … … 126 127 uint8_t code_gen_prologue[1024] code_gen_section; 127 128 #else /* VBOX */ 128 extern uint8_t *code_gen_prologue;129 extern uint8_t *code_gen_prologue; 129 130 #endif /* VBOX */ 130 131 static uint8_t *code_gen_buffer; … … 132 133 /* threshold to flush the translated code buffer */ 133 134 static unsigned long code_gen_buffer_max_size; 134 uint8_t *code_gen_ptr; 135 136 #ifndef VBOX 135 static uint8_t *code_gen_ptr; 136 137 137 #if !defined(CONFIG_USER_ONLY) 138 # ifndef VBOX 138 139 int phys_ram_fd; 139 uint8_t *phys_ram_dirty;140 140 static int in_migration; 141 142 typedef struct RAMBlock { 143 uint8_t *host; 144 ram_addr_t offset; 145 ram_addr_t length; 146 struct RAMBlock *next; 147 } RAMBlock; 148 149 static RAMBlock *ram_blocks; 150 /* TODO: When we implement (and use) ram deallocation (e.g. for hotplug) 151 then we can no longer assume contiguous ram offsets, and external uses 152 of this variable will break. */ 153 ram_addr_t last_ram_offset; 154 #endif 155 #else /* VBOX */ 156 /* we have memory ranges (the high PC-BIOS mapping) which 157 causes some pages to fall outside the dirty map here. */ 158 RTGCPHYS phys_ram_dirty_size; 159 uint8_t *phys_ram_dirty; 160 #endif /* VBOX */ 141 # endif /* !VBOX */ 142 143 RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) }; 144 #endif 161 145 162 146 CPUState *first_cpu; … … 184 168 } PageDesc; 185 169 170 /* In system mode we want L1_MAP to be based on ram offsets, 171 while in user mode we want it to be based on virtual addresses. */ 172 #if !defined(CONFIG_USER_ONLY) 173 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS 174 # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS 175 #else 176 # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS 177 #endif 178 #else 179 # define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS 180 #endif 181 182 /* Size of the L2 (and L3, etc) page tables. */ 183 #define L2_BITS 10 184 #define L2_SIZE (1 << L2_BITS) 185 186 /* The bits remaining after N lower levels of page tables. 
*/ 187 #define P_L1_BITS_REM \ 188 ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS) 189 #define V_L1_BITS_REM \ 190 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS) 191 192 /* Size of the L1 page table. Avoid silly small sizes. */ 193 #if P_L1_BITS_REM < 4 194 #define P_L1_BITS (P_L1_BITS_REM + L2_BITS) 195 #else 196 #define P_L1_BITS P_L1_BITS_REM 197 #endif 198 199 #if V_L1_BITS_REM < 4 200 #define V_L1_BITS (V_L1_BITS_REM + L2_BITS) 201 #else 202 #define V_L1_BITS V_L1_BITS_REM 203 #endif 204 205 #define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS) 206 #define V_L1_SIZE ((target_ulong)1 << V_L1_BITS) 207 208 #define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS) 209 #define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS) 210 211 unsigned long qemu_real_host_page_size; 212 unsigned long qemu_host_page_bits; 213 unsigned long qemu_host_page_size; 214 unsigned long qemu_host_page_mask; 215 216 /* This is a multi-level map on the virtual address space. 217 The bottom level has pointers to PageDesc. */ 218 static void *l1_map[V_L1_SIZE]; 219 220 #if !defined(CONFIG_USER_ONLY) 186 221 typedef struct PhysPageDesc { 187 222 /* offset in host memory of the page + io_index in the low bits */ … … 190 225 } PhysPageDesc; 191 226 192 #define L2_BITS 10 193 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS) 194 /* XXX: this is a temporary hack for alpha target. 195 * In the future, this is to be replaced by a multi-level table 196 * to actually be able to handle the complete 64 bits address space. 197 */ 198 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS) 199 #else 200 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS) 201 #endif 202 #ifdef VBOX 203 #define L0_BITS (TARGET_PHYS_ADDR_SPACE_BITS - 32) 204 #endif 205 206 #ifdef VBOX 207 #define L0_SIZE (1 << L0_BITS) 208 #endif 209 #define L1_SIZE (1 << L1_BITS) 210 #define L2_SIZE (1 << L2_BITS) 211 212 unsigned long qemu_real_host_page_size; 213 unsigned long qemu_host_page_bits; 214 unsigned long qemu_host_page_size; 215 unsigned long qemu_host_page_mask; 216 217 /* XXX: for system emulation, it could just be an array */ 218 #ifndef VBOX 219 static PageDesc *l1_map[L1_SIZE]; 220 static PhysPageDesc **l1_phys_map; 221 #else 222 static unsigned l0_map_max_used = 0; 223 static PageDesc **l0_map[L0_SIZE]; 224 static void **l0_phys_map[L0_SIZE]; 225 #endif 226 227 #if !defined(CONFIG_USER_ONLY) 227 /* This is a multi-level map on the physical address space. 228 The bottom level has pointers to PhysPageDesc. 
*/ 229 static void *l1_phys_map[P_L1_SIZE]; 230 228 231 static void io_mem_init(void); 229 232 … … 238 241 #ifndef VBOX 239 242 /* log support */ 243 #ifdef WIN32 244 static const char *logfilename = "qemu.log"; 245 #else 240 246 static const char *logfilename = "/tmp/qemu.log"; 247 #endif 241 248 #endif /* !VBOX */ 242 249 FILE *logfile; … … 244 251 #ifndef VBOX 245 252 static int log_append = 0; 246 #endif 253 #endif /* !VBOX */ 247 254 248 255 /* statistics */ 249 256 #ifndef VBOX 257 #if !defined(CONFIG_USER_ONLY) 250 258 static int tlb_flush_count; 259 #endif 251 260 static int tb_flush_count; 252 261 static int tb_phys_invalidate_count; … … 256 265 uint32_t tb_phys_invalidate_count; 257 266 #endif /* VBOX */ 258 259 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)260 typedef struct subpage_t {261 target_phys_addr_t base;262 CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];263 CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];264 void *opaque[TARGET_PAGE_SIZE][2][4];265 ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];266 } subpage_t;267 267 268 268 #ifndef VBOX … … 325 325 qemu_host_page_size = TARGET_PAGE_SIZE; 326 326 qemu_host_page_bits = 0; 327 #ifndef VBOX 328 while ((1 << qemu_host_page_bits) < qemu_host_page_size) 329 #else 330 while ((1 << qemu_host_page_bits) < (int)qemu_host_page_size) 331 #endif 327 while ((1 << qemu_host_page_bits) < VBOX_ONLY((int))qemu_host_page_size) 332 328 qemu_host_page_bits++; 333 329 qemu_host_page_mask = ~(qemu_host_page_size - 1); 334 #ifndef VBOX 335 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *)); 336 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *)); 337 #endif 338 339 #ifdef VBOX 340 /* We use other means to set reserved bit on our pages */ 341 #else /* !VBOX */ 342 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY) 330 331 #ifndef VBOX /* We use other means to set reserved bit on our pages */ 332 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY) 343 333 { 344 long long startaddr, endaddr; 334 #ifdef HAVE_KINFO_GETVMMAP 335 struct kinfo_vmentry *freep; 336 int i, cnt; 337 338 freep = kinfo_getvmmap(getpid(), &cnt); 339 if (freep) { 340 mmap_lock(); 341 for (i = 0; i < cnt; i++) { 342 unsigned long startaddr, endaddr; 343 344 startaddr = freep[i].kve_start; 345 endaddr = freep[i].kve_end; 346 if (h2g_valid(startaddr)) { 347 startaddr = h2g(startaddr) & TARGET_PAGE_MASK; 348 349 if (h2g_valid(endaddr)) { 350 endaddr = h2g(endaddr); 351 page_set_flags(startaddr, endaddr, PAGE_RESERVED); 352 } else { 353 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS 354 endaddr = ~0ul; 355 page_set_flags(startaddr, endaddr, PAGE_RESERVED); 356 #endif 357 } 358 } 359 } 360 free(freep); 361 mmap_unlock(); 362 } 363 #else 345 364 FILE *f; 346 int n; 347 348 mmap_lock(); 365 349 366 last_brk = (unsigned long)sbrk(0); 350 f = fopen("/proc/self/maps", "r"); 367 368 f = fopen("/compat/linux/proc/self/maps", "r"); 351 369 if (f) { 370 mmap_lock(); 371 352 372 do { 353 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr); 354 if (n == 2) { 355 startaddr = MIN(startaddr, 356 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1); 357 endaddr = MIN(endaddr, 358 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1); 359 page_set_flags(startaddr & TARGET_PAGE_MASK, 360 TARGET_PAGE_ALIGN(endaddr), 361 PAGE_RESERVED); 373 unsigned long startaddr, endaddr; 374 int n; 375 376 n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr); 377 378 if (n == 2 && h2g_valid(startaddr)) { 379 startaddr = h2g(startaddr) & TARGET_PAGE_MASK; 380 381 if (h2g_valid(endaddr)) { 382 
endaddr = h2g(endaddr); 383 } else { 384 endaddr = ~0ul; 385 } 386 page_set_flags(startaddr, endaddr, PAGE_RESERVED); 362 387 } 363 388 } while (!feof(f)); 389 364 390 fclose(f); 365 } 366 mmap_unlock(); 391 mmap_unlock(); 392 } 393 #endif 367 394 } 368 395 #endif … … 370 397 } 371 398 372 static inline PageDesc **page_l1_map(target_ulong index) 373 { 374 #ifndef VBOX 375 #if TARGET_LONG_BITS > 32 376 /* Host memory outside guest VM. For 32-bit targets we have already 377 excluded high addresses. */ 378 if (index > ((target_ulong)L2_SIZE * L1_SIZE)) 379 return NULL; 380 #endif 381 return &l1_map[index >> L2_BITS]; 382 #else /* VBOX */ 383 PageDesc **l1_map; 384 AssertMsgReturn(index < (target_ulong)L2_SIZE * L1_SIZE * L0_SIZE, 385 ("index=%RGp >= %RGp; L1_SIZE=%#x L2_SIZE=%#x L0_SIZE=%#x\n", 386 (RTGCPHYS)index, (RTGCPHYS)L2_SIZE * L1_SIZE, L1_SIZE, L2_SIZE, L0_SIZE), 387 NULL); 388 l1_map = l0_map[index >> (L1_BITS + L2_BITS)]; 389 if (RT_UNLIKELY(!l1_map)) 390 { 391 unsigned i0 = index >> (L1_BITS + L2_BITS); 392 l0_map[i0] = l1_map = qemu_mallocz(sizeof(PageDesc *) * L1_SIZE); 393 if (RT_UNLIKELY(!l1_map)) 399 static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc) 400 { 401 PageDesc *pd; 402 void **lp; 403 int i; 404 405 #if defined(CONFIG_USER_ONLY) 406 /* We can't use qemu_malloc because it may recurse into a locked mutex. */ 407 # define ALLOC(P, SIZE) \ 408 do { \ 409 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \ 410 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \ 411 } while (0) 412 #else 413 # define ALLOC(P, SIZE) \ 414 do { P = qemu_mallocz(SIZE); } while (0) 415 #endif 416 417 /* Level 1. Always allocated. */ 418 lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1)); 419 420 /* Level 2..N-1. */ 421 for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) { 422 void **p = *lp; 423 424 if (p == NULL) { 425 if (!alloc) { 426 return NULL; 427 } 428 ALLOC(p, sizeof(void *) * L2_SIZE); 429 *lp = p; 430 } 431 432 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1)); 433 } 434 435 pd = *lp; 436 if (pd == NULL) { 437 if (!alloc) { 394 438 return NULL; 395 if (i0 >= l0_map_max_used) 396 l0_map_max_used = i0 + 1; 397 } 398 return &l1_map[(index >> L2_BITS) & (L1_SIZE - 1)]; 399 #endif /* VBOX */ 400 } 401 402 static inline PageDesc *page_find_alloc(target_ulong index) 403 { 404 PageDesc **lp, *p; 405 lp = page_l1_map(index); 406 if (!lp) 407 return NULL; 408 409 p = *lp; 410 if (!p) { 411 /* allocate if not found */ 412 #if defined(CONFIG_USER_ONLY) 413 size_t len = sizeof(PageDesc) * L2_SIZE; 414 /* Don't use qemu_malloc because it may recurse. 
*/ 415 p = mmap(NULL, len, PROT_READ | PROT_WRITE, 416 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 417 *lp = p; 418 if (h2g_valid(p)) { 419 unsigned long addr = h2g(p); 420 page_set_flags(addr & TARGET_PAGE_MASK, 421 TARGET_PAGE_ALIGN(addr + len), 422 PAGE_RESERVED); 423 } 424 #else 425 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE); 426 *lp = p; 427 #endif 428 } 429 return p + (index & (L2_SIZE - 1)); 430 } 431 432 static inline PageDesc *page_find(target_ulong index) 433 { 434 PageDesc **lp, *p; 435 lp = page_l1_map(index); 436 if (!lp) 437 return NULL; 438 439 p = *lp; 440 if (!p) { 441 return NULL; 442 } 443 return p + (index & (L2_SIZE - 1)); 444 } 445 439 } 440 ALLOC(pd, sizeof(PageDesc) * L2_SIZE); 441 *lp = pd; 442 } 443 444 #undef ALLOC 445 446 return pd + (index & (L2_SIZE - 1)); 447 } 448 449 static inline PageDesc *page_find(tb_page_addr_t index) 450 { 451 return page_find_alloc(index, 0); 452 } 453 454 #if !defined(CONFIG_USER_ONLY) 446 455 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc) 447 456 { 448 void **lp, **p;449 457 PhysPageDesc *pd; 450 451 #ifndef VBOX 452 p = (void **)l1_phys_map; 453 #if TARGET_PHYS_ADDR_SPACE_BITS > 32 454 455 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS) 456 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS 457 #endif 458 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1)); 459 p = *lp; 460 if (!p) { 461 /* allocate if not found */ 462 if (!alloc) 458 void **lp; 459 int i; 460 461 /* Level 1. Always allocated. */ 462 lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1)); 463 464 /* Level 2..N-1. */ 465 for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) { 466 void **p = *lp; 467 if (p == NULL) { 468 if (!alloc) { 469 return NULL; 470 } 471 *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE); 472 } 473 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1)); 474 } 475 476 pd = *lp; 477 if (pd == NULL) { 478 int i; 479 480 if (!alloc) { 463 481 return NULL; 464 p = qemu_vmalloc(sizeof(void *) * L1_SIZE); 465 memset(p, 0, sizeof(void *) * L1_SIZE); 466 *lp = p; 467 } 468 #endif 469 #else /* VBOX */ 470 /* level 0 lookup and lazy allocation of level 1 map. */ 471 if (RT_UNLIKELY(index >= (target_phys_addr_t)L2_SIZE * L1_SIZE * L0_SIZE)) 472 return NULL; 473 p = l0_phys_map[index >> (L1_BITS + L2_BITS)]; 474 if (RT_UNLIKELY(!p)) { 475 if (!alloc) 476 return NULL; 477 p = qemu_vmalloc(sizeof(void **) * L1_SIZE); 478 memset(p, 0, sizeof(void **) * L1_SIZE); 479 l0_phys_map[index >> (L1_BITS + L2_BITS)] = p; 480 } 481 482 /* level 1 lookup and lazy allocation of level 2 map. 
*/ 483 #endif /* VBOX */ 484 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1)); 485 pd = *lp; 486 if (!pd) { 487 int i; 488 /* allocate if not found */ 489 if (!alloc) 490 return NULL; 491 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE); 492 *lp = pd; 482 } 483 484 *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE); 485 493 486 for (i = 0; i < L2_SIZE; i++) { 494 pd[i].phys_offset = IO_MEM_UNASSIGNED; 495 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS; 496 } 497 } 498 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1)); 487 pd[i].phys_offset = IO_MEM_UNASSIGNED; 488 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS; 489 } 490 } 491 492 return pd + (index & (L2_SIZE - 1)); 499 493 } 500 494 … … 504 498 } 505 499 506 #if !defined(CONFIG_USER_ONLY)507 500 static void tlb_protect_code(ram_addr_t ram_addr); 508 501 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr, … … 530 523 531 524 #ifdef USE_STATIC_CODE_GEN_BUFFER 532 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]; 525 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE] 526 __attribute__((aligned (CODE_GEN_ALIGN))); 533 527 #endif 534 528 … … 594 588 if (code_gen_buffer_size > 16 * 1024 * 1024) 595 589 code_gen_buffer_size = 16 * 1024 * 1024; 590 #elif defined(__s390x__) 591 /* Map the buffer so that we can use direct calls and branches. */ 592 /* We have a +- 4GB range on the branches; leave some slop. */ 593 if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) { 594 code_gen_buffer_size = 3ul * 1024 * 1024 * 1024; 595 } 596 start = (void *)0x90000000UL; 596 597 #endif 597 598 code_gen_buffer = mmap(start, code_gen_buffer_size, … … 637 638 #endif 638 639 code_gen_buffer_max_size = code_gen_buffer_size - 639 code_gen_max_block_size();640 (TCG_MAX_OP_SIZE * OPC_MAX_SIZE); 640 641 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE; 641 642 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock)); … … 654 655 io_mem_init(); 655 656 #endif 657 #if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE) 658 /* There's no guest base to take into account, so go ahead and 659 initialize the prologue now. */ 660 tcg_prologue_init(&tcg_ctx); 661 #endif 656 662 } 657 663 658 664 #ifndef VBOX 659 665 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY) 660 661 static void cpu_common_pre_save(void *opaque)662 {663 CPUState *env = opaque;664 665 cpu_synchronize_state(env);666 }667 668 static int cpu_common_pre_load(void *opaque)669 {670 CPUState *env = opaque;671 672 cpu_synchronize_state(env);673 return 0;674 }675 666 676 667 static int cpu_common_post_load(void *opaque, int version_id) … … 691 682 .minimum_version_id = 1, 692 683 .minimum_version_id_old = 1, 693 .pre_save = cpu_common_pre_save,694 .pre_load = cpu_common_pre_load,695 684 .post_load = cpu_common_post_load, 696 685 .fields = (VMStateField []) { … … 742 731 #endif 743 732 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY) 744 vmstate_register( cpu_index, &vmstate_cpu_common, env);745 register_savevm( "cpu", cpu_index, CPU_SAVE_VERSION,733 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env); 734 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION, 746 735 cpu_save, cpu_load, env); 747 736 #endif … … 758 747 } 759 748 760 /* set to NULL all the 'first_tb' fields in all PageDescs */ 749 /* Set to NULL all the 'first_tb' fields in all PageDescs. 
*/ 750 751 static void page_flush_tb_1 (int level, void **lp) 752 { 753 int i; 754 755 if (*lp == NULL) { 756 return; 757 } 758 if (level == 0) { 759 PageDesc *pd = *lp; 760 for (i = 0; i < L2_SIZE; ++i) { 761 pd[i].first_tb = NULL; 762 invalidate_page_bitmap(pd + i); 763 } 764 } else { 765 void **pp = *lp; 766 for (i = 0; i < L2_SIZE; ++i) { 767 page_flush_tb_1 (level - 1, pp + i); 768 } 769 } 770 } 771 761 772 static void page_flush_tb(void) 762 773 { 763 int i, j; 764 PageDesc *p; 765 #ifdef VBOX 766 int k; 767 #endif 768 769 #ifdef VBOX 770 k = l0_map_max_used; 771 while (k-- > 0) { 772 PageDesc **l1_map = l0_map[k]; 773 if (l1_map) { 774 #endif 775 for(i = 0; i < L1_SIZE; i++) { 776 p = l1_map[i]; 777 if (p) { 778 for(j = 0; j < L2_SIZE; j++) { 779 p->first_tb = NULL; 780 invalidate_page_bitmap(p); 781 p++; 782 } 783 } 784 } 785 #ifdef VBOX 786 } 787 } 788 #endif 774 int i; 775 for (i = 0; i < V_L1_SIZE; i++) { 776 page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i); 777 } 789 778 } 790 779 … … 930 919 } 931 920 932 void tb_phys_invalidate(TranslationBlock *tb, t arget_ulongpage_addr)921 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr) 933 922 { 934 923 CPUState *env; 935 924 PageDesc *p; 936 925 unsigned int h, n1; 937 t arget_phys_addr_t phys_pc;926 tb_page_addr_t phys_pc; 938 927 TranslationBlock *tb1, *tb2; 939 928 … … 1094 1083 TranslationBlock *tb; 1095 1084 uint8_t *tc_ptr; 1096 target_ulong phys_pc, phys_page2, virt_page2; 1085 tb_page_addr_t phys_pc, phys_page2; 1086 target_ulong virt_page2; 1097 1087 int code_gen_size; 1098 1088 1099 phys_pc = get_p hys_addr_code(env, pc);1089 phys_pc = get_page_addr_code(env, pc); 1100 1090 tb = tb_alloc(pc); 1101 1091 if (!tb) { … … 1119 1109 phys_page2 = -1; 1120 1110 if ((pc & TARGET_PAGE_MASK) != virt_page2) { 1121 phys_page2 = get_p hys_addr_code(env, virt_page2);1122 } 1123 tb_link_p hys(tb, phys_pc, phys_page2);1111 phys_page2 = get_page_addr_code(env, virt_page2); 1112 } 1113 tb_link_page(tb, phys_pc, phys_page2); 1124 1114 return tb; 1125 1115 } … … 1130 1120 from a real cpu write access: the virtual CPU will exit the current 1131 1121 TB if code is modified inside this TB. 
*/ 1132 void tb_invalidate_phys_page_range(t arget_phys_addr_t start, target_phys_addr_t end,1122 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end, 1133 1123 int is_cpu_write_access) 1134 1124 { 1135 1125 TranslationBlock *tb, *tb_next, *saved_tb; 1136 1126 CPUState *env = cpu_single_env; 1137 t arget_ulongtb_start, tb_end;1127 tb_page_addr_t tb_start, tb_end; 1138 1128 PageDesc *p; 1139 1129 int n; … … 1237 1227 1238 1228 /* len must be <= 8 and start must be a multiple of len */ 1239 static inline void tb_invalidate_phys_page_fast(t arget_phys_addr_t start, int len)1229 static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len) 1240 1230 { 1241 1231 PageDesc *p; … … 1264 1254 1265 1255 #if !defined(CONFIG_SOFTMMU) 1266 static void tb_invalidate_phys_page(t arget_phys_addr_t addr,1256 static void tb_invalidate_phys_page(tb_page_addr_t addr, 1267 1257 unsigned long pc, void *puc) 1268 1258 { … … 1326 1316 /* add the tb in the target page and protect it if necessary */ 1327 1317 static inline void tb_alloc_page(TranslationBlock *tb, 1328 unsigned int n, t arget_ulongpage_addr)1318 unsigned int n, tb_page_addr_t page_addr) 1329 1319 { 1330 1320 PageDesc *p; … … 1332 1322 1333 1323 tb->page_addr[n] = page_addr; 1334 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS );1324 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1); 1335 1325 tb->page_next[n] = p->first_tb; 1336 1326 last_first_tb = p->first_tb; … … 1358 1348 prot |= p2->flags; 1359 1349 p2->flags &= ~PAGE_WRITE; 1360 page_get_flags(addr);1361 1350 } 1362 1351 mprotect(g2h(page_addr), qemu_host_page_size, … … 1386 1375 1387 1376 if (nb_tbs >= code_gen_max_blocks || 1388 #ifndef VBOX 1389 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size) 1390 #else 1391 (code_gen_ptr - code_gen_buffer) >= (int)code_gen_buffer_max_size) 1392 #endif 1377 (code_gen_ptr - code_gen_buffer) >= VBOX_ONLY((unsigned long))code_gen_buffer_max_size) 1393 1378 return NULL; 1394 1379 tb = &tbs[nb_tbs++]; … … 1411 1396 /* add a new TB and link it to the physical page tables. phys_page2 is 1412 1397 (-1) to indicate that only one page contains the TB. */ 1413 void tb_link_p hys(TranslationBlock *tb,1414 t arget_ulong phys_pc, target_ulongphys_page2)1398 void tb_link_page(TranslationBlock *tb, 1399 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2) 1415 1400 { 1416 1401 unsigned int h; … … 1528 1513 1529 1514 #if defined(TARGET_HAS_ICE) 1515 #if defined(CONFIG_USER_ONLY) 1516 static void breakpoint_invalidate(CPUState *env, target_ulong pc) 1517 { 1518 tb_invalidate_phys_page_range(pc, pc + 1, 0); 1519 } 1520 #else 1530 1521 static void breakpoint_invalidate(CPUState *env, target_ulong pc) 1531 1522 { … … 1546 1537 } 1547 1538 #endif 1548 1539 #endif /* TARGET_HAS_ICE */ 1540 1541 #if defined(CONFIG_USER_ONLY) 1542 void cpu_watchpoint_remove_all(CPUState *env, int mask) 1543 1544 { 1545 } 1546 1547 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len, 1548 int flags, CPUWatchpoint **watchpoint) 1549 { 1550 return -ENOSYS; 1551 } 1552 #else 1549 1553 /* Add a watchpoint. */ 1550 1554 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len, … … 1624 1628 } 1625 1629 } 1630 #endif 1626 1631 1627 1632 /* Add a breakpoint. */ … … 1762 1767 static void cpu_unlink_tb(CPUState *env) 1763 1768 { 1764 #if defined(CONFIG_USE_NPTL)1765 1769 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the 1766 1770 problem and hope the cpu will stop of its own accord. 
For userspace 1767 1771 emulation this often isn't actually as bad as it sounds. Often 1768 1772 signals are used primarily to interrupt blocking syscalls. */ 1769 #else1770 1773 TranslationBlock *tb; 1771 1774 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED; 1772 1775 1776 spin_lock(&interrupt_lock); 1773 1777 tb = env->current_tb; 1774 1778 /* if the cpu is currently executing code, we must unlink it and 1775 1779 all the potentially executing TB */ 1776 if (tb && !testandset(&interrupt_lock)) {1780 if (tb) { 1777 1781 env->current_tb = NULL; 1778 1782 tb_reset_jump_recursive(tb); 1779 resetlock(&interrupt_lock); 1780 } 1781 #endif 1783 } 1784 spin_unlock(&interrupt_lock); 1782 1785 } 1783 1786 … … 1872 1875 { 0, NULL, NULL }, 1873 1876 }; 1877 1878 #ifndef CONFIG_USER_ONLY 1879 static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list 1880 = QLIST_HEAD_INITIALIZER(memory_client_list); 1881 1882 static void cpu_notify_set_memory(target_phys_addr_t start_addr, 1883 ram_addr_t size, 1884 ram_addr_t phys_offset) 1885 { 1886 CPUPhysMemoryClient *client; 1887 QLIST_FOREACH(client, &memory_client_list, list) { 1888 client->set_memory(client, start_addr, size, phys_offset); 1889 } 1890 } 1891 1892 static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start, 1893 target_phys_addr_t end) 1894 { 1895 CPUPhysMemoryClient *client; 1896 QLIST_FOREACH(client, &memory_client_list, list) { 1897 int r = client->sync_dirty_bitmap(client, start, end); 1898 if (r < 0) 1899 return r; 1900 } 1901 return 0; 1902 } 1903 1904 static int cpu_notify_migration_log(int enable) 1905 { 1906 CPUPhysMemoryClient *client; 1907 QLIST_FOREACH(client, &memory_client_list, list) { 1908 int r = client->migration_log(client, enable); 1909 if (r < 0) 1910 return r; 1911 } 1912 return 0; 1913 } 1914 1915 static void phys_page_for_each_1(CPUPhysMemoryClient *client, 1916 int level, void **lp) 1917 { 1918 int i; 1919 1920 if (*lp == NULL) { 1921 return; 1922 } 1923 if (level == 0) { 1924 PhysPageDesc *pd = *lp; 1925 for (i = 0; i < L2_SIZE; ++i) { 1926 if (pd[i].phys_offset != IO_MEM_UNASSIGNED) { 1927 client->set_memory(client, pd[i].region_offset, 1928 TARGET_PAGE_SIZE, pd[i].phys_offset); 1929 } 1930 } 1931 } else { 1932 void **pp = *lp; 1933 for (i = 0; i < L2_SIZE; ++i) { 1934 phys_page_for_each_1(client, level - 1, pp + i); 1935 } 1936 } 1937 } 1938 1939 static void phys_page_for_each(CPUPhysMemoryClient *client) 1940 { 1941 int i; 1942 for (i = 0; i < P_L1_SIZE; ++i) { 1943 phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1, 1944 l1_phys_map + 1); 1945 } 1946 } 1947 1948 void cpu_register_phys_memory_client(CPUPhysMemoryClient *client) 1949 { 1950 QLIST_INSERT_HEAD(&memory_client_list, client, list); 1951 phys_page_for_each(client); 1952 } 1953 1954 void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client) 1955 { 1956 QLIST_REMOVE(client, list); 1957 } 1958 #endif 1874 1959 1875 1960 static int cmp1(const char *s1, int n, const char *s2) … … 1912 1997 return mask; 1913 1998 } 1914 #endif /* !VBOX */ 1915 1916 #ifndef VBOX /* VBOX: we have our own routine. */ 1999 1917 2000 void cpu_abort(CPUState *env, const char *fmt, ...) 
1918 2001 { … … 1944 2027 va_end(ap2); 1945 2028 va_end(ap); 2029 #if defined(CONFIG_USER_ONLY) 2030 { 2031 struct sigaction act; 2032 sigfillset(&act.sa_mask); 2033 act.sa_handler = SIG_DFL; 2034 sigaction(SIGABRT, &act, NULL); 2035 } 2036 #endif 1946 2037 abort(); 1947 2038 } 1948 #endif /* !VBOX */ 1949 1950 #ifndef VBOX /* not needed */ 2039 1951 2040 CPUState *cpu_copy(CPUState *env) 1952 2041 { … … 1982 2071 return new_env; 1983 2072 } 2073 1984 2074 #endif /* !VBOX */ 1985 1986 2075 #if !defined(CONFIG_USER_ONLY) 1987 2076 … … 1999 2088 memset (&env->tb_jmp_cache[i], 0, 2000 2089 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *)); 2001 2002 2090 #ifdef VBOX 2091 2003 2092 /* inform raw mode about TLB page flush */ 2004 2093 remR3FlushPage(env, addr); … … 2035 2124 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *)); 2036 2125 2126 env->tlb_flush_addr = -1; 2127 env->tlb_flush_mask = 0; 2128 tlb_flush_count++; 2037 2129 #ifdef VBOX 2130 2038 2131 /* inform raw mode about TLB flush */ 2039 2132 remR3FlushTLB(env, flush_global); 2040 #endif 2041 tlb_flush_count++; 2133 #endif /* VBOX */ 2042 2134 } 2043 2135 … … 2062 2154 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr); 2063 2155 #endif 2156 /* Check if we need to flush due to large pages. */ 2157 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) { 2158 #if defined(DEBUG_TLB) 2159 printf("tlb_flush_page: forced full flush (" 2160 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n", 2161 env->tlb_flush_addr, env->tlb_flush_mask); 2162 #endif 2163 tlb_flush(env, 1); 2164 return; 2165 } 2064 2166 /* must reset current TB so that interrupts cannot modify the 2065 2167 links while we are modifying them */ … … 2084 2186 /** @todo Retest this? This function has changed... */ 2085 2187 remR3ProtectCode(cpu_single_env, ram_addr); 2086 #endif 2188 #endif /* VBOX */ 2087 2189 } 2088 2190 … … 2092 2194 target_ulong vaddr) 2093 2195 { 2094 #ifdef VBOX 2095 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size)) 2096 #endif 2097 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG; 2196 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG); 2098 2197 } 2099 2198 … … 2102 2201 { 2103 2202 unsigned long addr; 2104 2105 2203 #ifdef VBOX 2204 2106 2205 if (start & 3) 2107 2206 return; 2108 #endif 2207 #endif /* VBOX */ 2109 2208 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) { 2110 2209 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend; … … 2121 2220 CPUState *env; 2122 2221 unsigned long length, start1; 2123 int i, mask, len; 2124 uint8_t *p; 2222 int i; 2125 2223 2126 2224 start &= TARGET_PAGE_MASK; … … 2130 2228 if (length == 0) 2131 2229 return; 2132 len = length >> TARGET_PAGE_BITS; 2133 mask = ~dirty_flags; 2134 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS); 2135 #ifdef VBOX 2136 if (RT_LIKELY((start >> TARGET_PAGE_BITS) < phys_ram_dirty_size)) 2137 #endif 2138 for(i = 0; i < len; i++) 2139 p[i] &= mask; 2230 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags); 2140 2231 2141 2232 /* we modify the TLB cache so that the dirty bit will be set again … … 2166 2257 2167 2258 #ifndef VBOX 2259 2168 2260 int cpu_physical_memory_set_dirty_tracking(int enable) 2169 2261 { 2262 int ret = 0; 2170 2263 in_migration = enable; 2171 if (kvm_enabled()) { 2172 return kvm_set_migration_log(enable); 2173 } 2174 return 0; 2264 ret = cpu_notify_migration_log(!!enable); 2265 return ret; 2175 2266 } 2176 2267 … … 2179 2270 return in_migration; 2180 2271 } 2272 2181 2273 #endif 
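Note on the hunks above: they add a CPUPhysMemoryClient notifier list (cpu_register_phys_memory_client() plus the cpu_notify_* helpers and phys_page_for_each()) and start routing dirty-tracking through it instead of calling KVM directly. A rough sketch of how a client would plug into that list follows; it assumes only the callback signatures visible in the diff and exec.c's usual headers, and every log_* name and body here is purely illustrative, not part of the changeset:

```c
/* Hedged sketch: a minimal CPUPhysMemoryClient that just logs mappings.
 * Relies only on the callbacks shown in the diff (set_memory,
 * sync_dirty_bitmap, migration_log plus the QLIST link walked by the
 * cpu_notify_* helpers); the log_* names are hypothetical. */
static void log_set_memory(CPUPhysMemoryClient *client,
                           target_phys_addr_t start_addr,
                           ram_addr_t size, ram_addr_t phys_offset)
{
    printf("set_memory: " TARGET_FMT_plx " size %lx offset %lx\n",
           start_addr, (unsigned long)size, (unsigned long)phys_offset);
}

static int log_sync_dirty_bitmap(CPUPhysMemoryClient *client,
                                 target_phys_addr_t start,
                                 target_phys_addr_t end)
{
    return 0;                       /* nothing to sync for a logger */
}

static int log_migration_log(CPUPhysMemoryClient *client, int enable)
{
    return 0;                       /* dirty logging not used here */
}

static CPUPhysMemoryClient log_client = {
    .set_memory        = log_set_memory,
    .sync_dirty_bitmap = log_sync_dirty_bitmap,
    .migration_log     = log_migration_log,
};

/* Registration replays every populated PhysPageDesc through
 * phys_page_for_each(), so the client also sees pre-existing mappings. */
static void log_client_register(void)
{
    cpu_register_phys_memory_client(&log_client);
}
```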
/* !VBOX */ 2182 2274 … … 2185 2277 { 2186 2278 #ifndef VBOX 2187 int ret = 0; 2188 2189 if (kvm_enabled()) 2190 ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr); 2279 int ret; 2280 2281 ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr); 2191 2282 return ret; 2192 #else 2283 #else /* VBOX */ 2193 2284 return 0; 2194 #endif 2285 #endif /* VBOX */ 2195 2286 } 2196 2287 … … 2255 2346 } 2256 2347 2257 /* add a new TLB entry. At most one entry for a given virtual address 2258 is permitted. Return 0 if OK or 2 if the page could not be mapped 2259 (can only happen in non SOFTMMU mode for I/O pages or pages 2260 conflicting with the host address space). */ 2261 int tlb_set_page_exec(CPUState *env, target_ulong vaddr, 2262 target_phys_addr_t paddr, int prot, 2263 int mmu_idx, int is_softmmu) 2348 /* Our TLB does not support large pages, so remember the area covered by 2349 large pages and trigger a full TLB flush if these are invalidated. */ 2350 static void tlb_add_large_page(CPUState *env, target_ulong vaddr, 2351 target_ulong size) 2352 { 2353 target_ulong mask = ~(size - 1); 2354 2355 if (env->tlb_flush_addr == (target_ulong)-1) { 2356 env->tlb_flush_addr = vaddr & mask; 2357 env->tlb_flush_mask = mask; 2358 return; 2359 } 2360 /* Extend the existing region to include the new page. 2361 This is a compromise between unnecessary flushes and the cost 2362 of maintaining a full variable size TLB. */ 2363 mask &= env->tlb_flush_mask; 2364 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) { 2365 mask <<= 1; 2366 } 2367 env->tlb_flush_addr &= mask; 2368 env->tlb_flush_mask = mask; 2369 } 2370 2371 /* Add a new TLB entry. At most one entry for a given virtual address 2372 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the 2373 supplied size is only used by tlb_flush_page. */ 2374 void tlb_set_page(CPUState *env, target_ulong vaddr, 2375 target_phys_addr_t paddr, int prot, 2376 int mmu_idx, target_ulong size) 2264 2377 { 2265 2378 PhysPageDesc *p; … … 2268 2381 target_ulong address; 2269 2382 target_ulong code_address; 2270 target_phys_addr_t addend; 2271 int ret; 2383 unsigned long addend; 2272 2384 CPUTLBEntry *te; 2273 2385 CPUWatchpoint *wp; … … 2277 2389 #endif 2278 2390 2391 assert(size >= TARGET_PAGE_SIZE); 2392 if (size != TARGET_PAGE_SIZE) { 2393 tlb_add_large_page(env, vaddr, size); 2394 } 2279 2395 p = phys_page_find(paddr >> TARGET_PAGE_BITS); 2280 2396 if (!p) { … … 2288 2404 #endif 2289 2405 2290 ret = 0;2291 2406 address = vaddr; 2292 2407 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) { … … 2357 2472 QTAILQ_FOREACH(wp, &env->watchpoints, entry) { 2358 2473 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) { 2359 iotlb = io_mem_watch + paddr; 2360 /* TODO: The memory case can be optimized by not trapping 2361 reads of pages with a write breakpoint. */ 2362 address |= TLB_MMIO; 2474 /* Avoid trapping reads of pages with a write breakpoint. */ 2475 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) { 2476 iotlb = io_mem_watch + paddr; 2477 address |= TLB_MMIO; 2478 break; 2479 } 2363 2480 } 2364 2481 } … … 2409 2526 remR3FlushPage(env, vaddr); 2410 2527 #endif 2411 return ret;2412 2528 } 2413 2529 … … 2421 2537 { 2422 2538 } 2423 2424 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,2425 target_phys_addr_t paddr, int prot,2426 int mmu_idx, int is_softmmu)2427 {2428 return 0;2429 }2430 2431 #ifndef VBOX2432 2539 2433 2540 /* … … 2435 2542 * and calls callback function 'fn' for each region. 
2436 2543 */ 2437 int walk_memory_regions(void *priv, 2438 int (*fn)(void *, unsigned long, unsigned long, unsigned long)) 2439 { 2440 unsigned long start, end; 2441 PageDesc *p = NULL; 2442 int i, j, prot, prot1; 2443 int rc = 0; 2444 2445 start = end = -1; 2446 prot = 0; 2447 2448 for (i = 0; i <= L1_SIZE; i++) { 2449 p = (i < L1_SIZE) ? l1_map[i] : NULL; 2450 for (j = 0; j < L2_SIZE; j++) { 2451 prot1 = (p == NULL) ? 0 : p[j].flags; 2452 /* 2453 * "region" is one continuous chunk of memory 2454 * that has same protection flags set. 2455 */ 2456 if (prot1 != prot) { 2457 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS); 2458 if (start != -1) { 2459 rc = (*fn)(priv, start, end, prot); 2460 /* callback can stop iteration by returning != 0 */ 2461 if (rc != 0) 2462 return (rc); 2544 2545 struct walk_memory_regions_data 2546 { 2547 walk_memory_regions_fn fn; 2548 void *priv; 2549 unsigned long start; 2550 int prot; 2551 }; 2552 2553 static int walk_memory_regions_end(struct walk_memory_regions_data *data, 2554 abi_ulong end, int new_prot) 2555 { 2556 if (data->start != -1ul) { 2557 int rc = data->fn(data->priv, data->start, end, data->prot); 2558 if (rc != 0) { 2559 return rc; 2560 } 2561 } 2562 2563 data->start = (new_prot ? end : -1ul); 2564 data->prot = new_prot; 2565 2566 return 0; 2567 } 2568 2569 static int walk_memory_regions_1(struct walk_memory_regions_data *data, 2570 abi_ulong base, int level, void **lp) 2571 { 2572 abi_ulong pa; 2573 int i, rc; 2574 2575 if (*lp == NULL) { 2576 return walk_memory_regions_end(data, base, 0); 2577 } 2578 2579 if (level == 0) { 2580 PageDesc *pd = *lp; 2581 for (i = 0; i < L2_SIZE; ++i) { 2582 int prot = pd[i].flags; 2583 2584 pa = base | (i << TARGET_PAGE_BITS); 2585 if (prot != data->prot) { 2586 rc = walk_memory_regions_end(data, pa, prot); 2587 if (rc != 0) { 2588 return rc; 2463 2589 } 2464 if (prot1 != 0)2465 start = end;2466 else2467 start = -1;2468 prot = prot1;2469 2590 } 2470 if (p == NULL) 2471 break; 2472 } 2473 } 2474 return (rc); 2475 } 2476 2477 static int dump_region(void *priv, unsigned long start, 2478 unsigned long end, unsigned long prot) 2591 } 2592 } else { 2593 void **pp = *lp; 2594 for (i = 0; i < L2_SIZE; ++i) { 2595 pa = base | ((abi_ulong)i << 2596 (TARGET_PAGE_BITS + L2_BITS * level)); 2597 rc = walk_memory_regions_1(data, pa, level - 1, pp + i); 2598 if (rc != 0) { 2599 return rc; 2600 } 2601 } 2602 } 2603 2604 return 0; 2605 } 2606 2607 int walk_memory_regions(void *priv, walk_memory_regions_fn fn) 2608 { 2609 struct walk_memory_regions_data data; 2610 unsigned long i; 2611 2612 data.fn = fn; 2613 data.priv = priv; 2614 data.start = -1ul; 2615 data.prot = 0; 2616 2617 for (i = 0; i < V_L1_SIZE; i++) { 2618 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT, 2619 V_L1_SHIFT / L2_BITS - 1, l1_map + i); 2620 if (rc != 0) { 2621 return rc; 2622 } 2623 } 2624 2625 return walk_memory_regions_end(&data, 0, 0); 2626 } 2627 2628 static int dump_region(void *priv, abi_ulong start, 2629 abi_ulong end, unsigned long prot) 2479 2630 { 2480 2631 FILE *f = (FILE *)priv; 2481 2632 2482 (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n", 2633 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx 2634 " "TARGET_ABI_FMT_lx" %c%c%c\n", 2483 2635 start, end, end - start, 2484 2636 ((prot & PAGE_READ) ? 
'r' : '-'), … … 2497 2649 } 2498 2650 2499 #endif /* !VBOX */2500 2501 2651 int page_get_flags(target_ulong address) 2502 2652 { … … 2509 2659 } 2510 2660 2511 /* modify the flags of a page and invalidate the code if2512 necessary. The flag PAGE_WRITE_ORG is positioned automatically2513 depending on PAGE_WRITE*/2661 /* Modify the flags of a page and invalidate the code if necessary. 2662 The flag PAGE_WRITE_ORG is positioned automatically depending 2663 on PAGE_WRITE. The mmap_lock should already be held. */ 2514 2664 void page_set_flags(target_ulong start, target_ulong end, int flags) 2515 2665 { 2516 PageDesc *p; 2517 target_ulong addr; 2518 2519 /* mmap_lock should already be held. */ 2666 target_ulong addr, len; 2667 2668 /* This function should never be called with addresses outside the 2669 guest address space. If this assert fires, it probably indicates 2670 a missing call to h2g_valid. */ 2671 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS 2672 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS)); 2673 #endif 2674 assert(start < end); 2675 2520 2676 start = start & TARGET_PAGE_MASK; 2521 2677 end = TARGET_PAGE_ALIGN(end); 2522 if (flags & PAGE_WRITE) 2678 2679 if (flags & PAGE_WRITE) { 2523 2680 flags |= PAGE_WRITE_ORG; 2681 } 2682 2524 2683 #ifdef VBOX 2525 2684 AssertMsgFailed(("We shouldn't be here, and if we should, we must have an env to do the proper locking!\n")); 2526 2685 #endif 2527 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) { 2528 p = page_find_alloc(addr >> TARGET_PAGE_BITS); 2529 /* We may be called for host regions that are outside guest 2530 address space. */ 2531 if (!p) 2532 return; 2533 /* if the write protection is set, then we invalidate the code 2534 inside */ 2686 for (addr = start, len = end - start; 2687 len != 0; 2688 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) { 2689 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1); 2690 2691 /* If the write protection bit is set, then we invalidate 2692 the code inside. */ 2535 2693 if (!(p->flags & PAGE_WRITE) && 2536 2694 (flags & PAGE_WRITE) && … … 2548 2706 target_ulong addr; 2549 2707 2550 if (start + len < start) 2551 /* we've wrapped around */ 2708 /* This function should never be called with addresses outside the 2709 guest address space. If this assert fires, it probably indicates 2710 a missing call to h2g_valid. */ 2711 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS 2712 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS)); 2713 #endif 2714 2715 if (len == 0) { 2716 return 0; 2717 } 2718 if (start + len - 1 < start) { 2719 /* We've wrapped around. 
*/ 2552 2720 return -1; 2721 } 2553 2722 2554 2723 end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */ 2555 2724 start = start & TARGET_PAGE_MASK; 2556 2725 2557 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) { 2726 for (addr = start, len = end - start; 2727 len != 0; 2728 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) { 2558 2729 p = page_find(addr >> TARGET_PAGE_BITS); 2559 2730 if( !p ) … … 2583 2754 int page_unprotect(target_ulong address, unsigned long pc, void *puc) 2584 2755 { 2585 unsigned int p age_index, prot, pindex;2586 PageDesc *p , *p1;2756 unsigned int prot; 2757 PageDesc *p; 2587 2758 target_ulong host_start, host_end, addr; 2588 2759 … … 2592 2763 mmap_lock(); 2593 2764 2594 host_start = address & qemu_host_page_mask; 2595 page_index = host_start >> TARGET_PAGE_BITS; 2596 p1 = page_find(page_index); 2597 if (!p1) { 2765 p = page_find(address >> TARGET_PAGE_BITS); 2766 if (!p) { 2598 2767 mmap_unlock(); 2599 2768 return 0; 2600 2769 } 2601 host_end = host_start + qemu_host_page_size; 2602 p = p1; 2603 prot = 0; 2604 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) { 2605 prot |= p->flags; 2606 p++; 2607 } 2770 2608 2771 /* if the page was really writable, then we change its 2609 2772 protection back to writable */ 2610 if (prot & PAGE_WRITE_ORG) { 2611 pindex = (address - host_start) >> TARGET_PAGE_BITS; 2612 if (!(p1[pindex].flags & PAGE_WRITE)) { 2613 mprotect((void *)g2h(host_start), qemu_host_page_size, 2614 (prot & PAGE_BITS) | PAGE_WRITE); 2615 p1[pindex].flags |= PAGE_WRITE; 2773 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) { 2774 host_start = address & qemu_host_page_mask; 2775 host_end = host_start + qemu_host_page_size; 2776 2777 prot = 0; 2778 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) { 2779 p = page_find(addr >> TARGET_PAGE_BITS); 2780 p->flags |= PAGE_WRITE; 2781 prot |= p->flags; 2782 2616 2783 /* and since the content will be modified, we must invalidate 2617 2784 the corresponding translated code. 
*/ 2618 tb_invalidate_phys_page(addr ess, pc, puc);2785 tb_invalidate_phys_page(addr, pc, puc); 2619 2786 #ifdef DEBUG_TB_CHECK 2620 tb_invalidate_check(address); 2621 #endif 2622 mmap_unlock(); 2623 return 1; 2624 } 2787 tb_invalidate_check(addr); 2788 #endif 2789 } 2790 mprotect((void *)g2h(host_start), qemu_host_page_size, 2791 prot & PAGE_BITS); 2792 2793 mmap_unlock(); 2794 return 1; 2625 2795 } 2626 2796 mmap_unlock(); … … 2636 2806 #if !defined(CONFIG_USER_ONLY) 2637 2807 2808 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK) 2809 typedef struct subpage_t { 2810 target_phys_addr_t base; 2811 ram_addr_t sub_io_index[TARGET_PAGE_SIZE]; 2812 ram_addr_t region_offset[TARGET_PAGE_SIZE]; 2813 } subpage_t; 2814 2638 2815 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end, 2639 2816 ram_addr_t memory, ram_addr_t region_offset); 2640 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys, 2641 ram_addr_t orig_memory, ram_addr_t region_offset); 2817 static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys, 2818 ram_addr_t orig_memory, 2819 ram_addr_t region_offset); 2642 2820 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \ 2643 2821 need_subpage) \ … … 2677 2855 CPUState *env; 2678 2856 ram_addr_t orig_size = size; 2679 void *subpage; 2680 2681 if (kvm_enabled()) 2682 kvm_set_phys_mem(start_addr, size, phys_offset); 2857 subpage_t *subpage; 2858 2859 #ifndef VBOX 2860 cpu_notify_set_memory(start_addr, size, phys_offset); 2861 #endif /* !VBOX */ 2683 2862 2684 2863 if (phys_offset == IO_MEM_UNASSIGNED) { … … 2697 2876 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, 2698 2877 need_subpage); 2699 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {2878 if (need_subpage) { 2700 2879 if (!(orig_memory & IO_MEM_SUBPAGE)) { 2701 2880 subpage = subpage_init((addr & TARGET_PAGE_MASK), … … 2729 2908 end_addr2, need_subpage); 2730 2909 2731 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {2910 if (need_subpage) { 2732 2911 subpage = subpage_init((addr & TARGET_PAGE_MASK), 2733 2912 &p->phys_offset, IO_MEM_UNASSIGNED, … … 2762 2941 2763 2942 #ifndef VBOX 2943 2764 2944 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size) 2765 2945 { … … 2774 2954 } 2775 2955 2776 ram_addr_t qemu_ram_alloc(ram_addr_t size) 2777 { 2778 RAMBlock *new_block; 2956 void qemu_flush_coalesced_mmio_buffer(void) 2957 { 2958 if (kvm_enabled()) 2959 kvm_flush_coalesced_mmio_buffer(); 2960 } 2961 2962 #if defined(__linux__) && !defined(TARGET_S390X) 2963 2964 #include <sys/vfs.h> 2965 2966 #define HUGETLBFS_MAGIC 0x958458f6 2967 2968 static long gethugepagesize(const char *path) 2969 { 2970 struct statfs fs; 2971 int ret; 2972 2973 do { 2974 ret = statfs(path, &fs); 2975 } while (ret != 0 && errno == EINTR); 2976 2977 if (ret != 0) { 2978 perror(path); 2979 return 0; 2980 } 2981 2982 if (fs.f_type != HUGETLBFS_MAGIC) 2983 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path); 2984 2985 return fs.f_bsize; 2986 } 2987 2988 static void *file_ram_alloc(RAMBlock *block, 2989 ram_addr_t memory, 2990 const char *path) 2991 { 2992 char *filename; 2993 void *area; 2994 int fd; 2995 #ifdef MAP_POPULATE 2996 int flags; 2997 #endif 2998 unsigned long hpagesize; 2999 3000 hpagesize = gethugepagesize(path); 3001 if (!hpagesize) { 3002 return NULL; 3003 } 3004 3005 if (memory < hpagesize) { 3006 return NULL; 3007 } 3008 3009 if (kvm_enabled() && !kvm_has_sync_mmu()) { 3010 fprintf(stderr, "host lacks kvm mmu 
notifiers, -mem-path unsupported\n"); 3011 return NULL; 3012 } 3013 3014 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) { 3015 return NULL; 3016 } 3017 3018 fd = mkstemp(filename); 3019 if (fd < 0) { 3020 perror("unable to create backing store for hugepages"); 3021 free(filename); 3022 return NULL; 3023 } 3024 unlink(filename); 3025 free(filename); 3026 3027 memory = (memory+hpagesize-1) & ~(hpagesize-1); 3028 3029 /* 3030 * ftruncate is not supported by hugetlbfs in older 3031 * hosts, so don't bother bailing out on errors. 3032 * If anything goes wrong with it under other filesystems, 3033 * mmap will fail. 3034 */ 3035 if (ftruncate(fd, memory)) 3036 perror("ftruncate"); 3037 3038 #ifdef MAP_POPULATE 3039 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case 3040 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED 3041 * to sidestep this quirk. 3042 */ 3043 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE; 3044 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0); 3045 #else 3046 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); 3047 #endif 3048 if (area == MAP_FAILED) { 3049 perror("file_ram_alloc: can't mmap RAM pages"); 3050 close(fd); 3051 return (NULL); 3052 } 3053 block->fd = fd; 3054 return area; 3055 } 3056 #endif 3057 3058 static ram_addr_t find_ram_offset(ram_addr_t size) 3059 { 3060 RAMBlock *block, *next_block; 3061 ram_addr_t offset = 0, mingap = ULONG_MAX; 3062 3063 if (QLIST_EMPTY(&ram_list.blocks)) 3064 return 0; 3065 3066 QLIST_FOREACH(block, &ram_list.blocks, next) { 3067 ram_addr_t end, next = ULONG_MAX; 3068 3069 end = block->offset + block->length; 3070 3071 QLIST_FOREACH(next_block, &ram_list.blocks, next) { 3072 if (next_block->offset >= end) { 3073 next = MIN(next, next_block->offset); 3074 } 3075 } 3076 if (next - end >= size && next - end < mingap) { 3077 offset = end; 3078 mingap = next - end; 3079 } 3080 } 3081 return offset; 3082 } 3083 3084 static ram_addr_t last_ram_offset(void) 3085 { 3086 RAMBlock *block; 3087 ram_addr_t last = 0; 3088 3089 QLIST_FOREACH(block, &ram_list.blocks, next) 3090 last = MAX(last, block->offset + block->length); 3091 3092 return last; 3093 } 3094 3095 ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name, 3096 ram_addr_t size, void *host) 3097 { 3098 RAMBlock *new_block, *block; 2779 3099 2780 3100 size = TARGET_PAGE_ALIGN(size); 2781 new_block = qemu_malloc(sizeof(*new_block)); 2782 2783 #if defined(TARGET_S390X) && defined(CONFIG_KVM) 2784 /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */ 2785 new_block->host = mmap((void*)0x1000000, size, PROT_EXEC|PROT_READ|PROT_WRITE, 2786 MAP_SHARED | MAP_ANONYMOUS, -1, 0); 2787 #else 2788 new_block->host = qemu_vmalloc(size); 2789 #endif 2790 #ifdef MADV_MERGEABLE 2791 madvise(new_block->host, size, MADV_MERGEABLE); 2792 #endif 2793 new_block->offset = last_ram_offset; 3101 new_block = qemu_mallocz(sizeof(*new_block)); 3102 3103 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) { 3104 char *id = dev->parent_bus->info->get_dev_path(dev); 3105 if (id) { 3106 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id); 3107 qemu_free(id); 3108 } 3109 } 3110 pstrcat(new_block->idstr, sizeof(new_block->idstr), name); 3111 3112 QLIST_FOREACH(block, &ram_list.blocks, next) { 3113 if (!strcmp(block->idstr, new_block->idstr)) { 3114 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n", 3115 new_block->idstr); 3116 abort(); 3117 } 3118 } 3119 
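Aside on the allocator being rebuilt in this hunk (continued below): RAM blocks are now named and kept on the QLIST-based ram_list, which is what the idstr uniqueness check above enforces. A hedged usage sketch, where the "mychip.ram" name and the 64 MiB size are made up and only the call shapes follow the signatures introduced in this changeset:

```c
/* Hypothetical board-init fragment: allocate a named RAM block with the
 * reworked qemu_ram_alloc(DeviceState *, name, size) and map it with the
 * long-standing cpu_register_physical_memory() helper. */
ram_addr_t ram_offset;

ram_offset = qemu_ram_alloc(NULL /* no owning device */, "mychip.ram",
                            64 * 1024 * 1024);
cpu_register_physical_memory(0x00000000, 64 * 1024 * 1024,
                             ram_offset | IO_MEM_RAM);
```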
3120 new_block->host = host; 3121 3122 new_block->offset = find_ram_offset(size); 2794 3123 new_block->length = size; 2795 3124 2796 new_block->next = ram_blocks; 2797 ram_blocks = new_block; 2798 2799 phys_ram_dirty = qemu_realloc(phys_ram_dirty, 2800 (last_ram_offset + size) >> TARGET_PAGE_BITS); 2801 memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS), 3125 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next); 3126 3127 ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty, 3128 last_ram_offset() >> TARGET_PAGE_BITS); 3129 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS), 2802 3130 0xff, size >> TARGET_PAGE_BITS); 2803 2804 last_ram_offset += size;2805 3131 2806 3132 if (kvm_enabled()) … … 2810 3136 } 2811 3137 3138 ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size) 3139 { 3140 RAMBlock *new_block, *block; 3141 3142 size = TARGET_PAGE_ALIGN(size); 3143 new_block = qemu_mallocz(sizeof(*new_block)); 3144 3145 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) { 3146 char *id = dev->parent_bus->info->get_dev_path(dev); 3147 if (id) { 3148 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id); 3149 qemu_free(id); 3150 } 3151 } 3152 pstrcat(new_block->idstr, sizeof(new_block->idstr), name); 3153 3154 QLIST_FOREACH(block, &ram_list.blocks, next) { 3155 if (!strcmp(block->idstr, new_block->idstr)) { 3156 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n", 3157 new_block->idstr); 3158 abort(); 3159 } 3160 } 3161 3162 if (mem_path) { 3163 #if defined (__linux__) && !defined(TARGET_S390X) 3164 new_block->host = file_ram_alloc(new_block, size, mem_path); 3165 if (!new_block->host) { 3166 new_block->host = qemu_vmalloc(size); 3167 #ifdef MADV_MERGEABLE 3168 madvise(new_block->host, size, MADV_MERGEABLE); 3169 #endif 3170 } 3171 #else 3172 fprintf(stderr, "-mem-path option unsupported\n"); 3173 exit(1); 3174 #endif 3175 } else { 3176 #if defined(TARGET_S390X) && defined(CONFIG_KVM) 3177 /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */ 3178 new_block->host = mmap((void*)0x1000000, size, 3179 PROT_EXEC|PROT_READ|PROT_WRITE, 3180 MAP_SHARED | MAP_ANONYMOUS, -1, 0); 3181 #else 3182 new_block->host = qemu_vmalloc(size); 3183 #endif 3184 #ifdef MADV_MERGEABLE 3185 madvise(new_block->host, size, MADV_MERGEABLE); 3186 #endif 3187 } 3188 new_block->offset = find_ram_offset(size); 3189 new_block->length = size; 3190 3191 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next); 3192 3193 ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty, 3194 last_ram_offset() >> TARGET_PAGE_BITS); 3195 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS), 3196 0xff, size >> TARGET_PAGE_BITS); 3197 3198 if (kvm_enabled()) 3199 kvm_setup_guest_memory(new_block->host, size); 3200 3201 return new_block->offset; 3202 } 3203 2812 3204 void qemu_ram_free(ram_addr_t addr) 2813 3205 { 2814 /* TODO: implement this. 
*/ 3206 RAMBlock *block; 3207 3208 QLIST_FOREACH(block, &ram_list.blocks, next) { 3209 if (addr == block->offset) { 3210 QLIST_REMOVE(block, next); 3211 if (mem_path) { 3212 #if defined (__linux__) && !defined(TARGET_S390X) 3213 if (block->fd) { 3214 munmap(block->host, block->length); 3215 close(block->fd); 3216 } else { 3217 qemu_vfree(block->host); 3218 } 3219 #endif 3220 } else { 3221 #if defined(TARGET_S390X) && defined(CONFIG_KVM) 3222 munmap(block->host, block->length); 3223 #else 3224 qemu_vfree(block->host); 3225 #endif 3226 } 3227 qemu_free(block); 3228 return; 3229 } 3230 } 3231 2815 3232 } 2816 3233 … … 2825 3242 void *qemu_get_ram_ptr(ram_addr_t addr) 2826 3243 { 2827 RAMBlock *prev;2828 RAMBlock **prevp;2829 3244 RAMBlock *block; 2830 3245 2831 prev = NULL; 2832 prevp = &ram_blocks; 2833 block = ram_blocks; 2834 while (block && (block->offset > addr 2835 || block->offset + block->length <= addr)) { 2836 if (prev) 2837 prevp = &prev->next; 2838 prev = block; 2839 block = block->next; 2840 } 2841 if (!block) { 2842 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr); 2843 abort(); 2844 } 2845 /* Move this entry to to start of the list. */ 2846 if (prev) { 2847 prev->next = block->next; 2848 block->next = *prevp; 2849 *prevp = block; 2850 } 2851 return block->host + (addr - block->offset); 3246 QLIST_FOREACH(block, &ram_list.blocks, next) { 3247 if (addr - block->offset < block->length) { 3248 QLIST_REMOVE(block, next); 3249 QLIST_INSERT_HEAD(&ram_list.blocks, block, next); 3250 return block->host + (addr - block->offset); 3251 } 3252 } 3253 3254 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr); 3255 abort(); 3256 3257 return NULL; 2852 3258 } 2853 3259 … … 2856 3262 ram_addr_t qemu_ram_addr_from_host(void *ptr) 2857 3263 { 2858 RAMBlock *prev;2859 RAMBlock **prevp;2860 3264 RAMBlock *block; 2861 3265 uint8_t *host = ptr; 2862 3266 2863 prev = NULL; 2864 prevp = &ram_blocks; 2865 block = ram_blocks; 2866 while (block && (block->host > host 2867 || block->host + block->length <= host)) { 2868 if (prev) 2869 prevp = &prev->next; 2870 prev = block; 2871 block = block->next; 2872 } 2873 if (!block) { 2874 fprintf(stderr, "Bad ram pointer %p\n", ptr); 2875 abort(); 2876 } 2877 return block->offset + (host - block->host); 3267 QLIST_FOREACH(block, &ram_list.blocks, next) { 3268 if (host - block->host < block->length) { 3269 return block->offset + (host - block->host); 3270 } 3271 } 3272 3273 fprintf(stderr, "Bad ram pointer %p\n", ptr); 3274 abort(); 3275 3276 return 0; 2878 3277 } 2879 3278 … … 2959 3358 { 2960 3359 int dirty_flags; 2961 #ifdef VBOX 2962 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size)) 2963 dirty_flags = 0xff; 2964 else 2965 #endif /* VBOX */ 2966 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; 3360 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr); 2967 3361 if (!(dirty_flags & CODE_DIRTY_FLAG)) { 2968 3362 #if !defined(CONFIG_USER_ONLY) 2969 3363 tb_invalidate_phys_page_fast(ram_addr, 1); 2970 # ifdef VBOX 2971 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size)) 2972 dirty_flags = 0xff; 2973 else 2974 # endif /* VBOX */ 2975 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; 3364 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr); 2976 3365 #endif 2977 3366 } … … 2981 3370 stb_p(qemu_get_ram_ptr(ram_addr), val); 2982 3371 #endif 2983 #ifdef CONFIG_KQEMU2984 if (cpu_single_env->kqemu_enabled &&2985 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != 
KQEMU_MODIFY_PAGE_MASK)2986 kqemu_modify_page(cpu_single_env, ram_addr);2987 #endif2988 3372 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG); 2989 #ifdef VBOX 2990 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size)) 2991 #endif /* !VBOX */ 2992 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags; 3373 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags); 2993 3374 /* we remove the notdirty callback only if the code has been 2994 3375 flushed */ … … 3001 3382 { 3002 3383 int dirty_flags; 3003 #ifdef VBOX 3004 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size)) 3005 dirty_flags = 0xff; 3006 else 3007 #endif /* VBOX */ 3008 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; 3384 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr); 3009 3385 if (!(dirty_flags & CODE_DIRTY_FLAG)) { 3010 3386 #if !defined(CONFIG_USER_ONLY) 3011 3387 tb_invalidate_phys_page_fast(ram_addr, 2); 3012 # ifdef VBOX 3013 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size)) 3014 dirty_flags = 0xff; 3015 else 3016 # endif /* VBOX */ 3017 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; 3388 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr); 3018 3389 #endif 3019 3390 } … … 3024 3395 #endif 3025 3396 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG); 3026 #ifdef VBOX 3027 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size)) 3028 #endif 3029 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags; 3397 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags); 3030 3398 /* we remove the notdirty callback only if the code has been 3031 3399 flushed */ … … 3038 3406 { 3039 3407 int dirty_flags; 3040 #ifdef VBOX 3041 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size)) 3042 dirty_flags = 0xff; 3043 else 3044 #endif /* VBOX */ 3045 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; 3408 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr); 3046 3409 if (!(dirty_flags & CODE_DIRTY_FLAG)) { 3047 3410 #if !defined(CONFIG_USER_ONLY) 3048 3411 tb_invalidate_phys_page_fast(ram_addr, 4); 3049 # ifdef VBOX 3050 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size)) 3051 dirty_flags = 0xff; 3052 else 3053 # endif /* VBOX */ 3054 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; 3412 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr); 3055 3413 #endif 3056 3414 } … … 3061 3419 #endif 3062 3420 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG); 3063 #ifdef VBOX 3064 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size)) 3065 #endif 3066 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags; 3421 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags); 3067 3422 /* we remove the notdirty callback only if the code has been 3068 3423 flushed */ … … 3182 3537 }; 3183 3538 3184 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr, 3185 unsigned int len) 3186 { 3187 uint32_t ret; 3188 unsigned int idx; 3189 3190 idx = SUBPAGE_IDX(addr); 3539 static inline uint32_t subpage_readlen (subpage_t *mmio, 3540 target_phys_addr_t addr, 3541 unsigned int len) 3542 { 3543 unsigned int idx = SUBPAGE_IDX(addr); 3191 3544 #if defined(DEBUG_SUBPAGE) 3192 3545 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__, 3193 3546 mmio, len, addr, idx); 3194 3547 #endif 3195 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], 3196 addr + mmio->region_offset[idx][0][len]);3197 3198 return ret;3548 3549 addr += 
mmio->region_offset[idx]; 3550 idx = mmio->sub_io_index[idx]; 3551 return io_mem_read[idx][len](io_mem_opaque[idx], addr); 3199 3552 } 3200 3553 3201 3554 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr, 3202 uint32_t value, unsigned int len) 3203 { 3204 unsigned int idx; 3205 3206 idx = SUBPAGE_IDX(addr); 3555 uint32_t value, unsigned int len) 3556 { 3557 unsigned int idx = SUBPAGE_IDX(addr); 3207 3558 #if defined(DEBUG_SUBPAGE) 3208 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__, 3209 mmio, len, addr, idx, value); 3210 #endif 3211 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], 3212 addr + mmio->region_offset[idx][1][len], 3213 value); 3559 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", 3560 __func__, mmio, len, addr, idx, value); 3561 #endif 3562 3563 addr += mmio->region_offset[idx]; 3564 idx = mmio->sub_io_index[idx]; 3565 io_mem_write[idx][len](io_mem_opaque[idx], addr, value); 3214 3566 } 3215 3567 3216 3568 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr) 3217 3569 { 3218 #if defined(DEBUG_SUBPAGE)3219 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);3220 #endif3221 3222 3570 return subpage_readlen(opaque, addr, 0); 3223 3571 } … … 3226 3574 uint32_t value) 3227 3575 { 3228 #if defined(DEBUG_SUBPAGE)3229 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);3230 #endif3231 3576 subpage_writelen(opaque, addr, value, 0); 3232 3577 } … … 3234 3579 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr) 3235 3580 { 3236 #if defined(DEBUG_SUBPAGE)3237 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);3238 #endif3239 3240 3581 return subpage_readlen(opaque, addr, 1); 3241 3582 } … … 3244 3585 uint32_t value) 3245 3586 { 3246 #if defined(DEBUG_SUBPAGE)3247 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);3248 #endif3249 3587 subpage_writelen(opaque, addr, value, 1); 3250 3588 } … … 3252 3590 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr) 3253 3591 { 3254 #if defined(DEBUG_SUBPAGE)3255 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);3256 #endif3257 3258 3592 return subpage_readlen(opaque, addr, 2); 3259 3593 } 3260 3594 3261 static void subpage_writel (void *opaque, 3262 target_phys_addr_t addr, uint32_t value) 3263 { 3264 #if defined(DEBUG_SUBPAGE) 3265 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value); 3266 #endif 3595 static void subpage_writel (void *opaque, target_phys_addr_t addr, 3596 uint32_t value) 3597 { 3267 3598 subpage_writelen(opaque, addr, value, 2); 3268 3599 } … … 3284 3615 { 3285 3616 int idx, eidx; 3286 unsigned int i;3287 3617 3288 3618 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) … … 3294 3624 mmio, start, end, idx, eidx, memory); 3295 3625 #endif 3296 memory >>= IO_MEM_SHIFT;3626 memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); 3297 3627 for (; idx <= eidx; idx++) { 3298 for (i = 0; i < 4; i++) { 3299 if (io_mem_read[memory][i]) { 3300 mmio->mem_read[idx][i] = &io_mem_read[memory][i]; 3301 mmio->opaque[idx][0][i] = io_mem_opaque[memory]; 3302 mmio->region_offset[idx][0][i] = region_offset; 3303 } 3304 if (io_mem_write[memory][i]) { 3305 mmio->mem_write[idx][i] = &io_mem_write[memory][i]; 3306 mmio->opaque[idx][1][i] = io_mem_opaque[memory]; 3307 mmio->region_offset[idx][1][i] = region_offset; 3308 } 3309 } 3628 mmio->sub_io_index[idx] = memory; 3629 mmio->region_offset[idx] = region_offset; 3310 3630 
} 3311 3631 … … 3313 3633 } 3314 3634 3315 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys, 3316 ram_addr_t orig_memory, ram_addr_t region_offset) 3635 static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys, 3636 ram_addr_t orig_memory, 3637 ram_addr_t region_offset) 3317 3638 { 3318 3639 subpage_t *mmio; … … 3328 3649 #endif 3329 3650 *phys = subpage_memory | IO_MEM_SUBPAGE; 3330 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory, 3331 region_offset); 3651 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset); 3332 3652 3333 3653 return mmio; … … 3343 3663 return i; 3344 3664 } 3345 3665 fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES); 3346 3666 return -1; 3347 3667 } … … 3359 3679 void *opaque) 3360 3680 { 3361 int i , subwidth = 0;3681 int i; 3362 3682 3363 3683 if (io_index <= 0) { … … 3371 3691 } 3372 3692 3373 for(i = 0;i < 3; i++) { 3374 if (!mem_read[i] || !mem_write[i]) 3375 subwidth = IO_MEM_SUBWIDTH; 3376 io_mem_read[io_index][i] = mem_read[i]; 3377 io_mem_write[io_index][i] = mem_write[i]; 3693 for (i = 0; i < 3; ++i) { 3694 io_mem_read[io_index][i] 3695 = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]); 3696 } 3697 for (i = 0; i < 3; ++i) { 3698 io_mem_write[io_index][i] 3699 = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]); 3378 3700 } 3379 3701 io_mem_opaque[io_index] = opaque; 3380 return (io_index << IO_MEM_SHIFT) | subwidth; 3702 3703 return (io_index << IO_MEM_SHIFT); 3381 3704 } 3382 3705 … … 3419 3742 /* physical memory access (slow version, mainly for debug) */ 3420 3743 #if defined(CONFIG_USER_ONLY) 3421 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,3422 3744 int cpu_memory_rw_debug(CPUState *env, target_ulong addr, 3745 uint8_t *buf, int len, int is_write) 3423 3746 { 3424 3747 int l, flags; … … 3433 3756 flags = page_get_flags(page); 3434 3757 if (!(flags & PAGE_VALID)) 3435 return ;3758 return -1; 3436 3759 if (is_write) { 3437 3760 if (!(flags & PAGE_WRITE)) 3438 return ;3761 return -1; 3439 3762 /* XXX: this code should not depend on lock_user */ 3440 3763 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0))) 3441 /* FIXME - should this return an error rather than just fail? */ 3442 return; 3764 return -1; 3443 3765 memcpy(p, buf, l); 3444 3766 unlock_user(p, addr, l); 3445 3767 } else { 3446 3768 if (!(flags & PAGE_READ)) 3447 return ;3769 return -1; 3448 3770 /* XXX: this code should not depend on lock_user */ 3449 3771 if (!(p = lock_user(VERIFY_READ, addr, l, 1))) 3450 /* FIXME - should this return an error rather than just fail? 
*/ 3451 return; 3772 return -1; 3452 3773 memcpy(buf, p, l); 3453 3774 unlock_user(p, addr, 0); … … 3457 3778 addr += l; 3458 3779 } 3780 return 0; 3459 3781 } 3460 3782 … … 3532 3854 tb_invalidate_phys_page_range(addr1, addr1 + l, 0); 3533 3855 /* set dirty bit */ 3534 #ifdef VBOX 3535 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size)) 3536 #endif 3537 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |= 3538 (0xff & ~CODE_DIRTY_FLAG); 3856 cpu_physical_memory_set_dirty_flags( 3857 addr1, (0xff & ~CODE_DIRTY_FLAG)); 3539 3858 } 3540 3859 } … … 3760 4079 tb_invalidate_phys_page_range(addr1, addr1 + l, 0); 3761 4080 /* set dirty bit */ 3762 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=3763 (0xff & ~CODE_DIRTY_FLAG);4081 cpu_physical_memory_set_dirty_flags( 4082 addr1, (0xff & ~CODE_DIRTY_FLAG)); 3764 4083 } 3765 4084 addr1 += l; … … 3865 4184 } 3866 4185 3867 /* XXX: optimize*/4186 /* warning: addr must be aligned */ 3868 4187 uint32_t lduw_phys(target_phys_addr_t addr) 3869 4188 { 3870 uint16_t val; 3871 cpu_physical_memory_read(addr, (uint8_t *)&val, 2); 3872 return tswap16(val); 4189 int io_index; 4190 uint8_t *ptr; 4191 uint64_t val; 4192 unsigned long pd; 4193 PhysPageDesc *p; 4194 4195 p = phys_page_find(addr >> TARGET_PAGE_BITS); 4196 if (!p) { 4197 pd = IO_MEM_UNASSIGNED; 4198 } else { 4199 pd = p->phys_offset; 4200 } 4201 4202 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && 4203 !(pd & IO_MEM_ROMD)) { 4204 /* I/O case */ 4205 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); 4206 if (p) 4207 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset; 4208 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr); 4209 } else { 4210 /* RAM case */ 4211 #ifndef VBOX 4212 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) + 4213 (addr & ~TARGET_PAGE_MASK); 4214 val = lduw_p(ptr); 4215 #else 4216 val = remR3PhysReadU16((pd & TARGET_PAGE_MASK) | (addr & ~TARGET_PAGE_MASK)); 4217 #endif 4218 } 4219 return val; 3873 4220 } 3874 4221 … … 3910 4257 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0); 3911 4258 /* set dirty bit */ 3912 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=3913 (0xff & ~CODE_DIRTY_FLAG);4259 cpu_physical_memory_set_dirty_flags( 4260 addr1, (0xff & ~CODE_DIRTY_FLAG)); 3914 4261 } 3915 4262 } … … 3988 4335 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0); 3989 4336 /* set dirty bit */ 3990 #ifdef VBOX 3991 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size)) 3992 #endif 3993 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |= 3994 (0xff & ~CODE_DIRTY_FLAG); 4337 cpu_physical_memory_set_dirty_flags(addr1, 4338 (0xff & ~CODE_DIRTY_FLAG)); 3995 4339 } 3996 4340 } … … 4004 4348 } 4005 4349 4006 /* XXX: optimize*/4350 /* warning: addr must be aligned */ 4007 4351 void stw_phys(target_phys_addr_t addr, uint32_t val) 4008 4352 { 4009 uint16_t v = tswap16(val); 4010 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2); 4353 int io_index; 4354 uint8_t *ptr; 4355 unsigned long pd; 4356 PhysPageDesc *p; 4357 4358 p = phys_page_find(addr >> TARGET_PAGE_BITS); 4359 if (!p) { 4360 pd = IO_MEM_UNASSIGNED; 4361 } else { 4362 pd = p->phys_offset; 4363 } 4364 4365 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) { 4366 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); 4367 if (p) 4368 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset; 4369 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val); 4370 } else { 4371 unsigned long addr1; 4372 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK); 4373 /* RAM case */ 4374 #ifndef VBOX 4375 ptr = 
qemu_get_ram_ptr(addr1); 4376 stw_p(ptr, val); 4377 #else 4378 remR3PhysWriteU16(addr1, val); NOREF(ptr); 4379 #endif 4380 if (!cpu_physical_memory_is_dirty(addr1)) { 4381 /* invalidate code */ 4382 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0); 4383 /* set dirty bit */ 4384 cpu_physical_memory_set_dirty_flags(addr1, 4385 (0xff & ~CODE_DIRTY_FLAG)); 4386 } 4387 } 4011 4388 } 4012 4389 … … 4017 4394 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8); 4018 4395 } 4019 4020 #endif4021 4396 4022 4397 #ifndef VBOX … … 4039 4414 l = len; 4040 4415 phys_addr += (addr & ~TARGET_PAGE_MASK); 4041 #if !defined(CONFIG_USER_ONLY)4042 4416 if (is_write) 4043 4417 cpu_physical_memory_write_rom(phys_addr, buf, l); 4044 4418 else 4045 #endif4046 4419 cpu_physical_memory_rw(phys_addr, buf, l, is_write); 4047 4420 len -= l; … … 4052 4425 } 4053 4426 #endif /* !VBOX */ 4427 #endif 4054 4428 4055 4429 /* in deterministic execution mode, instructions doing device I/Os … … 4111 4485 cpu_resume_from_signal(env, NULL); 4112 4486 } 4487 4488 #if !defined(CONFIG_USER_ONLY) 4113 4489 4114 4490 #ifndef VBOX … … 4167 4543 #endif /* !VBOX */ 4168 4544 4169 #if !defined(CONFIG_USER_ONLY)4170 4171 4545 #define MMUSUFFIX _cmmu 4172 4546 #define GETPC() NULL
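The qemu_get_ram_ptr()/qemu_ram_addr_from_host() hunks above replace the hand-rolled singly linked list walk (prev/prevp plus manual re-linking) with QLIST_FOREACH and QLIST_REMOVE/QLIST_INSERT_HEAD, keeping the move-to-front behaviour and the single unsigned range compare. A minimal standalone sketch of that lookup pattern, written against the BSD <sys/queue.h> LIST macros that QEMU's QLIST_* mirror; the RAMBlockSketch type, the backing arrays and the main() harness are illustrative only, not VirtualBox code.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

typedef uint64_t ram_addr_t;

typedef struct RAMBlockSketch {
    uint8_t   *host;      /* host mapping of the block */
    ram_addr_t offset;    /* guest ram address the block starts at */
    ram_addr_t length;
    LIST_ENTRY(RAMBlockSketch) next;
} RAMBlockSketch;

LIST_HEAD(block_list_sketch, RAMBlockSketch);
static struct block_list_sketch blocks = LIST_HEAD_INITIALIZER(blocks);

static uint8_t *get_ram_ptr_sketch(ram_addr_t addr)
{
    RAMBlockSketch *block;

    LIST_FOREACH(block, &blocks, next) {
        /* one unsigned compare covers both bounds, as in the new code */
        if (addr - block->offset < block->length) {
            /* move-to-front: the block just hit is found first next time */
            LIST_REMOVE(block, next);
            LIST_INSERT_HEAD(&blocks, block, next);
            return block->host + (addr - block->offset);
        }
    }
    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();
    return NULL;
}

int main(void)
{
    static uint8_t backing[2][4096];
    RAMBlockSketch a = { backing[0], 0x0000, 4096 };
    RAMBlockSketch b = { backing[1], 0x1000, 4096 };

    LIST_INSERT_HEAD(&blocks, &a, next);
    LIST_INSERT_HEAD(&blocks, &b, next);

    printf("0x0010 -> host %p\n", (void *)get_ram_ptr_sketch(0x0010));
    printf("0x1ff0 -> host %p\n", (void *)get_ram_ptr_sketch(0x1ff0));
    return 0;
}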
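The notdirty_mem_write*, st*_phys and cpu_physical_memory_rw hunks above replace direct phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] accesses, and the VBox-only phys_ram_dirty_size bounds checks wrapped around them, with the cpu_physical_memory_get_dirty_flags()/cpu_physical_memory_set_dirty_flags() helpers. A sketch of what such accessors plausibly look like, assuming one flag byte per target page; the array size, the names and the CODE_DIRTY_FLAG value are assumptions for illustration, not the real QEMU definitions.

#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_BITS 12
#define CODE_DIRTY_FLAG  0x02            /* value assumed for the sketch */
#define SKETCH_PAGES     (1u << 16)      /* 256 MiB of guest RAM at 4 KiB pages */

typedef uint64_t ram_addr_t;

static uint8_t phys_dirty[SKETCH_PAGES]; /* one dirty byte per guest page */

static inline int get_dirty_flags_sketch(ram_addr_t ram_addr)
{
    return phys_dirty[ram_addr >> TARGET_PAGE_BITS];
}

static inline void set_dirty_flags_sketch(ram_addr_t ram_addr, int flags)
{
    phys_dirty[ram_addr >> TARGET_PAGE_BITS] |= flags;
}

int main(void)
{
    ram_addr_t addr = 0x3000;

    /* mirror the notdirty write path: set everything except CODE_DIRTY */
    set_dirty_flags_sketch(addr, 0xff & ~CODE_DIRTY_FLAG);
    printf("flags at %#llx: %#x\n", (unsigned long long)addr,
           (unsigned)get_dirty_flags_sketch(addr));
    return 0;
}

Centralising the per-page lookup in one pair of helpers is presumably what allows the changeset to drop the repeated RT_UNLIKELY range checks at every call site.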
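The subpage rework above (subpage_readlen, subpage_writelen, subpage_register) replaces the per-subpage tables of function pointers and opaque values, kept separately per direction and per access width, with a single sub_io_index and region_offset per slot that index back into the shared io_mem_read/io_mem_write tables. A compact sketch of that indirection for the read path only; the table sizes, the dummy handler and the modulo stand-in for SUBPAGE_IDX() are made up for illustration.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t target_phys_addr_t;
typedef uint32_t (*mem_read_fn)(void *opaque, target_phys_addr_t addr);

#define IO_MEM_NB  4   /* illustrative number of registered handlers */
#define SUBPAGES   16  /* illustrative number of sub-page slots */

/* global tables indexed by handler number and access size (0=b, 1=w, 2=l) */
static mem_read_fn io_mem_read_sketch[IO_MEM_NB][3];
static void *io_mem_opaque_sketch[IO_MEM_NB];

/* per sub-page slot: which handler to use and how to rebase the address */
typedef struct {
    unsigned int       sub_io_index[SUBPAGES];
    target_phys_addr_t region_offset[SUBPAGES];
} subpage_sketch_t;

static uint32_t subpage_read_sketch(subpage_sketch_t *mmio,
                                    target_phys_addr_t addr, unsigned int len)
{
    unsigned int idx = addr % SUBPAGES;  /* stand-in for SUBPAGE_IDX() */

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    return io_mem_read_sketch[idx][len](io_mem_opaque_sketch[idx], addr);
}

/* trivial handler used to exercise the dispatch */
static uint32_t dummy_readl(void *opaque, target_phys_addr_t addr)
{
    (void)opaque;
    return (uint32_t)addr ^ 0xdeadbeefu;
}

int main(void)
{
    static subpage_sketch_t sp;

    io_mem_read_sketch[1][2] = dummy_readl;  /* handler #1, 32-bit reads */
    sp.sub_io_index[3] = 1;
    sp.region_offset[3] = 0x100;

    printf("%#x\n", (unsigned)subpage_read_sketch(&sp, 3, 2));
    return 0;
}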