Changeset 6532 in vbox
Timestamp: Jan 28, 2008 6:10:09 PM (17 years ago)
Location: trunk
Files: 7 edited
trunk/include/VBox/pgm.h
Diff r5999 → r6532:

  * @{
  */

-/** Enable dynamic allocation of guest physical RAM. */
-#define PGM_DYNAMIC_RAM_ALLOC

 /** Chunk size for dynamically allocated physical memory. */
trunk/src/VBox/VMM/MM.cpp
Diff r6529 → r6532:

         return VINF_SUCCESS;
     }
-#ifdef PGM_DYNAMIC_RAM_ALLOC
     Log(("MM: %llu bytes of RAM%s\n", cbRam, fPreAlloc ? " (PreAlloc)" : ""));
     pVM->mm.s.pvRamBaseHC = 0; /** @todo obsolete */
…
         }
     }
-#else
-    unsigned cPages = cbRam >> PAGE_SHIFT;
-    Log(("MM: %llu bytes of RAM (%d pages)\n", cbRam, cPages));
-    rc = SUPPageAlloc(cPages, &pVM->mm.s.pvRamBaseHC);
-    if (VBOX_SUCCESS(rc))
-    {
-        pVM->mm.s.cbRamBase = cPages << PAGE_SHIFT;
-        rc = MMR3PhysRegister(pVM, pVM->mm.s.pvRamBaseHC, 0, pVM->mm.s.cbRamBase, 0, "Main Memory");
-        if (VBOX_SUCCESS(rc))
-            return rc;
-        SUPPageFree(pVM->mm.s.pvRamBaseHC);
-    }
-    else
-        LogRel(("MMR3InitPage: Failed to allocate %u bytes of RAM! rc=%Vrc\n", cPages << PAGE_SHIFT));
-#endif
 }
 else
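The deleted #else branch was the old static path: allocate every guest page up front with SUPPageAlloc() and register the whole block once with MMR3PhysRegister(). What remains is the dynamic path, which registers guest RAM chunk by chunk so host memory can be committed lazily. A minimal sketch of that shape, assuming a 1 MiB chunk size and using a hypothetical registerChunk() in place of the PGMR3PhysRegisterChunk() plumbing the diff elides:

    /* Sketch only: chunk-by-chunk registration replacing one big allocation.
     * registerChunk() is a hypothetical stand-in; the real code goes through
     * PGMR3PhysRegisterChunk() and commits chunks lazily on first access. */
    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    #define CHUNK_SIZE (1ULL << 20)   /* stand-in for PGM_DYNAMIC_CHUNK_SIZE */

    static int registerChunk(uint64_t physBase, size_t cb)
    {
        printf("register %zu bytes at %#llx\n", cb, (unsigned long long)physBase);
        return 0;                     /* 0 = success, mirroring VINF_SUCCESS */
    }

    static int registerRamInChunks(uint64_t cbRam)
    {
        /* Walk guest-physical space one chunk at a time. */
        for (uint64_t off = 0; off < cbRam; off += CHUNK_SIZE)
        {
            size_t cb = (size_t)(cbRam - off < CHUNK_SIZE ? cbRam - off : CHUNK_SIZE);
            int rc = registerChunk(off, cb);
            if (rc != 0)
                return rc;            /* caller must unwind earlier chunks */
        }
        return 0;
    }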
trunk/src/VBox/VMM/PGMPhys.cpp
Diff r6528 → r6532:

 PGMR3DECL(int) PGMR3PhysRegisterChunk(PVM pVM, void *pvRam, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, const SUPPAGE *paPages, const char *pszDesc)
 {
-#ifdef PGM_DYNAMIC_RAM_ALLOC
     NOREF(pszDesc);
…
     return VINF_SUCCESS;
-#else  /* !PGM_DYNAMIC_RAM_ALLOC */
-    AssertReleaseMsgFailed(("Shouldn't ever get here when PGM_DYNAMIC_RAM_ALLOC isn't defined!\n"));
-    return VERR_INTERNAL_ERROR;
-#endif /* !PGM_DYNAMIC_RAM_ALLOC */
 }
trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp
Diff r6528 → r6532:

 #endif

-#ifdef PGM_DYNAMIC_RAM_ALLOC
     if ((GCPhys & PGM_DYNAMIC_CHUNK_BASE_MASK) != ((GCPhys+cbRange-1) & PGM_DYNAMIC_CHUNK_BASE_MASK))
     {
…
         return VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY;
     }
-#endif

     PPGMRAMRANGE pRam;
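This check, now compiled unconditionally, rejects accesses whose byte range straddles two dynamic chunks (returning VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY), because adjacent guest chunks may live at unrelated host addresses. A self-contained illustration of the mask arithmetic; the 1 MiB chunk size is an assumption, not taken from the headers:

    /* Sketch of the boundary test the diff makes unconditional. */
    #include <stdio.h>
    #include <stdint.h>

    #define CHUNK_SHIFT 20                                   /* 1 MiB chunks, assumed */
    #define CHUNK_BASE_MASK (~(uint64_t)((1ULL << CHUNK_SHIFT) - 1))

    static int crossesChunkBoundary(uint64_t GCPhys, uint64_t cbRange)
    {
        /* A range crosses a boundary when its first and last byte fall
           into different chunks, i.e. their chunk-base bits differ. */
        return (GCPhys & CHUNK_BASE_MASK)
            != ((GCPhys + cbRange - 1) & CHUNK_BASE_MASK);
    }

    int main(void)
    {
        printf("%d\n", crossesChunkBoundary(0x000FF000, 0x1000)); /* 0: stays in chunk 0 */
        printf("%d\n", crossesChunkBoundary(0x000FF000, 0x2000)); /* 1: spills into chunk 1 */
        return 0;
    }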
trunk/src/recompiler/cpu-all.h
Diff r6211 → r6532. Three hunks change preprocessor conditionals; every other hunk in this file only strips trailing whitespace:

 void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val);

-#ifdef PGM_DYNAMIC_RAM_ALLOC
 void remR3GrowDynRange(unsigned long physaddr);
-#endif
 #if 0 /*defined(RT_ARCH_AMD64) && defined(VBOX_STRICT)*/
…
 extern uint32_t phys_ram_dirty_size;
 #endif /* VBOX */
-#if !defined(VBOX) || !(defined(PGM_DYNAMIC_RAM_ALLOC) || defined(REM_PHYS_ADDR_IN_TLB))
+#if !defined(VBOX)
 extern uint8_t *phys_ram_base;
 #endif
…
 #define IO_MEM_NOTDIRTY (4 << IO_MEM_SHIFT) /* used internally, never use directly */
-#if defined(VBOX) && defined(PGM_DYNAMIC_RAM_ALLOC)
+#if defined(VBOX)
 #define IO_MEM_RAM_MISSING (5 << IO_MEM_SHIFT) /* used internally, never use directly */
 #endif
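Elsewhere in this header, cpu_physical_memory_is_dirty() and friends consult a byte-per-page flag array, and the VBox build guards the lookup with phys_ram_dirty_size because some ranges (the high PC-BIOS mapping) fall outside the dirty map. A rough sketch of that lookup; the sizes and the out-of-range result are illustrative assumptions, since the diff elides the actual return:

    /* Sketch of per-page dirty flags with the VBox range guard from the diff. */
    #include <stdint.h>

    #define TARGET_PAGE_BITS 12
    #define CODE_DIRTY_FLAG  0x02

    static uint8_t  phys_ram_dirty[4096];            /* one flag byte per page */
    static uint32_t phys_ram_dirty_size = sizeof(phys_ram_dirty);

    static int is_dirty(uint64_t addr, int dirty_flags)
    {
        uint64_t page = addr >> TARGET_PAGE_BITS;
        if (page >= phys_ram_dirty_size)             /* e.g. high PC-BIOS pages */
            return 0;                                /* assumed: treat as clean */
        return phys_ram_dirty[page] & dirty_flags;
    }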
trunk/src/recompiler/exec-all.h
Diff r3727 → r6532. Two hunks change preprocessor conditionals; every other hunk in this file only strips trailing whitespace:

 # ifdef VBOX
 target_ulong remR3PhysGetPhysicalAddressCode(CPUState *env, target_ulong addr, CPUTLBEntry *pTLBEntry);
-# if defined(PGM_DYNAMIC_RAM_ALLOC) && !defined(REM_PHYS_ADDR_IN_TLB)
+# if !defined(REM_PHYS_ADDR_IN_TLB)
 target_ulong remR3HCVirt2GCPhys(void *env, void *addr);
 # endif
…
 # if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
     return addr + env->tlb_table[is_user][index].addend;
-# elif defined(VBOX) && defined(PGM_DYNAMIC_RAM_ALLOC)
+# elif defined(VBOX)
     return remR3HCVirt2GCPhys(env, (void *)(addr + env->tlb_table[is_user][index].addend));
 # else
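Most of this header's hunks are whitespace-only, but they pass through the testandset()/spinlock helpers that the diff shows implemented with lock; cmpxchgl inline assembly. For reference, a portable C11 sketch of the same test-and-set idea (an illustration of the technique, not what the header compiles to):

    /* C11 rendering of a test-and-set spinlock like the header's
       cmpxchg-based variants. */
    #include <stdatomic.h>

    typedef atomic_int spinlock_t;

    static inline int testandset(spinlock_t *p)
    {
        int expected = 0;
        /* Returns 0 when the lock was free and we took it, non-zero
           otherwise, mirroring the cmpxchg-based originals. */
        return !atomic_compare_exchange_strong(p, &expected, 1);
    }

    static inline void spin_lock(spinlock_t *p)
    {
        while (testandset(p))
            ;   /* busy-wait; the original also just spins */
    }

    static inline void spin_unlock(spinlock_t *p)
    {
        atomic_store(p, 0);
    }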
trunk/src/recompiler/exec.c
Diff r4376 → r6532. The substantive hunks make the VBox dynamic-RAM code unconditional; the many remaining hunks only strip trailing whitespace:

 uint32_t phys_ram_dirty_size;
 #endif /* VBOX */
-#if !defined(VBOX) || !(defined(PGM_DYNAMIC_RAM_ALLOC) || defined(REM_PHYS_ADDR_IN_TLB))
+#if !defined(VBOX)
 uint8_t *phys_ram_base;
 #endif
…
         pd[i].phys_offset = IO_MEM_UNASSIGNED;
     }
-#if defined(VBOX) && defined(PGM_DYNAMIC_RAM_ALLOC)
+#if defined(VBOX)
     pd = ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
     if (RT_UNLIKELY((pd->phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING))
…
 #if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
     start1 = start;
-#elif !defined(VBOX) || !defined(PGM_DYNAMIC_RAM_ALLOC)
+#elif !defined(VBOX)
     start1 = start + (unsigned long)phys_ram_base;
 #else
…
 #if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
     ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
-#elif !defined(VBOX) || !defined(PGM_DYNAMIC_RAM_ALLOC)
+#elif !defined(VBOX)
     ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
                tlb_entry->addend - (unsigned long)phys_ram_base;
 #else
…
 #if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
         addend = pd & TARGET_PAGE_MASK;
-#elif !defined(VBOX) || !defined(PGM_DYNAMIC_RAM_ALLOC)
+#elif !defined(VBOX)
         addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
 #else
         addend = (unsigned long)remR3GCPhys2HCVirt(env, pd & TARGET_PAGE_MASK);
 #endif
…
         p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
         p->phys_offset = phys_offset;
-#if !defined(VBOX) || !defined(PGM_DYNAMIC_RAM_ALLOC)
+#if !defined(VBOX)
         if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
             (phys_offset & IO_MEM_ROMD))
…
(in each of the three notdirty_mem_write variants:)
 #if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
     ram_addr = addr;
-#elif !defined(VBOX) || !defined(PGM_DYNAMIC_RAM_ALLOC)
+#elif !defined(VBOX)
     ram_addr = addr - (unsigned long)phys_ram_base;
 #else
…
     cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
-#if defined(VBOX) && defined(PGM_DYNAMIC_RAM_ALLOC)
+#if defined(VBOX)
     cpu_register_io_memory(IO_MEM_RAM_MISSING >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
     io_mem_nb = 6;
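Several of the VBox hunks above live in phys_page_find_alloc(), which resolves a guest page index through a two-level descriptor table before the now-unconditional IO_MEM_RAM_MISSING probe runs. A compact sketch of that two-level lookup, with illustrative table sizes:

    /* Sketch of the two-level physical page descriptor lookup that
       phys_page_find_alloc() implements; sizes are illustrative, and the
       index is assumed to be below L1_SIZE * L2_SIZE. */
    #include <stdlib.h>
    #include <stdint.h>

    #define L1_BITS 10
    #define L2_BITS 10
    #define L1_SIZE (1 << L1_BITS)
    #define L2_SIZE (1 << L2_BITS)

    typedef struct { uint32_t phys_offset; } PhysPageDesc;

    static PhysPageDesc *l1_phys_map[L1_SIZE];

    static PhysPageDesc *phys_page_find_alloc(uint32_t index)
    {
        PhysPageDesc **lp = &l1_phys_map[index >> L2_BITS];
        if (!*lp)
        {
            /* allocate and zero a whole L2 leaf on first use */
            *lp = calloc(L2_SIZE, sizeof(PhysPageDesc));
            if (!*lp)
                return NULL;
        }
        return *lp + (index & (L2_SIZE - 1));
    }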