VirtualBox

Changeset 6532 in vbox


Timestamp: Jan 28, 2008 6:10:09 PM
Author: vboxsync
Message: Cleaned out the PGM_DYNAMIC_RAM_ALLOC tests to avoid unnecessary mess with VBOX_WITH_NEW_PHYS_CODE.

Location: trunk
Files: 7 edited

  • trunk/include/VBox/pgm.h

    r5999 r6532  
    3838 * @{
    3939 */
    40 
    41 /** Enable dynamic allocation of guest physical RAM. */
    42 #define PGM_DYNAMIC_RAM_ALLOC
    4340
    4441/** Chunk size for dynamically allocated physical memory. */
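Note: the hunk above drops the unconditional #define of PGM_DYNAMIC_RAM_ALLOC from pgm.h. Since the macro had always been defined, every #ifdef PGM_DYNAMIC_RAM_ALLOC test in the tree took its "dynamic" branch and the matching #else branches were dead code; the remaining hunks in this changeset remove the guards and the dead branches accordingly. A minimal sketch of the pattern, using placeholder types and function names rather than the real VirtualBox APIs:

    /* Illustrative only: PVM, registerRamInChunks and registerRamInOneBlock
     * are placeholders, not the real VirtualBox declarations. */
    typedef struct VM *PVM;
    int registerRamInChunks(PVM pVM, unsigned long long cbRam);
    int registerRamInOneBlock(PVM pVM, unsigned long long cbRam);

    int initRam(PVM pVM, unsigned long long cbRam)
    {
    #ifdef PGM_DYNAMIC_RAM_ALLOC            /* before r6532: always true   */
        return registerRamInChunks(pVM, cbRam);
    #else                                   /* dead branch, removed in r6532 */
        return registerRamInOneBlock(pVM, cbRam);
    #endif
    }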
  • trunk/src/VBox/VMM/MM.cpp

    r6529 r6532  
    209209            return VINF_SUCCESS;
    210210        }
    211 #ifdef PGM_DYNAMIC_RAM_ALLOC
    212211        Log(("MM: %llu bytes of RAM%s\n", cbRam, fPreAlloc ? " (PreAlloc)" : ""));
    213212        pVM->mm.s.pvRamBaseHC = 0; /** @todo obsolete */
     
    233232            }
    234233        }
    235 #else
    236         unsigned    cPages = cbRam >> PAGE_SHIFT;
    237         Log(("MM: %llu bytes of RAM (%d pages)\n", cbRam, cPages));
    238         rc = SUPPageAlloc(cPages, &pVM->mm.s.pvRamBaseHC);
    239         if (VBOX_SUCCESS(rc))
    240         {
    241             pVM->mm.s.cbRamBase = cPages << PAGE_SHIFT;
    242             rc = MMR3PhysRegister(pVM, pVM->mm.s.pvRamBaseHC, 0, pVM->mm.s.cbRamBase, 0, "Main Memory");
    243             if (VBOX_SUCCESS(rc))
    244                 return rc;
    245             SUPPageFree(pVM->mm.s.pvRamBaseHC);
    246         }
    247         else
    248             LogRel(("MMR3InitPage: Failed to allocate %u bytes of RAM! rc=%Vrc\n", cPages << PAGE_SHIFT));
    249 #endif
    250234    }
    251235    else
  • trunk/src/VBox/VMM/PGMPhys.cpp

    r6528 r6532  
    268268PGMR3DECL(int) PGMR3PhysRegisterChunk(PVM pVM, void *pvRam, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, const SUPPAGE *paPages, const char *pszDesc)
    269269{
    270 #ifdef PGM_DYNAMIC_RAM_ALLOC
    271270    NOREF(pszDesc);
    272271
     
    325324
    326325    return VINF_SUCCESS;
    327 #else /* !PGM_DYNAMIC_RAM_ALLOC */
    328     AssertReleaseMsgFailed(("Shouldn't ever get here when PGM_DYNAMIC_RAM_ALLOC isn't defined!\n"));
    329     return VERR_INTERNAL_ERROR;
    330 #endif /* !PGM_DYNAMIC_RAM_ALLOC */
    331326}
    332327
  • trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp

    r6528 r6532  
    763763#endif
    764764
    765 #ifdef PGM_DYNAMIC_RAM_ALLOC
    766765    if ((GCPhys & PGM_DYNAMIC_CHUNK_BASE_MASK) != ((GCPhys+cbRange-1) & PGM_DYNAMIC_CHUNK_BASE_MASK))
    767766    {
     
    770769        return VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY;
    771770    }
    772 #endif
    773771
    774772    PPGMRAMRANGE pRam;
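In PGMAllPhys.cpp the guard around the chunk-boundary check is removed, so the test that a physical range must not straddle a dynamic-allocation chunk is now always compiled. A worked example of the masked comparison, with an assumed 1 MiB chunk size (the real values come from PGM_DYNAMIC_CHUNK_SIZE / PGM_DYNAMIC_CHUNK_BASE_MASK in pgm.h and are not shown in this hunk):

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed chunk size, for illustration only. */
    #define CHUNK_SIZE      UINT64_C(0x00100000)        /* 1 MiB */
    #define CHUNK_BASE_MASK (~(CHUNK_SIZE - 1))

    int main(void)
    {
        uint64_t GCPhys  = UINT64_C(0x000FF000);  /* 4 KiB below a boundary */
        uint64_t cbRange = UINT64_C(0x2000);      /* 8 KiB access           */

        /* Same shape as the retained test: the first and last byte of the
         * range must map to the same chunk base. */
        if ((GCPhys & CHUNK_BASE_MASK) != ((GCPhys + cbRange - 1) & CHUNK_BASE_MASK))
            printf("range crosses a chunk boundary -> VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY\n");
        return 0;
    }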
  • trunk/src/recompiler/cpu-all.h

    r6211 r6532  
    11/*
    22 * defines common to all virtual CPUs
    3  * 
     3 *
    44 *  Copyright (c) 2003 Fabrice Bellard
    55 *
     
    3333#endif
    3434
    35 /* some important defines: 
    36  * 
     35/* some important defines:
     36 *
    3737 * WORDS_ALIGNED : if defined, the host cpu can only make word aligned
    3838 * memory accesses.
    39  * 
     39 *
    4040 * WORDS_BIGENDIAN : if defined, the host cpu is big endian and
    4141 * otherwise little endian.
    42  * 
     42 *
    4343 * (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet))
    44  * 
     44 *
    4545 * TARGET_WORDS_BIGENDIAN : same for target cpu
    4646 */
     
    156156 * (empty): integer access
    157157 *   f    : float access
    158  * 
     158 *
    159159 * sign is:
    160160 * (empty): for floats or 32 bit size
     
    167167 *   l: 32 bits
    168168 *   q: 64 bits
    169  * 
     169 *
    170170 * endian is:
    171171 * (empty): target cpu endianness or 8 bit access
     
    196196void     remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val);
    197197
    198 #ifdef PGM_DYNAMIC_RAM_ALLOC
    199198void     remR3GrowDynRange(unsigned long physaddr);
    200 #endif
    201199#if 0 /*defined(RT_ARCH_AMD64) && defined(VBOX_STRICT)*/
    202200# define VBOX_CHECK_ADDR(ptr) do { if ((uintptr_t)(ptr) >= _4G) __asm__("int3"); } while (0)
    203201#else
    204202# define VBOX_CHECK_ADDR(ptr) do { } while (0)
    205 #endif 
     203#endif
    206204
    207205static inline int ldub_p(void *ptr)
     
    759757
    760758
    761 #if defined(CONFIG_USER_ONLY) 
     759#if defined(CONFIG_USER_ONLY)
    762760
    763761/* if user mode, no other memory access functions */
     
    821819/* original state of the write flag (used when tracking self-modifying
    822820   code */
    823 #define PAGE_WRITE_ORG 0x0010 
     821#define PAGE_WRITE_ORG 0x0010
    824822
    825823void page_dump(FILE *f);
     
    892890#endif /* SINGLE_CPU_DEFINES */
    893891
    894 void cpu_dump_state(CPUState *env, FILE *f, 
     892void cpu_dump_state(CPUState *env, FILE *f,
    895893                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
    896894                    int flags);
     
    938936target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr);
    939937
    940 #define CPU_LOG_TB_OUT_ASM (1 << 0) 
     938#define CPU_LOG_TB_OUT_ASM (1 << 0)
    941939#define CPU_LOG_TB_IN_ASM  (1 << 1)
    942940#define CPU_LOG_TB_OP      (1 << 2)
     
    985983extern uint32_t phys_ram_dirty_size;
    986984#endif /* VBOX */
    987 #if !defined(VBOX) || !(defined(PGM_DYNAMIC_RAM_ALLOC) || defined(REM_PHYS_ADDR_IN_TLB))
     985#if !defined(VBOX)
    988986extern uint8_t *phys_ram_base;
    989987#endif
     
    999997#define IO_MEM_UNASSIGNED  (2 << IO_MEM_SHIFT)
    1000998#define IO_MEM_NOTDIRTY    (4 << IO_MEM_SHIFT) /* used internally, never use directly */
    1001 #if defined(VBOX) && defined(PGM_DYNAMIC_RAM_ALLOC)
     999#if defined(VBOX)
    10021000#define IO_MEM_RAM_MISSING (5 << IO_MEM_SHIFT) /* used internally, never use directly */
    10031001#endif
     
    10101008typedef uint32_t CPUReadMemoryFunc(void *opaque, target_phys_addr_t addr);
    10111009
    1012 void cpu_register_physical_memory(target_phys_addr_t start_addr, 
     1010void cpu_register_physical_memory(target_phys_addr_t start_addr,
    10131011                                  unsigned long size,
    10141012                                  unsigned long phys_offset);
     
    10231021void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
    10241022                            int len, int is_write);
    1025 static inline void cpu_physical_memory_read(target_phys_addr_t addr, 
     1023static inline void cpu_physical_memory_read(target_phys_addr_t addr,
    10261024                                            uint8_t *buf, int len)
    10271025{
    10281026    cpu_physical_memory_rw(addr, buf, len, 0);
    10291027}
    1030 static inline void cpu_physical_memory_write(target_phys_addr_t addr, 
     1028static inline void cpu_physical_memory_write(target_phys_addr_t addr,
    10311029                                             const uint8_t *buf, int len)
    10321030{
     
    10431041void stq_phys(target_phys_addr_t addr, uint64_t val);
    10441042
    1045 void cpu_physical_memory_write_rom(target_phys_addr_t addr, 
     1043void cpu_physical_memory_write_rom(target_phys_addr_t addr,
    10461044                                   const uint8_t *buf, int len);
    1047 int cpu_memory_rw_debug(CPUState *env, target_ulong addr, 
     1045int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
    10481046                        uint8_t *buf, int len, int is_write);
    10491047
     
    10651063}
    10661064
    1067 static inline int cpu_physical_memory_get_dirty(ram_addr_t addr, 
     1065static inline int cpu_physical_memory_get_dirty(ram_addr_t addr,
    10681066                                                int dirty_flags)
    10691067{
     
    10821080{
    10831081#ifdef VBOX
    1084     if (RT_UNLIKELY((addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size)) 
     1082    if (RT_UNLIKELY((addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
    10851083    {
    10861084        Log(("cpu_physical_memory_is_dirty: %VGp\n", (RTGCPHYS)addr));
     
    11041102#if defined(__powerpc__)
    11051103
    1106 static inline uint32_t get_tbl(void) 
     1104static inline uint32_t get_tbl(void)
    11071105{
    11081106    uint32_t tbl;
     
    11111109}
    11121110
    1113 static inline uint32_t get_tbu(void) 
     1111static inline uint32_t get_tbu(void)
    11141112{
    11151113        uint32_t tbl;
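Besides trailing-whitespace cleanup, the cpu-all.h hunk reduces the compound recompiler guards: #if !defined(VBOX) || !(defined(PGM_DYNAMIC_RAM_ALLOC) || defined(REM_PHYS_ADDR_IN_TLB)) becomes #if !defined(VBOX), and #if defined(VBOX) && defined(PGM_DYNAMIC_RAM_ALLOC) becomes #if defined(VBOX). This is plain boolean simplification under the assumption the old code already relied on, namely that PGM_DYNAMIC_RAM_ALLOC was defined whenever VBOX was (guaranteed by the unconditional #define in pgm.h removed above); the same reduction recurs in exec-all.h and exec.c below. Sketch of the equivalence:

    #include <stdint.h>

    /* Assuming PGM_DYNAMIC_RAM_ALLOC is defined whenever VBOX is, these
     * two guards admit exactly the same builds: */

    /* before r6532 */
    #if !defined(VBOX) || !(defined(PGM_DYNAMIC_RAM_ALLOC) || defined(REM_PHYS_ADDR_IN_TLB))
    extern uint8_t *phys_ram_base;
    #endif

    /* after r6532 */
    #if !defined(VBOX)
    extern uint8_t *phys_ram_base;
    #endif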
  • trunk/src/recompiler/exec-all.h

    r3727 r6532  
    11/*
    22 * internal execution defines for qemu
    3  * 
     3 *
    44 *  Copyright (c) 2003 Fabrice Bellard
    55 *
     
    2222#ifndef VBOX
    2323#define DEBUG_DISAS
    24 #endif 
     24#endif
    2525
    2626#ifdef VBOX
     
    8282typedef void (GenOpFunc2)(long, long);
    8383typedef void (GenOpFunc3)(long, long, long);
    84                    
     84
    8585#if defined(TARGET_I386)
    8686
     
    9797int cpu_gen_code(CPUState *env, struct TranslationBlock *tb,
    9898                 int max_code_size, int *gen_code_size_ptr);
    99 int cpu_restore_state(struct TranslationBlock *tb, 
     99int cpu_restore_state(struct TranslationBlock *tb,
    100100                      CPUState *env, unsigned long searched_pc,
    101101                      void *puc);
    102102int cpu_gen_code_copy(CPUState *env, struct TranslationBlock *tb,
    103103                      int max_code_size, int *gen_code_size_ptr);
    104 int cpu_restore_state_copy(struct TranslationBlock *tb, 
     104int cpu_restore_state_copy(struct TranslationBlock *tb,
    105105                           CPUState *env, unsigned long searched_pc,
    106106                           void *puc);
     
    108108void cpu_exec_init(CPUState *env);
    109109int page_unprotect(target_ulong address, unsigned long pc, void *puc);
    110 void tb_invalidate_phys_page_range(target_ulong start, target_ulong end, 
     110void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
    111111                                   int is_cpu_write_access);
    112112void tb_invalidate_page_range(target_ulong start, target_ulong end);
    113113void tlb_flush_page(CPUState *env, target_ulong addr);
    114114void tlb_flush(CPUState *env, int flush_global);
    115 int tlb_set_page_exec(CPUState *env, target_ulong vaddr, 
    116                       target_phys_addr_t paddr, int prot, 
     115int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
     116                      target_phys_addr_t paddr, int prot,
    117117                      int is_user, int is_softmmu);
    118 static inline int tlb_set_page(CPUState *env, target_ulong vaddr, 
    119                                target_phys_addr_t paddr, int prot, 
     118static inline int tlb_set_page(CPUState *env, target_ulong vaddr,
     119                               target_phys_addr_t paddr, int prot,
    120120                               int is_user, int is_softmmu)
    121121{
     
    167167#define CODE_GEN_MAX_BLOCKS    (CODE_GEN_BUFFER_SIZE / CODE_GEN_AVG_BLOCK_SIZE)
    168168
    169 #if defined(__powerpc__) 
     169#if defined(__powerpc__)
    170170#define USE_DIRECT_JUMP
    171171#endif
     
    194194    uint8_t *tc_ptr;    /* pointer to the translated code */
    195195    /* next matching tb for physical address. */
    196     struct TranslationBlock *phys_hash_next; 
     196    struct TranslationBlock *phys_hash_next;
    197197    /* first and second physical page containing code. The lower bit
    198198       of the pointer tells the index in page_next[] */
    199     struct TranslationBlock *page_next[2]; 
    200     target_ulong page_addr[2]; 
     199    struct TranslationBlock *page_next[2];
     200    target_ulong page_addr[2];
    201201
    202202    /* the following data are used to directly call another TB from
     
    215215       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
    216216       jmp_first */
    217     struct TranslationBlock *jmp_next[2]; 
     217    struct TranslationBlock *jmp_next[2];
    218218    struct TranslationBlock *jmp_first;
    219219} TranslationBlock;
     
    241241TranslationBlock *tb_alloc(target_ulong pc);
    242242void tb_flush(CPUState *env);
    243 void tb_link_phys(TranslationBlock *tb, 
     243void tb_link_phys(TranslationBlock *tb,
    244244                  target_ulong phys_pc, target_ulong phys_page2);
    245245
     
    277277#endif
    278278
    279 static inline void tb_set_jmp_target(TranslationBlock *tb, 
     279static inline void tb_set_jmp_target(TranslationBlock *tb,
    280280                                     int n, unsigned long addr)
    281281{
     
    292292
    293293/* set the jump target */
    294 static inline void tb_set_jmp_target(TranslationBlock *tb, 
     294static inline void tb_set_jmp_target(TranslationBlock *tb,
    295295                                     int n, unsigned long addr)
    296296{
     
    300300#endif
    301301
    302 static inline void tb_add_jump(TranslationBlock *tb, int n, 
     302static inline void tb_add_jump(TranslationBlock *tb, int n,
    303303                               TranslationBlock *tb_next)
    304304{
     
    307307        /* patch the native jump address */
    308308        tb_set_jmp_target(tb, n, (unsigned long)tb_next->tc_ptr);
    309        
     309
    310310        /* add in TB jmp circular list */
    311311        tb->jmp_next[n] = tb_next->jmp_first;
     
    388388    dummy_label ## n: ;\
    389389    } while (0)
    390 #  endif 
     390#  endif
    391391# else /* !VBOX */
    392392#define GOTO_TB(opname, tbparam, n)\
     
    429429{
    430430    long int readval = 0;
    431    
     431
    432432    __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
    433433                          : "+m" (*p), "+a" (readval)
     
    442442{
    443443    long int readval = 0;
    444    
     444
    445445    __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
    446446                          : "+m" (*p), "+a" (readval)
     
    459459                          "   jl    0b"
    460460                          : "=&d" (ret)
    461                           : "r" (1), "a" (p), "0" (*p) 
     461                          : "r" (1), "a" (p), "0" (*p)
    462462                          : "cc", "memory" );
    463463    return ret;
     
    505505                         : "=r"(ret)
    506506                         : "0"(1), "r"(spinlock));
    507    
     507
    508508    return ret;
    509509}
     
    571571#if !defined(CONFIG_USER_ONLY)
    572572
    573 void tlb_fill(target_ulong addr, int is_write, int is_user, 
     573void tlb_fill(target_ulong addr, int is_write, int is_user,
    574574              void *retaddr);
    575575
     
    604604# ifdef VBOX
    605605target_ulong remR3PhysGetPhysicalAddressCode(CPUState *env, target_ulong addr, CPUTLBEntry *pTLBEntry);
    606 #  if defined(PGM_DYNAMIC_RAM_ALLOC) && !defined(REM_PHYS_ADDR_IN_TLB)
     606#  if !defined(REM_PHYS_ADDR_IN_TLB)
    607607target_ulong remR3HCVirt2GCPhys(void *env, void *addr);
    608608#  endif
     
    631631#error unimplemented CPU
    632632#endif
    633     if (__builtin_expect(env->tlb_table[is_user][index].addr_code != 
     633    if (__builtin_expect(env->tlb_table[is_user][index].addr_code !=
    634634                         (addr & TARGET_PAGE_MASK), 0)) {
    635635        ldub_code(addr);
     
    646646# if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
    647647    return addr + env->tlb_table[is_user][index].addend;
    648 # elif defined(VBOX) && defined(PGM_DYNAMIC_RAM_ALLOC)
     648# elif defined(VBOX)
    649649    return remR3HCVirt2GCPhys(env, (void *)(addr + env->tlb_table[is_user][index].addend));
    650650# else
     
    670670{
    671671    return(env->kqemu_enabled &&
    672            (env->cr[0] & CR0_PE_MASK) && 
     672           (env->cr[0] & CR0_PE_MASK) &&
    673673           !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
    674674           (env->eflags & IF_MASK) &&
    675675           !(env->eflags & VM_MASK) &&
    676            (env->kqemu_enabled == 2 || 
     676           (env->kqemu_enabled == 2 ||
    677677            ((env->hflags & HF_CPL_MASK) == 3 &&
    678678             (env->eflags & IOPL_MASK) != IOPL_MASK)));
  • trunk/src/recompiler/exec.c

    r4376 r6532  
    11/*
    22 *  virtual page mapping and translated block handling
    3  * 
     3 *
    44 *  Copyright (c) 2003 Fabrice Bellard
    55 *
     
    5555
    5656/* make various TB consistency checks */
    57 //#define DEBUG_TB_CHECK 
    58 //#define DEBUG_TLB_CHECK 
     57//#define DEBUG_TB_CHECK
     58//#define DEBUG_TLB_CHECK
    5959
    6060#if !defined(CONFIG_USER_ONLY)
     
    8686spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
    8787
    88 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] 
     88uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE]
    8989#if defined(__MINGW32__)
    9090    __attribute__((aligned (16)));
    9191#else
    9292    __attribute__((aligned (32)));
    93 #endif 
     93#endif
    9494uint8_t *code_gen_ptr;
    9595
     
    100100#else /* VBOX */
    101101RTGCPHYS phys_ram_size;
    102 /* we have memory ranges (the high PC-BIOS mapping) which 
     102/* we have memory ranges (the high PC-BIOS mapping) which
    103103   causes some pages to fall outside the dirty map here. */
    104104uint32_t phys_ram_dirty_size;
    105105#endif /* VBOX */
    106 #if !defined(VBOX) || !(defined(PGM_DYNAMIC_RAM_ALLOC) || defined(REM_PHYS_ADDR_IN_TLB))
     106#if !defined(VBOX)
    107107uint8_t *phys_ram_base;
    108108#endif
     
    112112/* current CPU in the current thread. It is only valid inside
    113113   cpu_exec() */
    114 CPUState *cpu_single_env; 
     114CPUState *cpu_single_env;
    115115
    116116typedef struct PageDesc {
     
    181181        SYSTEM_INFO system_info;
    182182        DWORD old_protect;
    183        
     183
    184184        GetSystemInfo(&system_info);
    185185        qemu_real_host_page_size = system_info.dwPageSize;
    186        
     186
    187187        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
    188188                       PAGE_EXECUTE_READWRITE, &old_protect);
     
    195195        start = (unsigned long)code_gen_buffer;
    196196        start &= ~(qemu_real_host_page_size - 1);
    197        
     197
    198198        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
    199199        end += qemu_real_host_page_size - 1;
    200200        end &= ~(qemu_real_host_page_size - 1);
    201        
    202         mprotect((void *)start, end - start, 
     201
     202        mprotect((void *)start, end - start,
    203203                 PROT_READ | PROT_WRITE | PROT_EXEC);
    204204    }
     
    277277          pd[i].phys_offset = IO_MEM_UNASSIGNED;
    278278    }
    279 #if defined(VBOX) && defined(PGM_DYNAMIC_RAM_ALLOC)
     279#if defined(VBOX)
    280280    pd = ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
    281281    if (RT_UNLIKELY((pd->phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING))
     
    294294#if !defined(CONFIG_USER_ONLY)
    295295static void tlb_protect_code(ram_addr_t ram_addr);
    296 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr, 
     296static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
    297297                                    target_ulong vaddr);
    298298#endif
     
    352352    CPUState *env;
    353353#if defined(DEBUG_FLUSH)
    354     printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n", 
    355            code_gen_ptr - code_gen_buffer, 
    356            nb_tbs, 
     354    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
     355           code_gen_ptr - code_gen_buffer,
     356           nb_tbs,
    357357           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
    358358#endif
    359359    nb_tbs = 0;
    360    
     360
    361361    for(env = first_cpu; env != NULL; env = env->next_cpu) {
    362362        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
     
    395395    TranslationBlock *tb;
    396396    int i, flags1, flags2;
    397    
     397
    398398    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
    399399        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
     
    504504    target_ulong phys_pc;
    505505    TranslationBlock *tb1, *tb2;
    506    
     506
    507507    /* remove the TB from the hash list */
    508508    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    509509    h = tb_phys_hash_func(phys_pc);
    510     tb_remove(&tb_phys_hash[h], tb, 
     510    tb_remove(&tb_phys_hash[h], tb,
    511511              offsetof(TranslationBlock, phys_hash_next));
    512512
     
    570570    pc = cs_base + eip;
    571571
    572     tb = tb_find(&ptb, (unsigned long)pc, (unsigned long)cs_base, 
     572    tb = tb_find(&ptb, (unsigned long)pc, (unsigned long)cs_base,
    573573                 flags);
    574574
    575     if(tb) 
     575    if(tb)
    576576    {
    577577#  ifdef DEBUG
     
    631631    int n, tb_start, tb_end;
    632632    TranslationBlock *tb;
    633    
     633
    634634    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    635635    if (!p->code_bitmap)
     
    660660#ifdef TARGET_HAS_PRECISE_SMC
    661661
    662 static void tb_gen_code(CPUState *env, 
     662static void tb_gen_code(CPUState *env,
    663663                        target_ulong pc, target_ulong cs_base, int flags,
    664664                        int cflags)
     
    684684    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    685685    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
    686    
     686
    687687    /* check next page if needed */
    688688    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
     
    694694}
    695695#endif
    696    
     696
    697697/* invalidate all TBs which intersect with the target physical page
    698698   starting in range [start;end[. NOTE: start and end must refer to
     
    700700   from a real cpu write access: the virtual CPU will exit the current
    701701   TB if code is modified inside this TB. */
    702 void tb_invalidate_phys_page_range(target_ulong start, target_ulong end, 
     702void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
    703703                                   int is_cpu_write_access)
    704704{
     
    711711
    712712    p = page_find(start >> TARGET_PAGE_BITS);
    713     if (!p) 
     713    if (!p)
    714714        return;
    715     if (!p->code_bitmap && 
     715    if (!p->code_bitmap &&
    716716        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
    717717        is_cpu_write_access) {
     
    760760                would require a specialized function to partially
    761761                restore the CPU state */
    762                
     762
    763763                current_tb_modified = 1;
    764                 cpu_restore_state(current_tb, env, 
     764                cpu_restore_state(current_tb, env,
    765765                                  env->mem_write_pc, NULL);
    766766#if defined(TARGET_I386)
     
    805805           itself */
    806806        env->current_tb = NULL;
    807         tb_gen_code(env, current_pc, current_cs_base, current_flags, 
     807        tb_gen_code(env, current_pc, current_cs_base, current_flags,
    808808                    CF_SINGLE_INSN);
    809809        cpu_resume_from_signal(env, NULL);
     
    820820    if (1) {
    821821        if (loglevel) {
    822             fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n", 
    823                    cpu_single_env->mem_write_vaddr, len, 
    824                    cpu_single_env->eip, 
     822            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
     823                   cpu_single_env->mem_write_vaddr, len,
     824                   cpu_single_env->eip,
    825825                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    826826        }
     
    828828#endif
    829829    p = page_find(start >> TARGET_PAGE_BITS);
    830     if (!p) 
     830    if (!p)
    831831        return;
    832832    if (p->code_bitmap) {
     
    842842
    843843#if !defined(CONFIG_SOFTMMU)
    844 static void tb_invalidate_phys_page(target_ulong addr, 
     844static void tb_invalidate_phys_page(target_ulong addr,
    845845                                    unsigned long pc, void *puc)
    846846{
     
    855855    addr &= TARGET_PAGE_MASK;
    856856    p = page_find(addr >> TARGET_PAGE_BITS);
    857     if (!p) 
     857    if (!p)
    858858        return;
    859859    tb = p->first_tb;
     
    879879                   would require a specialized function to partially
    880880                   restore the CPU state */
    881            
     881
    882882            current_tb_modified = 1;
    883883            cpu_restore_state(current_tb, env, pc, puc);
     
    902902           itself */
    903903        env->current_tb = NULL;
    904         tb_gen_code(env, current_pc, current_cs_base, current_flags, 
     904        tb_gen_code(env, current_pc, current_cs_base, current_flags,
    905905                    CF_SINGLE_INSN);
    906906        cpu_resume_from_signal(env, puc);
     
    911911
    912912/* add the tb in the target page and protect it if necessary */
    913 static inline void tb_alloc_page(TranslationBlock *tb, 
     913static inline void tb_alloc_page(TranslationBlock *tb,
    914914                                 unsigned int n, target_ulong page_addr)
    915915{
     
    946946            page_get_flags(addr);
    947947          }
    948         mprotect(g2h(page_addr), qemu_host_page_size, 
     948        mprotect(g2h(page_addr), qemu_host_page_size,
    949949                 (prot & PAGE_BITS) & ~PAGE_WRITE);
    950950#ifdef DEBUG_TB_INVALIDATE
    951         printf("protecting code page: 0x%08lx\n", 
     951        printf("protecting code page: 0x%08lx\n",
    952952               page_addr);
    953953#endif
     
    971971    TranslationBlock *tb;
    972972
    973     if (nb_tbs >= CODE_GEN_MAX_BLOCKS || 
     973    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
    974974        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
    975975        return NULL;
     
    982982/* add a new TB and link it to the physical page tables. phys_page2 is
    983983   (-1) to indicate that only one page contains the TB. */
    984 void tb_link_phys(TranslationBlock *tb, 
     984void tb_link_phys(TranslationBlock *tb,
    985985                  target_ulong phys_pc, target_ulong phys_page2)
    986986{
     
    10481048            m_min = m + 1;
    10491049        }
    1050     } 
     1050    }
    10511051    return &tbs[m_max];
    10521052}
     
    10841084        *ptb = tb->jmp_next[n];
    10851085        tb->jmp_next[n] = NULL;
    1086        
     1086
    10871087        /* suppress the jump to next tb in generated code */
    10881088        tb_reset_jump(tb, n);
     
    11241124#if defined(TARGET_HAS_ICE)
    11251125    int i;
    1126    
     1126
    11271127    for(i = 0; i < env->nb_breakpoints; i++) {
    11281128        if (env->breakpoints[i] == pc)
     
    11331133        return -1;
    11341134    env->breakpoints[env->nb_breakpoints++] = pc;
    1135    
     1135
    11361136    breakpoint_invalidate(env, pc);
    11371137    return 0;
     
    12431243#ifndef VBOX
    12441244CPULogItem cpu_log_items[] = {
    1245     { CPU_LOG_TB_OUT_ASM, "out_asm", 
     1245    { CPU_LOG_TB_OUT_ASM, "out_asm",
    12461246      "show generated host assembly code for each compiled TB" },
    12471247    { CPU_LOG_TB_IN_ASM, "in_asm",
    12481248      "show target assembly code for each compiled TB" },
    1249     { CPU_LOG_TB_OP, "op", 
     1249    { CPU_LOG_TB_OP, "op",
    12501250      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
    12511251#ifdef TARGET_I386
     
    12761276    return memcmp(s1, s2, n) == 0;
    12771277}
    1278      
     1278
    12791279/* takes a comma separated list of log masks. Return 0 if error. */
    12801280int cpu_str_to_log_mask(const char *str)
     
    13731373static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
    13741374{
    1375     if (addr == (tlb_entry->addr_read & 
     1375    if (addr == (tlb_entry->addr_read &
    13761376                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
    1377         addr == (tlb_entry->addr_write & 
     1377        addr == (tlb_entry->addr_write &
    13781378                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
    1379         addr == (tlb_entry->addr_code & 
     1379        addr == (tlb_entry->addr_code &
    13801380                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
    13811381        tlb_entry->addr_read = -1;
     
    14291429static void tlb_protect_code(ram_addr_t ram_addr)
    14301430{
    1431     cpu_physical_memory_reset_dirty(ram_addr, 
     1431    cpu_physical_memory_reset_dirty(ram_addr,
    14321432                                    ram_addr + TARGET_PAGE_SIZE,
    14331433                                    CODE_DIRTY_FLAG);
     
    14401440/* update the TLB so that writes in physical page 'phys_addr' are no longer
    14411441   tested for self modifying code */
    1442 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr, 
     1442static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
    14431443                                    target_ulong vaddr)
    14441444{
    14451445#ifdef VBOX
    14461446    if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
    1447 #endif 
     1447#endif
    14481448    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
    14491449}
    14501450
    1451 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, 
     1451static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
    14521452                                         unsigned long start, unsigned long length)
    14531453{
     
    14921492#ifdef VBOX
    14931493    if (RT_LIKELY((start >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
    1494 #endif 
     1494#endif
    14951495    for(i = 0; i < len; i++)
    14961496        p[i] &= mask;
     
    15001500#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
    15011501    start1 = start;
    1502 #elif !defined(VBOX) || !defined(PGM_DYNAMIC_RAM_ALLOC)
     1502#elif !defined(VBOX)
    15031503    start1 = start + (unsigned long)phys_ram_base;
    15041504#else
     
    15311531                        (p->prot & PROT_WRITE)) {
    15321532                        if (addr < MMAP_AREA_END) {
    1533                             mprotect((void *)addr, TARGET_PAGE_SIZE, 
     1533                            mprotect((void *)addr, TARGET_PAGE_SIZE,
    15341534                                     p->prot & ~PROT_WRITE);
    15351535                        }
     
    15521552#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
    15531553        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
    1554 #elif !defined(VBOX) || !defined(PGM_DYNAMIC_RAM_ALLOC)
    1555         ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + 
     1554#elif !defined(VBOX)
     1555        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
    15561556            tlb_entry->addend - (unsigned long)phys_ram_base;
    15571557#else
     
    15741574}
    15751575
    1576 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, 
     1576static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
    15771577                                  unsigned long start)
    15781578{
     
    16031603   (can only happen in non SOFTMMU mode for I/O pages or pages
    16041604   conflicting with the host address space). */
    1605 int tlb_set_page_exec(CPUState *env, target_ulong vaddr, 
    1606                       target_phys_addr_t paddr, int prot, 
     1605int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
     1606                      target_phys_addr_t paddr, int prot,
    16071607                      int is_user, int is_softmmu)
    16081608{
     
    16281628    ret = 0;
    16291629#if !defined(CONFIG_SOFTMMU)
    1630     if (is_softmmu) 
     1630    if (is_softmmu)
    16311631#endif
    16321632    {
     
    16401640#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
    16411641            addend = pd & TARGET_PAGE_MASK;
    1642 #elif !defined(VBOX) || !defined(PGM_DYNAMIC_RAM_ALLOC)
     1642#elif !defined(VBOX)
    16431643            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
    16441644#else
    16451645            addend = (unsigned long)remR3GCPhys2HCVirt(env, pd & TARGET_PAGE_MASK);
    1646 #endif 
    1647         }
    1648        
     1646#endif
     1647        }
     1648
    16491649        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    16501650        addend -= vaddr;
     
    16621662        }
    16631663        if (prot & PAGE_WRITE) {
    1664             if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM || 
     1664            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
    16651665                (pd & IO_MEM_ROMD)) {
    16661666                /* write access calls the I/O callback */
    1667                 te->addr_write = vaddr | 
     1667                te->addr_write = vaddr |
    16681668                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
    1669             } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM && 
     1669            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
    16701670                       !cpu_physical_memory_is_dirty(pd)) {
    16711671                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
     
    16951695            } else {
    16961696                if (prot & PROT_WRITE) {
    1697                     if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM || 
     1697                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
    16981698#if defined(TARGET_HAS_SMC) || 1
    16991699                        first_tb ||
    17001700#endif
    1701                         ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM && 
     1701                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
    17021702                         !cpu_physical_memory_is_dirty(pd))) {
    17031703                        /* ROM: we do as if code was inside */
     
    17051705                           original mapping */
    17061706                        VirtPageDesc *vp;
    1707                        
     1707
    17081708                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
    17091709                        vp->phys_addr = pd;
     
    17131713                    }
    17141714                }
    1715                 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot, 
     1715                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
    17161716                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
    17171717                if (map_addr == MAP_FAILED) {
     
    17511751        return 0;
    17521752#if defined(DEBUG_TLB)
    1753     printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n", 
     1753    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
    17541754           addr, vp->phys_addr, vp->prot);
    17551755#endif
     
    17841784}
    17851785
    1786 int tlb_set_page_exec(CPUState *env, target_ulong vaddr, 
    1787                       target_phys_addr_t paddr, int prot, 
     1786int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
     1787                      target_phys_addr_t paddr, int prot,
    17881788                      int is_user, int is_softmmu)
    17891789{
     
    18181818                if (start != -1) {
    18191819                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
    1820                             start, end, end - start, 
     1820                            start, end, end - start,
    18211821                            prot & PAGE_READ ? 'r' : '-',
    18221822                            prot & PAGE_WRITE ? 'w' : '-',
     
    18661866        /* if the write protection is set, then we invalidate the code
    18671867           inside */
    1868         if (!(p->flags & PAGE_WRITE) && 
     1868        if (!(p->flags & PAGE_WRITE) &&
    18691869            (flags & PAGE_WRITE) &&
    18701870            p->first_tb) {
     
    19011901        pindex = (address - host_start) >> TARGET_PAGE_BITS;
    19021902        if (!(p1[pindex].flags & PAGE_WRITE)) {
    1903             mprotect((void *)g2h(host_start), qemu_host_page_size, 
     1903            mprotect((void *)g2h(host_start), qemu_host_page_size,
    19041904                     (prot & PAGE_BITS) | PAGE_WRITE);
    19051905            p1[pindex].flags |= PAGE_WRITE;
     
    19401940   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
    19411941   io memory page */
    1942 void cpu_register_physical_memory(target_phys_addr_t start_addr, 
     1942void cpu_register_physical_memory(target_phys_addr_t start_addr,
    19431943                                  unsigned long size,
    19441944                                  unsigned long phys_offset)
     
    19531953        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
    19541954        p->phys_offset = phys_offset;
    1955 #if !defined(VBOX) || !defined(PGM_DYNAMIC_RAM_ALLOC)
     1955#if !defined(VBOX)
    19561956        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
    19571957            (phys_offset & IO_MEM_ROMD))
     
    19641964            phys_offset += TARGET_PAGE_SIZE;
    19651965    }
    1966    
     1966
    19671967    /* since each CPU stores ram addresses in its TLB cache, we must
    19681968       reset the modified entries */
     
    20172017#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
    20182018    ram_addr = addr;
    2019 #elif !defined(VBOX) || !defined(PGM_DYNAMIC_RAM_ALLOC)
     2019#elif !defined(VBOX)
    20202020    ram_addr = addr - (unsigned long)phys_ram_base;
    20212021#else
     
    20622062#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
    20632063    ram_addr = addr;
    2064 #elif !defined(VBOX) || !defined(PGM_DYNAMIC_RAM_ALLOC)
     2064#elif !defined(VBOX)
    20652065    ram_addr = addr - (unsigned long)phys_ram_base;
    20662066#else
     
    21072107#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
    21082108    ram_addr = addr;
    2109 #elif !defined(VBOX) || !defined(PGM_DYNAMIC_RAM_ALLOC)
     2109#elif !defined(VBOX)
    21102110    ram_addr = addr - (unsigned long)phys_ram_base;
    21112111#else
     
    21632163    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    21642164    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    2165 #if defined(VBOX) && defined(PGM_DYNAMIC_RAM_ALLOC)
     2165#if defined(VBOX)
    21662166    cpu_register_io_memory(IO_MEM_RAM_MISSING >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    21672167    io_mem_nb = 6;
     
    22192219/* physical memory access (slow version, mainly for debug) */
    22202220#if defined(CONFIG_USER_ONLY)
    2221 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, 
     2221void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
    22222222                            int len, int is_write)
    22232223{
     
    22542254
    22552255#else
    2256 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, 
     2256void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
    22572257                            int len, int is_write)
    22582258{
     
    22632263    unsigned long pd;
    22642264    PhysPageDesc *p;
    2265    
     2265
    22662266    while (len > 0) {
    22672267        page = addr & TARGET_PAGE_MASK;
     
    22752275            pd = p->phys_offset;
    22762276        }
    2277        
     2277
    22782278        if (is_write) {
    22792279            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
     
    22872287#else
    22882288                    val = *(const uint32_t *)buf;
    2289 #endif 
     2289#endif
    22902290                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    22912291                    l = 4;
     
    22962296#else
    22972297                    val = *(const uint16_t *)buf;
    2298 #endif 
     2298#endif
    22992299                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
    23002300                    l = 2;
     
    23052305#else
    23062306                    val = *(const uint8_t *)buf;
    2307 #endif 
     2307#endif
    23082308                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
    23092309                    l = 1;
     
    23312331            }
    23322332        } else {
    2333             if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && 
     2333            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
    23342334                !(pd & IO_MEM_ROMD)) {
    23352335                /* I/O case */
     
    23422342#else
    23432343                    *(uint32_t *)buf = val;
    2344 #endif 
     2344#endif
    23452345                    l = 4;
    23462346                } else if (l >= 2 && ((addr & 1) == 0)) {
     
    23512351#else
    23522352                    *(uint16_t *)buf = val;
    2353 #endif 
     2353#endif
    23542354                    l = 2;
    23552355                } else {
     
    23602360#else
    23612361                    *(uint8_t *)buf = val;
    2362 #endif 
     2362#endif
    23632363                    l = 1;
    23642364                }
     
    23682368                remR3PhysRead((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), buf, l); NOREF(ptr);
    23692369#else
    2370                 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + 
     2370                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
    23712371                    (addr & ~TARGET_PAGE_MASK);
    23722372                memcpy(buf, ptr, l);
     
    23822382#ifndef VBOX
    23832383/* used for ROM loading : can write in RAM and ROM */
    2384 void cpu_physical_memory_write_rom(target_phys_addr_t addr, 
     2384void cpu_physical_memory_write_rom(target_phys_addr_t addr,
    23852385                                   const uint8_t *buf, int len)
    23862386{
     
    23902390    unsigned long pd;
    23912391    PhysPageDesc *p;
    2392    
     2392
    23932393    while (len > 0) {
    23942394        page = addr & TARGET_PAGE_MASK;
     
    24022402            pd = p->phys_offset;
    24032403        }
    2404        
     2404
    24052405        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
    24062406            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
     
    24372437        pd = p->phys_offset;
    24382438    }
    2439        
    2440     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && 
     2439
     2440    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
    24412441        !(pd & IO_MEM_ROMD)) {
    24422442        /* I/O case */
     
    24462446        /* RAM case */
    24472447#ifndef VBOX
    2448         ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + 
     2448        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
    24492449            (addr & ~TARGET_PAGE_MASK);
    24502450        val = ldl_p(ptr);
     
    24712471        pd = p->phys_offset;
    24722472    }
    2473        
     2473
    24742474    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
    24752475        !(pd & IO_MEM_ROMD)) {
     
    24862486        /* RAM case */
    24872487#ifndef VBOX
    2488         ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + 
     2488        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
    24892489            (addr & ~TARGET_PAGE_MASK);
    24902490        val = ldq_p(ptr);
     
    25282528        pd = p->phys_offset;
    25292529    }
    2530        
     2530
    25312531    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
    25322532        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
     
    25342534    } else {
    25352535#ifndef VBOX
    2536         ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + 
     2536        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
    25372537            (addr & ~TARGET_PAGE_MASK);
    25382538        stl_p(ptr, val);
     
    25572557        pd = p->phys_offset;
    25582558    }
    2559        
     2559
    25602560    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
    25612561        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
     
    26092609#ifndef VBOX
    26102610/* virtual memory access for debug */
    2611 int cpu_memory_rw_debug(CPUState *env, target_ulong addr, 
     2611int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
    26122612                        uint8_t *buf, int len, int is_write)
    26132613{
     
    26242624        if (l > len)
    26252625            l = len;
    2626         cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK), 
     2626        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
    26272627                               buf, l, is_write);
    26282628        len -= l;
     
    26392639    int direct_jmp_count, direct_jmp2_count, cross_page;
    26402640    TranslationBlock *tb;
    2641    
     2641
    26422642    target_code_size = 0;
    26432643    max_target_code_size = 0;
     
    26612661    /* XXX: avoid using doubles ? */
    26622662    cpu_fprintf(f, "TB count            %d\n", nb_tbs);
    2663     cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n", 
     2663    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
    26642664                nb_tbs ? target_code_size / nb_tbs : 0,
    26652665                max_target_code_size);
    2666     cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n", 
     2666    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
    26672667                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
    26682668                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    2669     cpu_fprintf(f, "cross page TB count %d (%d%%)\n", 
    2670             cross_page, 
     2669    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
     2670            cross_page,
    26712671            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    26722672    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
    2673                 direct_jmp_count, 
     2673                direct_jmp_count,
    26742674                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
    26752675                direct_jmp2_count,
     
    26812681#endif /* !VBOX */
    26822682
    2683 #if !defined(CONFIG_USER_ONLY) 
     2683#if !defined(CONFIG_USER_ONLY)
    26842684
    26852685#define MMUSUFFIX _cmmu