VirtualBox

source: vbox/trunk/src/recompiler/softmmu_template.h@ 37675

Last change on this file since 37675 was 37675, checked in by vboxsync, 13 years ago

rem: Synced with v0.12.5.

  • Property svn:eol-style set to native
File size: 13.0 KB
/*
 * Software MMU support
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
 * a choice of LGPL license versions is made available with the language indicating
 * that LGPLv2 or any later version may be used, or where a choice of which version
 * of the LGPL is applied is otherwise unspecified.
 */

#define DATA_SIZE (1 << SHIFT)
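/* SHIFT selects the access width for this instantiation of the template:
   0 = byte, 1 = 16-bit, 2 = 32-bit, 3 = 64-bit (DATA_SIZE is the width in bytes). */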

#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#ifdef VBOX
# define DATA_TYPE_PROMOTED uint64_t
#endif
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#ifdef VBOX
# define DATA_TYPE_PROMOTED RTCCUINTREG
#endif
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#ifdef VBOX
# define DATA_TYPE_PROMOTED RTCCUINTREG
#endif
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#ifdef VBOX
# define DATA_TYPE_PROMOTED RTCCUINTREG
#endif
#else
#error unsupported data size
#endif

#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE 0
#define ADDR_READ addr_read
#endif
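/* The access type is passed down to tlb_fill()/do_unaligned_access():
   0 = data load, 1 = data store, 2 = instruction fetch. */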
70
71static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
72 int mmu_idx,
73 void *retaddr);
74static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
75 target_ulong addr,
76 void *retaddr)
77{
78 DATA_TYPE res;
79 int index;
80 index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
81 physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
82 env->mem_io_pc = (unsigned long)retaddr;
83 if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
84 && !can_do_io(env)) {
85 cpu_io_recompile(env, retaddr);
86 }
87
88 env->mem_io_vaddr = addr;
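    /* The io_mem_read handler table only covers 1, 2 and 4 byte accesses,
       so a 64-bit MMIO read is split into two 32-bit reads below. */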
#if SHIFT <= 2
    res = io_mem_read[index][SHIFT](io_mem_opaque[index], physaddr);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    res = (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr) << 32;
    res |= io_mem_read[index][2](io_mem_opaque[index], physaddr + 4);
#else
    res = io_mem_read[index][2](io_mem_opaque[index], physaddr);
    res |= (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr + 4) << 32;
#endif
#endif /* SHIFT > 2 */
    return res;
}

/* handle all cases except unaligned accesses which span two pages */
#ifndef VBOX
DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                      int mmu_idx)
#else
/* Load helpers invoked from generated code.  TCG assumes that a valid value
   occupies the whole register, while gcc 4.3 and later may use only the lower
   part of the register for smaller types, so force the promotion. */
DATA_TYPE_PROMOTED REGPARM
glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                    int mmu_idx)
#endif
{
    DATA_TYPE res;
    int index;
    target_ulong tlb_addr;
    target_phys_addr_t addend;
    void *retaddr;

    /* test if there is a match for unaligned or IO access */
    /* XXX: more could be done in the memory macro, in a non-portable way */
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
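    /* A TLB hit requires the virtual page tag to match; TLB_INVALID_MASK is
       folded into the comparison so that an invalidated entry never matches,
       and any other low bits left in tlb_addr route the access to the IO path. */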
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            addend = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(addend, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
            /* slow unaligned access (it spans two pages or IO) */
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
            res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr,
                                                         mmu_idx, retaddr);
        } else {
            /* unaligned/aligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
        tlb_fill(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}

/* handle all unaligned cases */
static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr)
{
    DATA_TYPE res, res1, res2;
    int index, shift;
    target_phys_addr_t addend;
    target_ulong tlb_addr, addr1, addr2;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            addend = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(addend, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* slow unaligned access (it spans two pages) */
            addr1 = addr & ~(DATA_SIZE - 1);
            addr2 = addr1 + DATA_SIZE;
            res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr1,
                                                          mmu_idx, retaddr);
            res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr2,
                                                          mmu_idx, retaddr);
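            /* Combine the two aligned loads: 'shift' is the byte offset of
               addr within the first aligned datum, expressed in bits.  On a
               little-endian target the low part of the result comes from the
               top of res1 and the high part from the bottom of res2; the
               big-endian case is the mirror image. */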
            shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
            res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
            res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
            res = (DATA_TYPE)res;
        } else {
            /* unaligned/aligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB : fill it */
        tlb_fill(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}

#ifndef SOFTMMU_CODE_ACCESS

static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr);

static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          void *retaddr)
{
    int index;
    index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
            && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    env->mem_io_pc = (unsigned long)retaddr;
#if SHIFT <= 2
    io_mem_write[index][SHIFT](io_mem_opaque[index], physaddr, val);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val >> 32);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val);
#else
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val >> 32);
#endif
#endif /* SHIFT > 2 */
}

void REGPARM glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                 DATA_TYPE val,
                                                 int mmu_idx)
{
    target_phys_addr_t addend;
    target_ulong tlb_addr;
    void *retaddr;
    int index;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            addend = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(addend, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
            glue(glue(slow_st, SUFFIX), MMUSUFFIX)(addr, val,
                                                   mmu_idx, retaddr);
        } else {
            /* aligned/unaligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, 1, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
        tlb_fill(addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

/* handles all unaligned cases */
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr)
{
    target_phys_addr_t addend;
    target_ulong tlb_addr;
    int index, i;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            addend = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(addend, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* XXX: not efficient, but simple */
            /* Note: relies on the fact that tlb_fill() does not remove the
             * previous page from the TLB cache. */
            for(i = DATA_SIZE - 1; i >= 0; i--) {
#ifdef TARGET_WORDS_BIGENDIAN
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
                                          mmu_idx, retaddr);
#else
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (i * 8),
                                          mmu_idx, retaddr);
#endif
            }
        } else {
            /* aligned/unaligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB : fill it */
        tlb_fill(addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#ifdef VBOX
# undef DATA_TYPE_PROMOTED
#endif
#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE
#undef ADDR_READ
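
For context, a minimal sketch of how such a template header is typically consumed: the including translation unit defines MMUSUFFIX and GETPC, then includes this header once per access size with SHIFT set to 0..3, producing __ldb_mmu/__stb_mmu through __ldq_mmu/__stq_mmu. The including file name, MMUSUFFIX value, and GETPC definition below are assumptions for illustration, not taken from this page.

/* Sketch only -- MMUSUFFIX value and GETPC definition are assumed. */
#define MMUSUFFIX _mmu
#define GETPC() (__builtin_return_address(0))

#define SHIFT 0
#include "softmmu_template.h"   /* generates __ldb_mmu / __stb_mmu */

#define SHIFT 1
#include "softmmu_template.h"   /* generates __ldw_mmu / __stw_mmu */

#define SHIFT 2
#include "softmmu_template.h"   /* generates __ldl_mmu / __stl_mmu */

#define SHIFT 3
#include "softmmu_template.h"   /* generates __ldq_mmu / __stq_mmu */

Because the header ends by undefining SHIFT, DATA_TYPE, SUFFIX and the other per-size macros, it can be re-included repeatedly in this way without macro clashes.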