VirtualBox

source: vbox/trunk/src/recompiler/cpu-all.h@36172

Last change on this file since 36172 was 36170, checked in by vboxsync, 14 years ago

rem: synced up to svn://svn.savannah.nongnu.org/qemu/trunk@6686 (repo UUID c046a42c-6fe2-441c-8c8c-71466251a162).

  • Property svn:eol-style set to native
File size: 35.5 KB
/*
 * defines common to all virtual CPUs
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */

/*
 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
 * a choice of LGPL license versions is made available with the language indicating
 * that LGPLv2 or any later version may be used, or where a choice of which version
 * of the LGPL is applied is otherwise unspecified.
 */

#ifndef CPU_ALL_H
#define CPU_ALL_H

#ifdef VBOX
# ifndef LOG_GROUP
#  define LOG_GROUP LOG_GROUP_REM
# endif
# include <VBox/log.h>
# include <VBox/vmm/pgm.h> /* PGM_DYNAMIC_RAM_ALLOC */
#endif /* VBOX */
#include "qemu-common.h"

#if defined(__arm__) || defined(__sparc__) || defined(__mips__) || defined(__hppa__)
#define WORDS_ALIGNED
#endif

/* some important defines:
 *
 * WORDS_ALIGNED : if defined, the host cpu can only make word aligned
 * memory accesses.
 *
 * WORDS_BIGENDIAN : if defined, the host cpu is big endian and
 * otherwise little endian.
 *
 * (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet))
 *
 * TARGET_WORDS_BIGENDIAN : same for target cpu
 */

#include "bswap.h"
#include "softfloat.h"

#if defined(WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
#define BSWAP_NEEDED
#endif

#ifdef BSWAP_NEEDED

static inline uint16_t tswap16(uint16_t s)
{
    return bswap16(s);
}

static inline uint32_t tswap32(uint32_t s)
{
    return bswap32(s);
}

static inline uint64_t tswap64(uint64_t s)
{
    return bswap64(s);
}

static inline void tswap16s(uint16_t *s)
{
    *s = bswap16(*s);
}

static inline void tswap32s(uint32_t *s)
{
    *s = bswap32(*s);
}

static inline void tswap64s(uint64_t *s)
{
    *s = bswap64(*s);
}

#else

static inline uint16_t tswap16(uint16_t s)
{
    return s;
}

static inline uint32_t tswap32(uint32_t s)
{
    return s;
}

static inline uint64_t tswap64(uint64_t s)
{
    return s;
}

static inline void tswap16s(uint16_t *s)
{
}

static inline void tswap32s(uint32_t *s)
{
}

static inline void tswap64s(uint64_t *s)
{
}

#endif

#if TARGET_LONG_SIZE == 4
#define tswapl(s) tswap32(s)
#define tswapls(s) tswap32s((uint32_t *)(s))
#define bswaptls(s) bswap32s(s)
#else
#define tswapl(s) tswap64(s)
#define tswapls(s) tswap64s((uint64_t *)(s))
#define bswaptls(s) bswap64s(s)
#endif
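
/*
 * Illustrative sketch (editor's example, not part of the original header):
 * tswap32() is a byte swap only when host and target endianness differ, so
 * a value stored in target byte order can always be read the same way.
 * read_target_u32 is a hypothetical helper; assumes <string.h> for memcpy.
 */
#if 0 /* example only */
static uint32_t read_target_u32(const uint8_t *guest_mem)
{
    uint32_t raw;
    memcpy(&raw, guest_mem, sizeof(raw)); /* bytes still in target order */
    return tswap32(raw);                  /* now in host order */
}
#endif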

typedef union {
    float32 f;
    uint32_t l;
} CPU_FloatU;

/* NOTE: the ARM FPA format is horrible, as the two 32 bit words of a
   double are stored in big endian order! */
typedef union {
    float64 d;
#if defined(WORDS_BIGENDIAN) \
    || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT))
    struct {
        uint32_t upper;
        uint32_t lower;
    } l;
#else
    struct {
        uint32_t lower;
        uint32_t upper;
    } l;
#endif
    uint64_t ll;
} CPU_DoubleU;
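
/*
 * Illustrative sketch (editor's example, not part of the original header):
 * CPU_DoubleU exposes the two 32 bit halves of a float64 without pointer
 * casts; the #if above keeps .upper/.lower correct for the host layout.
 * split_double is a hypothetical helper.
 */
#if 0 /* example only */
static void split_double(float64 d, uint32_t *hi, uint32_t *lo)
{
    CPU_DoubleU u;
    u.d = d;
    *hi = u.l.upper; /* most significant word */
    *lo = u.l.lower; /* least significant word */
}
#endif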

#ifdef TARGET_SPARC
typedef union {
    float128 q;
#if defined(WORDS_BIGENDIAN) \
    || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT))
    struct {
        uint32_t upmost;
        uint32_t upper;
        uint32_t lower;
        uint32_t lowest;
    } l;
    struct {
        uint64_t upper;
        uint64_t lower;
    } ll;
#else
    struct {
        uint32_t lowest;
        uint32_t lower;
        uint32_t upper;
        uint32_t upmost;
    } l;
    struct {
        uint64_t lower;
        uint64_t upper;
    } ll;
#endif
} CPU_QuadU;
#endif

/* CPU memory access without any memory or io remapping */

/*
 * the generic syntax for the memory accesses is:
 *
 * load: ld{type}{sign}{size}{endian}_{access_type}(ptr)
 *
 * store: st{type}{size}{endian}_{access_type}(ptr, val)
 *
 * type is:
 * (empty): integer access
 *   f    : float access
 *
 * sign is:
 * (empty): for floats or 32 bit size
 *   u    : unsigned
 *   s    : signed
 *
 * size is:
 *   b: 8 bits
 *   w: 16 bits
 *   l: 32 bits
 *   q: 64 bits
 *
 * endian is:
 * (empty): target cpu endianness or 8 bit access
 *   r    : reversed target cpu endianness (not implemented yet)
 *   be   : big endian (not implemented yet)
 *   le   : little endian (not implemented yet)
 *
 * access_type is:
 *   raw    : host memory access
 *   user   : user mode access using soft MMU
 *   kernel : kernel mode access using soft MMU
 */

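/*
 * Illustrative expansions of the scheme above (editor's example, not part
 * of the original header; naming_demo is a hypothetical helper using the
 * accessors defined later in this file).
 */
#if 0 /* example only */
static void naming_demo(uint8_t *buf)
{
    int      b = ldub_p(buf);    /* ld + unsigned + byte, host pointer          */
    int      w = ldsw_le_p(buf); /* ld + signed + word (16 bit) + little endian */
    uint64_t q = ldq_be_p(buf);  /* ld + quad (64 bit) + big endian             */
    stl_p(buf, 0x12345678);      /* st + 32 bit, target cpu endianness          */
    (void)b; (void)w; (void)q;
}
#endif
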
#ifdef VBOX
void remAbort(int rc, const char *pszTip) __attribute__((__noreturn__));

void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb);
RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys);
RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys);
RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys);
RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys);
RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys);
RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys);
uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys);
int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys);
void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb);
void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val);
void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val);
void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val);
void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val);

#ifndef REM_PHYS_ADDR_IN_TLB
void *remR3TlbGCPhys2Ptr(CPUState *env1, target_ulong physAddr, int fWritable);
#endif

#endif /* VBOX */

#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)

DECLINLINE(uint8_t) ldub_p(const void *ptr)
{
    VBOX_CHECK_ADDR(ptr);
    return remR3PhysReadU8((uintptr_t)ptr);
}

DECLINLINE(int8_t) ldsb_p(const void *ptr)
{
    VBOX_CHECK_ADDR(ptr);
    return remR3PhysReadS8((uintptr_t)ptr);
}

DECLINLINE(void) stb_p(void *ptr, int v)
{
    VBOX_CHECK_ADDR(ptr);
    remR3PhysWriteU8((uintptr_t)ptr, v);
}

DECLINLINE(uint32_t) lduw_le_p(const void *ptr)
{
    VBOX_CHECK_ADDR(ptr);
    return remR3PhysReadU16((uintptr_t)ptr);
}

DECLINLINE(int32_t) ldsw_le_p(const void *ptr)
{
    VBOX_CHECK_ADDR(ptr);
    return remR3PhysReadS16((uintptr_t)ptr);
}

DECLINLINE(void) stw_le_p(void *ptr, int v)
{
    VBOX_CHECK_ADDR(ptr);
    remR3PhysWriteU16((uintptr_t)ptr, v);
}

DECLINLINE(uint32_t) ldl_le_p(const void *ptr)
{
    VBOX_CHECK_ADDR(ptr);
    return remR3PhysReadU32((uintptr_t)ptr);
}

DECLINLINE(void) stl_le_p(void *ptr, int v)
{
    VBOX_CHECK_ADDR(ptr);
    remR3PhysWriteU32((uintptr_t)ptr, v);
}

DECLINLINE(void) stq_le_p(void *ptr, uint64_t v)
{
    VBOX_CHECK_ADDR(ptr);
    remR3PhysWriteU64((uintptr_t)ptr, v);
}

DECLINLINE(uint64_t) ldq_le_p(const void *ptr)
{
    VBOX_CHECK_ADDR(ptr);
    return remR3PhysReadU64((uintptr_t)ptr);
}

#undef VBOX_CHECK_ADDR

/* float access */

DECLINLINE(float32) ldfl_le_p(const void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_le_p(ptr);
    return u.f;
}

DECLINLINE(void) stfl_le_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_le_p(ptr, u.i);
}

DECLINLINE(float64) ldfq_le_p(const void *ptr)
{
    CPU_DoubleU u;
    u.l.lower = ldl_le_p(ptr);
    u.l.upper = ldl_le_p((uint8_t*)ptr + 4);
    return u.d;
}

DECLINLINE(void) stfq_le_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_le_p(ptr, u.l.lower);
    stl_le_p((uint8_t*)ptr + 4, u.l.upper);
}

#else /* !VBOX */

static inline int ldub_p(const void *ptr)
{
    return *(uint8_t *)ptr;
}

static inline int ldsb_p(const void *ptr)
{
    return *(int8_t *)ptr;
}

static inline void stb_p(void *ptr, int v)
{
    *(uint8_t *)ptr = v;
}

/* NOTE: on arm, putting 2 in /proc/sys/debug/alignment so that the
   kernel handles unaligned load/stores may give better results, but
   it is a system-wide setting: bad */
#if defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)

/* conservative code for little endian unaligned accesses */
static inline int lduw_le_p(const void *ptr)
{
#ifdef _ARCH_PPC
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    const uint8_t *p = ptr;
    return p[0] | (p[1] << 8);
#endif
}

static inline int ldsw_le_p(const void *ptr)
{
#ifdef _ARCH_PPC
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return (int16_t)val;
#else
    const uint8_t *p = ptr;
    return (int16_t)(p[0] | (p[1] << 8));
#endif
}

static inline int ldl_le_p(const void *ptr)
{
#ifdef _ARCH_PPC
    int val;
    __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    const uint8_t *p = ptr;
    return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
#endif
}

static inline uint64_t ldq_le_p(const void *ptr)
{
    const uint8_t *p = ptr;
    uint32_t v1, v2;
    v1 = ldl_le_p(p);
    v2 = ldl_le_p(p + 4);
    return v1 | ((uint64_t)v2 << 32);
}

static inline void stw_le_p(void *ptr, int v)
{
#ifdef _ARCH_PPC
    __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
#endif
}

static inline void stl_le_p(void *ptr, int v)
{
#ifdef _ARCH_PPC
    __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
    p[2] = v >> 16;
    p[3] = v >> 24;
#endif
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    uint8_t *p = ptr;
    stl_le_p(p, (uint32_t)v);
    stl_le_p(p + 4, v >> 32);
}
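
/*
 * Illustrative sketch (editor's example, not part of the original header):
 * on this conservative path the accessors go byte by byte, so deliberately
 * misaligned pointers are safe. le_roundtrip is a hypothetical helper.
 */
#if 0 /* example only */
static void le_roundtrip(void)
{
    uint8_t buf[9];
    stq_le_p(buf + 1, 0x1122334455667788ULL); /* unaligned store is fine */
    uint64_t v = ldq_le_p(buf + 1);           /* v == 0x1122334455667788 */
    (void)v;
}
#endif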

/* float access */

static inline float32 ldfl_le_p(const void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_le_p(ptr);
    return u.f;
}

static inline void stfl_le_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_le_p(ptr, u.i);
}

static inline float64 ldfq_le_p(const void *ptr)
{
    CPU_DoubleU u;
    u.l.lower = ldl_le_p(ptr);
    u.l.upper = ldl_le_p(ptr + 4);
    return u.d;
}

static inline void stfq_le_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_le_p(ptr, u.l.lower);
    stl_le_p(ptr + 4, u.l.upper);
}

#else

static inline int lduw_le_p(const void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw_le_p(const void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl_le_p(const void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq_le_p(const void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw_le_p(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl_le_p(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float32 ldfl_le_p(const void *ptr)
{
    return *(float32 *)ptr;
}

static inline float64 ldfq_le_p(const void *ptr)
{
    return *(float64 *)ptr;
}

static inline void stfl_le_p(void *ptr, float32 v)
{
    *(float32 *)ptr = v;
}

static inline void stfq_le_p(void *ptr, float64 v)
{
    *(float64 *)ptr = v;
}
#endif
#endif /* !VBOX */

#if !defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)

static inline int lduw_be_p(const void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return val;
#else
    const uint8_t *b = ptr;
    return ((b[0] << 8) | b[1]);
#endif
}

static inline int ldsw_be_p(const void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return (int16_t)val;
#else
    const uint8_t *b = ptr;
    return (int16_t)((b[0] << 8) | b[1]);
#endif
}

static inline int ldl_be_p(const void *ptr)
{
#if defined(__i386__) || defined(__x86_64__)
    int val;
    asm volatile ("movl %1, %0\n"
                  "bswap %0\n"
                  : "=r" (val)
                  : "m" (*(uint32_t *)ptr));
    return val;
#else
    const uint8_t *b = ptr;
    return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
#endif
}

static inline uint64_t ldq_be_p(const void *ptr)
{
    uint32_t a, b;
    a = ldl_be_p(ptr);
    b = ldl_be_p((uint8_t *)ptr + 4);
    return (((uint64_t)a << 32) | b);
}

static inline void stw_be_p(void *ptr, int v)
{
#if defined(__i386__)
    asm volatile ("xchgb %b0, %h0\n"
                  "movw %w0, %1\n"
                  : "=q" (v)
                  : "m" (*(uint16_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 8;
    d[1] = v;
#endif
}

static inline void stl_be_p(void *ptr, int v)
{
#if defined(__i386__) || defined(__x86_64__)
    asm volatile ("bswap %0\n"
                  "movl %0, %1\n"
                  : "=r" (v)
                  : "m" (*(uint32_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 24;
    d[1] = v >> 16;
    d[2] = v >> 8;
    d[3] = v;
#endif
}

static inline void stq_be_p(void *ptr, uint64_t v)
{
    stl_be_p(ptr, v >> 32);
    stl_be_p((uint8_t *)ptr + 4, v);
}

/* float access */

static inline float32 ldfl_be_p(const void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_be_p(ptr);
    return u.f;
}

static inline void stfl_be_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_be_p(ptr, u.i);
}

static inline float64 ldfq_be_p(const void *ptr)
{
    CPU_DoubleU u;
    u.l.upper = ldl_be_p(ptr);
    u.l.lower = ldl_be_p((uint8_t *)ptr + 4);
    return u.d;
}

static inline void stfq_be_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_be_p(ptr, u.l.upper);
    stl_be_p((uint8_t *)ptr + 4, u.l.lower);
}

#else

static inline int lduw_be_p(const void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw_be_p(const void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl_be_p(const void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq_be_p(const void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw_be_p(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl_be_p(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq_be_p(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float32 ldfl_be_p(const void *ptr)
{
    return *(float32 *)ptr;
}

static inline float64 ldfq_be_p(const void *ptr)
{
    return *(float64 *)ptr;
}

static inline void stfl_be_p(void *ptr, float32 v)
{
    *(float32 *)ptr = v;
}

static inline void stfq_be_p(void *ptr, float64 v)
{
    *(float64 *)ptr = v;
}

#endif

/* target CPU memory access functions */
#if defined(TARGET_WORDS_BIGENDIAN)
#define lduw_p(p) lduw_be_p(p)
#define ldsw_p(p) ldsw_be_p(p)
#define ldl_p(p) ldl_be_p(p)
#define ldq_p(p) ldq_be_p(p)
#define ldfl_p(p) ldfl_be_p(p)
#define ldfq_p(p) ldfq_be_p(p)
#define stw_p(p, v) stw_be_p(p, v)
#define stl_p(p, v) stl_be_p(p, v)
#define stq_p(p, v) stq_be_p(p, v)
#define stfl_p(p, v) stfl_be_p(p, v)
#define stfq_p(p, v) stfq_be_p(p, v)
#else
#define lduw_p(p) lduw_le_p(p)
#define ldsw_p(p) ldsw_le_p(p)
#define ldl_p(p) ldl_le_p(p)
#define ldq_p(p) ldq_le_p(p)
#define ldfl_p(p) ldfl_le_p(p)
#define ldfq_p(p) ldfq_le_p(p)
#define stw_p(p, v) stw_le_p(p, v)
#define stl_p(p, v) stl_le_p(p, v)
#define stq_p(p, v) stq_le_p(p, v)
#define stfl_p(p, v) stfl_le_p(p, v)
#define stfq_p(p, v) stfq_le_p(p, v)
#endif
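
/*
 * Illustrative sketch (editor's example, not part of the original header):
 * the unsuffixed macros resolve at compile time, so target code stays
 * endian neutral. read_guest_long is a hypothetical helper.
 */
#if 0 /* example only */
static uint32_t read_guest_long(void *host_ptr)
{
    /* expands to ldl_be_p() for big endian targets, ldl_le_p() otherwise */
    return ldl_p(host_ptr);
}
#endif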

/* MMU memory access macros */

#if defined(CONFIG_USER_ONLY)
#include <assert.h>
#include "qemu-types.h"

/* On some host systems the guest address space is reserved on the host.
 * This allows the guest address space to be offset to a convenient location.
 */
//#define GUEST_BASE 0x20000000
#define GUEST_BASE 0

/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
#define g2h(x) ((void *)((unsigned long)(x) + GUEST_BASE))
#define h2g(x) ({ \
    unsigned long __ret = (unsigned long)(x) - GUEST_BASE; \
    /* Check if given address fits target address space */ \
    assert(__ret == (abi_ulong)__ret); \
    (abi_ulong)__ret; \
})
#define h2g_valid(x) ({ \
    unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \
    (__guest == (abi_ulong)__guest); \
})

#define saddr(x) g2h(x)
#define laddr(x) g2h(x)

#else /* !CONFIG_USER_ONLY */
/* NOTE: we use double casts if pointers and target_ulong have
   different sizes */
#define saddr(x) (uint8_t *)(long)(x)
#define laddr(x) (uint8_t *)(long)(x)
#endif
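
/*
 * Illustrative sketch (editor's example, not part of the original header):
 * in a CONFIG_USER_ONLY build, g2h()/h2g() just offset by GUEST_BASE, so a
 * guest buffer can be copied through the host pointer directly; h2g()
 * asserts that the reverse mapping fits in abi_ulong. copy_from_guest is a
 * hypothetical helper and assumes <string.h> for memcpy.
 */
#if 0 /* example only */
static void copy_from_guest(void *dst, abi_ulong guest_addr, size_t len)
{
    memcpy(dst, g2h(guest_addr), len); /* host view of guest memory */
}
#endif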

#define ldub_raw(p) ldub_p(laddr((p)))
#define ldsb_raw(p) ldsb_p(laddr((p)))
#define lduw_raw(p) lduw_p(laddr((p)))
#define ldsw_raw(p) ldsw_p(laddr((p)))
#define ldl_raw(p) ldl_p(laddr((p)))
#define ldq_raw(p) ldq_p(laddr((p)))
#define ldfl_raw(p) ldfl_p(laddr((p)))
#define ldfq_raw(p) ldfq_p(laddr((p)))
#define stb_raw(p, v) stb_p(saddr((p)), v)
#define stw_raw(p, v) stw_p(saddr((p)), v)
#define stl_raw(p, v) stl_p(saddr((p)), v)
#define stq_raw(p, v) stq_p(saddr((p)), v)
#define stfl_raw(p, v) stfl_p(saddr((p)), v)
#define stfq_raw(p, v) stfq_p(saddr((p)), v)


#if defined(CONFIG_USER_ONLY)

/* if user mode, no other memory access functions */
#define ldub(p) ldub_raw(p)
#define ldsb(p) ldsb_raw(p)
#define lduw(p) lduw_raw(p)
#define ldsw(p) ldsw_raw(p)
#define ldl(p) ldl_raw(p)
#define ldq(p) ldq_raw(p)
#define ldfl(p) ldfl_raw(p)
#define ldfq(p) ldfq_raw(p)
#define stb(p, v) stb_raw(p, v)
#define stw(p, v) stw_raw(p, v)
#define stl(p, v) stl_raw(p, v)
#define stq(p, v) stq_raw(p, v)
#define stfl(p, v) stfl_raw(p, v)
#define stfq(p, v) stfq_raw(p, v)

#define ldub_code(p) ldub_raw(p)
#define ldsb_code(p) ldsb_raw(p)
#define lduw_code(p) lduw_raw(p)
#define ldsw_code(p) ldsw_raw(p)
#define ldl_code(p) ldl_raw(p)
#define ldq_code(p) ldq_raw(p)

#define ldub_kernel(p) ldub_raw(p)
#define ldsb_kernel(p) ldsb_raw(p)
#define lduw_kernel(p) lduw_raw(p)
#define ldsw_kernel(p) ldsw_raw(p)
#define ldl_kernel(p) ldl_raw(p)
#define ldq_kernel(p) ldq_raw(p)
#define ldfl_kernel(p) ldfl_raw(p)
#define ldfq_kernel(p) ldfq_raw(p)
#define stb_kernel(p, v) stb_raw(p, v)
#define stw_kernel(p, v) stw_raw(p, v)
#define stl_kernel(p, v) stl_raw(p, v)
#define stq_kernel(p, v) stq_raw(p, v)
#define stfl_kernel(p, v) stfl_raw(p, v)
#define stfq_kernel(p, v) stfq_raw(p, v)

#endif /* defined(CONFIG_USER_ONLY) */

/* page related stuff */

#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)
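
/*
 * Worked example (editor's illustration, not part of the original header):
 * with TARGET_PAGE_BITS == 12, TARGET_PAGE_SIZE is 0x1000 and
 * TARGET_PAGE_MASK is ~0xfff, so (0x1234 & TARGET_PAGE_MASK) == 0x1000
 * (start of page) and TARGET_PAGE_ALIGN(0x1234) == 0x2000 (round up).
 */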

/* ??? These should be the larger of unsigned long and target_ulong. */
extern unsigned long qemu_real_host_page_size;
extern unsigned long qemu_host_page_bits;
extern unsigned long qemu_host_page_size;
extern unsigned long qemu_host_page_mask;

#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)

/* same as PROT_xxx */
#define PAGE_READ  0x0001
#define PAGE_WRITE 0x0002
#define PAGE_EXEC  0x0004
#define PAGE_BITS  (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
#define PAGE_VALID 0x0008
/* original state of the write flag (used when tracking self-modifying
   code) */
#define PAGE_WRITE_ORG 0x0010
#define PAGE_RESERVED  0x0020

void page_dump(FILE *f);
int page_get_flags(target_ulong address);
void page_set_flags(target_ulong start, target_ulong end, int flags);
int page_check_range(target_ulong start, target_ulong len, int flags);

void cpu_exec_init_all(unsigned long tb_size);
CPUState *cpu_copy(CPUState *env);

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags);
void cpu_dump_statistics(CPUState *env, FILE *f,
                         int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                         int flags);

void QEMU_NORETURN cpu_abort(CPUState *env, const char *fmt, ...)
#ifndef VBOX
    __attribute__ ((__format__ (__printf__, 2, 3)));
#else
    ;
#endif
extern CPUState *first_cpu;
extern CPUState *cpu_single_env;
extern int64_t qemu_icount;
extern int use_icount;

#define CPU_INTERRUPT_EXIT   0x01  /* wants exit from main loop */
#define CPU_INTERRUPT_HARD   0x02  /* hardware interrupt pending */
#define CPU_INTERRUPT_EXITTB 0x04  /* exit the current TB (use for x86 a20 case) */
#define CPU_INTERRUPT_TIMER  0x08  /* internal timer exception pending */
#define CPU_INTERRUPT_FIQ    0x10  /* Fast interrupt pending. */
#define CPU_INTERRUPT_HALT   0x20  /* CPU halt wanted */
#define CPU_INTERRUPT_SMI    0x40  /* (x86 only) SMI interrupt pending */
#define CPU_INTERRUPT_DEBUG  0x80  /* Debug event occurred. */
#define CPU_INTERRUPT_VIRQ   0x100 /* virtual interrupt pending. */
#define CPU_INTERRUPT_NMI    0x200 /* NMI pending. */

#ifdef VBOX
/** Executes a single instruction. cpu_exec() will normally return EXCP_SINGLE_INSTR. */
# define CPU_INTERRUPT_SINGLE_INSTR             0x0400
/** Executing a CPU_INTERRUPT_SINGLE_INSTR request, quit the cpu_loop. (for exceptions and suchlike) */
# define CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT   0x0800
/** VM execution was interrupted by VMR3Reset, VMR3Suspend or VMR3PowerOff. */
# define CPU_INTERRUPT_RC                       0x1000
/** Exit current TB to process an external interrupt request (also in op.c!!) */
# define CPU_INTERRUPT_EXTERNAL_EXIT            0x2000
/** Exit current TB to process an external interrupt request (also in op.c!!) */
# define CPU_INTERRUPT_EXTERNAL_HARD            0x4000
/** Exit current TB to process an external interrupt request (also in op.c!!) */
# define CPU_INTERRUPT_EXTERNAL_TIMER           0x8000
/** Exit current TB to process an external interrupt request (also in op.c!!) */
# define CPU_INTERRUPT_EXTERNAL_DMA             0x10000
#endif /* VBOX */
void cpu_interrupt(CPUState *s, int mask);
void cpu_reset_interrupt(CPUState *env, int mask);

/* Breakpoint/watchpoint flags */
#define BP_MEM_READ           0x01
#define BP_MEM_WRITE          0x02
#define BP_MEM_ACCESS         (BP_MEM_READ | BP_MEM_WRITE)
#define BP_STOP_BEFORE_ACCESS 0x04
#define BP_WATCHPOINT_HIT     0x08
#define BP_GDB                0x10
#define BP_CPU                0x20

int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint);
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags);
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint);
void cpu_breakpoint_remove_all(CPUState *env, int mask);
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint);
int cpu_watchpoint_remove(CPUState *env, target_ulong addr,
                          target_ulong len, int flags);
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint);
void cpu_watchpoint_remove_all(CPUState *env, int mask);
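
/*
 * Illustrative sketch (editor's example, not part of the original header):
 * a GDB stub might plant a breakpoint and a 4 byte write watchpoint like
 * this. plant_debug_points is a hypothetical helper; error handling elided.
 */
#if 0 /* example only */
static void plant_debug_points(CPUState *env, target_ulong pc, target_ulong addr)
{
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
    cpu_breakpoint_insert(env, pc, BP_GDB, &bp);
    cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE | BP_GDB, &wp);
}
#endif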

#define SSTEP_ENABLE  0x1 /* Enable simulated HW single stepping */
#define SSTEP_NOIRQ   0x2 /* Do not use IRQ while single stepping */
#define SSTEP_NOTIMER 0x4 /* Do not use timers while single stepping */

void cpu_single_step(CPUState *env, int enabled);
void cpu_reset(CPUState *s);

/* Return the physical page corresponding to a virtual one. Use it
   only for debugging because no protection checks are done. Return -1
   if no page found. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr);

#define CPU_LOG_TB_OUT_ASM (1 << 0)
#define CPU_LOG_TB_IN_ASM  (1 << 1)
#define CPU_LOG_TB_OP      (1 << 2)
#define CPU_LOG_TB_OP_OPT  (1 << 3)
#define CPU_LOG_INT        (1 << 4)
#define CPU_LOG_EXEC       (1 << 5)
#define CPU_LOG_PCALL      (1 << 6)
#define CPU_LOG_IOPORT     (1 << 7)
#define CPU_LOG_TB_CPU     (1 << 8)
#define CPU_LOG_RESET      (1 << 9)

/* define log items */
typedef struct CPULogItem {
    int mask;
    const char *name;
    const char *help;
} CPULogItem;

extern const CPULogItem cpu_log_items[];

void cpu_set_log(int log_flags);
void cpu_set_log_filename(const char *filename);
int cpu_str_to_log_mask(const char *str);

/* IO ports API */

/* NOTE: as these functions may even be used when there is an ISA
   bridge on non-x86 targets, we always define them. */
#ifndef NO_CPU_IO_DEFS
void cpu_outb(CPUState *env, int addr, int val);
void cpu_outw(CPUState *env, int addr, int val);
void cpu_outl(CPUState *env, int addr, int val);
int cpu_inb(CPUState *env, int addr);
int cpu_inw(CPUState *env, int addr);
int cpu_inl(CPUState *env, int addr);
#endif
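
/*
 * Illustrative sketch (editor's example, not part of the original header):
 * reading a PCI config dword through the legacy 0xcf8/0xcfc ports with the
 * helpers above. pci_cfg_read is a hypothetical helper.
 */
#if 0 /* example only */
static uint32_t pci_cfg_read(CPUState *env, uint32_t cf8)
{
    cpu_outl(env, 0xcf8, cf8);  /* select bus/device/function/register */
    return cpu_inl(env, 0xcfc); /* read the selected config dword */
}
#endif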

/* address in the RAM (different from a physical address) */
#ifdef USE_KQEMU
typedef uint32_t ram_addr_t;
#else
typedef unsigned long ram_addr_t;
#endif

/* memory API */

#ifndef VBOX
extern ram_addr_t phys_ram_size;
extern int phys_ram_fd;
extern uint8_t *phys_ram_base;
extern uint8_t *phys_ram_dirty;
extern ram_addr_t ram_size;
#else /* VBOX */
extern RTGCPHYS phys_ram_size;
/** This is required for bounds checking the phys_ram_dirty accesses. */
extern RTGCPHYS phys_ram_dirty_size;
extern uint8_t *phys_ram_dirty;
#endif /* VBOX */

/* physical memory access */

/* MMIO pages are identified by a combination of an IO device index and
   3 flags. The ROMD code stores the page ram offset in the iotlb entry,
   so only a limited number of ids are available. */

#define IO_MEM_SHIFT      3
#define IO_MEM_NB_ENTRIES (1 << (TARGET_PAGE_BITS - IO_MEM_SHIFT))

#define IO_MEM_RAM        (0 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_ROM        (1 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_UNASSIGNED (2 << IO_MEM_SHIFT)
#define IO_MEM_NOTDIRTY   (3 << IO_MEM_SHIFT)

/* Acts like a ROM when read and like a device when written. */
#define IO_MEM_ROMD     (1)
#define IO_MEM_SUBPAGE  (2)
#define IO_MEM_SUBWIDTH (4)

/* Flags stored in the low bits of the TLB virtual address. These are
   defined so that fast path ram access is all zeros. */
/* Zero if TLB entry is valid. */
#define TLB_INVALID_MASK (1 << 3)
/* Set if TLB entry references a clean RAM page. The iotlb entry will
   contain the page physical address. */
#define TLB_NOTDIRTY (1 << 4)
/* Set if TLB entry is an IO callback. */
#define TLB_MMIO (1 << 5)

typedef void CPUWriteMemoryFunc(void *opaque, target_phys_addr_t addr, uint32_t value);
typedef uint32_t CPUReadMemoryFunc(void *opaque, target_phys_addr_t addr);

void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset);
static inline void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                                ram_addr_t size,
                                                ram_addr_t phys_offset)
{
    cpu_register_physical_memory_offset(start_addr, size, phys_offset, 0);
}

ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr);
ram_addr_t qemu_ram_alloc(ram_addr_t);
void qemu_ram_free(ram_addr_t addr);
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque);
void cpu_unregister_io_memory(int table_address);
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index);
CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index);
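
/*
 * Illustrative sketch (editor's example, not part of the original header):
 * a device registers byte/word/long MMIO callbacks and maps them over a
 * page of guest physical address space. The my_readX/my_writeX callbacks
 * and map_my_device are hypothetical.
 */
#if 0 /* example only */
static CPUReadMemoryFunc  *my_reads[3]  = { my_readb, my_readw, my_readl };
static CPUWriteMemoryFunc *my_writes[3] = { my_writeb, my_writew, my_writel };

static void map_my_device(target_phys_addr_t base)
{
    /* io_index 0 asks for a free slot; the result is used as phys_offset */
    int io = cpu_register_io_memory(0, my_reads, my_writes, NULL);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
}
#endif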

void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write);
static inline void cpu_physical_memory_read(target_phys_addr_t addr,
                                            uint8_t *buf, int len)
{
    cpu_physical_memory_rw(addr, buf, len, 0);
}
static inline void cpu_physical_memory_write(target_phys_addr_t addr,
                                             const uint8_t *buf, int len)
{
    cpu_physical_memory_rw(addr, (uint8_t *)buf, len, 1);
}
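
/*
 * Illustrative sketch (editor's example, not part of the original header):
 * the two wrappers above are direction-fixed calls into
 * cpu_physical_memory_rw(). patch_guest_dword is a hypothetical helper.
 */
#if 0 /* example only */
static void patch_guest_dword(target_phys_addr_t addr, uint32_t val)
{
    uint8_t buf[4];
    stl_p(buf, val);                         /* encode in target endianness */
    cpu_physical_memory_write(addr, buf, 4); /* DMA-style store */
}
#endif
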
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write);
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len);
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque));
void cpu_unregister_map_client(void *cookie);

uint32_t ldub_phys(target_phys_addr_t addr);
uint32_t lduw_phys(target_phys_addr_t addr);
uint32_t ldl_phys(target_phys_addr_t addr);
uint64_t ldq_phys(target_phys_addr_t addr);
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val);
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val);
void stb_phys(target_phys_addr_t addr, uint32_t val);
void stw_phys(target_phys_addr_t addr, uint32_t val);
void stl_phys(target_phys_addr_t addr, uint32_t val);
void stq_phys(target_phys_addr_t addr, uint64_t val);

void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len);
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write);

#define VGA_DIRTY_FLAG       0x01
#define CODE_DIRTY_FLAG      0x02
#define KQEMU_DIRTY_FLAG     0x04
#define MIGRATION_DIRTY_FLAG 0x08

/* read dirty bit (return 0 or 1) */
#ifndef VBOX
static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
{
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
}
#else /* VBOX */
DECLINLINE(int) cpu_physical_memory_is_dirty(ram_addr_t addr)
{
    if (RT_UNLIKELY((addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
    {
        Log(("cpu_physical_memory_is_dirty: %RGp\n", (RTGCPHYS)addr));
        /*AssertMsgFailed(("cpu_physical_memory_is_dirty: %RGp\n", (RTGCPHYS)addr));*/
        return 0;
    }
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
}
#endif /* VBOX */

#ifndef VBOX
static inline int cpu_physical_memory_get_dirty(ram_addr_t addr,
                                                int dirty_flags)
{
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags;
}
#else /* VBOX */
DECLINLINE(int) cpu_physical_memory_get_dirty(ram_addr_t addr,
                                              int dirty_flags)
{
    if (RT_UNLIKELY((addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
    {
        Log(("cpu_physical_memory_get_dirty: %RGp\n", (RTGCPHYS)addr));
        /*AssertMsgFailed(("cpu_physical_memory_get_dirty: %RGp\n", (RTGCPHYS)addr));*/
        return 0xff & dirty_flags; /** @todo I don't think this is the right thing to return, fix! */
    }
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags;
}
#endif /* VBOX */

#ifndef VBOX
static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
{
    phys_ram_dirty[addr >> TARGET_PAGE_BITS] = 0xff;
}
#else /* VBOX */
DECLINLINE(void) cpu_physical_memory_set_dirty(ram_addr_t addr)
{
    if (RT_UNLIKELY((addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
    {
        Log(("cpu_physical_memory_set_dirty: %RGp\n", (RTGCPHYS)addr));
        /*AssertMsgFailed(("cpu_physical_memory_set_dirty: %RGp\n", (RTGCPHYS)addr));*/
        return;
    }
    phys_ram_dirty[addr >> TARGET_PAGE_BITS] = 0xff;
}
#endif /* VBOX */
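
/*
 * Illustrative sketch (editor's example, not part of the original header):
 * a display device would test the VGA dirty bit per page and clear only
 * that bit after redrawing. refresh_page is a hypothetical helper.
 */
#if 0 /* example only */
static void refresh_page(ram_addr_t page)
{
    if (cpu_physical_memory_get_dirty(page, VGA_DIRTY_FLAG)) {
        /* ... redraw the pixels backed by this page ... */
        cpu_physical_memory_reset_dirty(page, page + TARGET_PAGE_SIZE,
                                        VGA_DIRTY_FLAG);
    }
}
#endif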

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags);
void cpu_tlb_update_dirty(CPUState *env);

int cpu_physical_memory_set_dirty_tracking(int enable);

int cpu_physical_memory_get_dirty_tracking(void);

void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr);

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...));

/* Coalesced MMIO regions are areas where write operations can be reordered.
 * This usually implies that write operations are side-effect free. This allows
 * batching, which can have a major impact on performance when using
 * virtualization.
 */
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);

/*******************************************/
/* host CPU ticks (if available) */

#if defined(_ARCH_PPC)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t retval;
#ifdef _ARCH_PPC64
    /* This reads timebase in one 64bit go and includes Cell workaround from:
       http://ozlabs.org/pipermail/linuxppc-dev/2006-October/027052.html
     */
    __asm__ __volatile__ (
        "mftb    %0\n\t"
        "cmpwi   %0,0\n\t"
        "beq-    $-8"
        : "=r" (retval));
#else
    /* http://ozlabs.org/pipermail/linuxppc-dev/1999-October/003889.html */
    unsigned long junk;
    __asm__ __volatile__ (
        "mftbu   %1\n\t"
        "mftb    %L0\n\t"
        "mftbu   %0\n\t"
        "cmpw    %0,%1\n\t"
        "bne     $-16"
        : "=r" (retval), "=r" (junk));
#endif
    return retval;
}

#elif defined(__i386__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("rdtsc" : "=A" (val));
    return val;
}

#elif defined(__x86_64__)

static inline int64_t cpu_get_real_ticks(void)
{
    uint32_t low, high;
    int64_t val;
    asm volatile("rdtsc" : "=a" (low), "=d" (high));
    val = high;
    val <<= 32;
    val |= low;
    return val;
}

#elif defined(__hppa__)

static inline int64_t cpu_get_real_ticks(void)
{
    int val;
    asm volatile ("mfctl %%cr16, %0" : "=r"(val));
    return val;
}

#elif defined(__ia64)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory");
    return val;
}

#elif defined(__s390__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc");
    return val;
}

#elif defined(__sparc_v8plus__) || defined(__sparc_v8plusa__) || defined(__sparc_v9__)

static inline int64_t cpu_get_real_ticks (void)
{
#if defined(_LP64)
    uint64_t rval;
    asm volatile("rd %%tick,%0" : "=r"(rval));
    return rval;
#else
    union {
        uint64_t i64;
        struct {
            uint32_t high;
            uint32_t low;
        } i32;
    } rval;
    asm volatile("rd %%tick,%1; srlx %1,32,%0"
                 : "=r"(rval.i32.high), "=r"(rval.i32.low));
    return rval.i64;
#endif
}

#elif defined(__mips__)

static inline int64_t cpu_get_real_ticks(void)
{
#if __mips_isa_rev >= 2
    uint32_t count;
    static uint32_t cyc_per_count = 0;

    if (!cyc_per_count)
        __asm__ __volatile__("rdhwr %0, $3" : "=r" (cyc_per_count));

    __asm__ __volatile__("rdhwr %0, $2" : "=r" (count));
    return (int64_t)(count * cyc_per_count);
#else
    /* FIXME */
    static int64_t ticks = 0;
    return ticks++;
#endif
}

#else
/* The host CPU doesn't have an easily accessible cycle counter.
   Just return a monotonically increasing value. This will be
   totally wrong, but hopefully better than nothing. */
static inline int64_t cpu_get_real_ticks (void)
{
    static int64_t ticks = 0;
    return ticks++;
}
#endif
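
/*
 * Illustrative sketch (editor's example, not part of the original header):
 * the counters above are raw, frequency-dependent tick values, so callers
 * should only use differences between two readings. ticks_elapsed is a
 * hypothetical helper.
 */
#if 0 /* example only */
static int64_t ticks_elapsed(void (*fn)(void))
{
    int64_t t0 = cpu_get_real_ticks();
    fn();
    return cpu_get_real_ticks() - t0; /* host ticks, not wall-clock time */
}
#endif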

/* profiling */
#ifdef CONFIG_PROFILER
static inline int64_t profile_getclock(void)
{
    return cpu_get_real_ticks();
}

extern int64_t kqemu_time, kqemu_time_start;
extern int64_t qemu_time, qemu_time_start;
extern int64_t tlb_flush_time;
extern int64_t kqemu_exec_count;
extern int64_t dev_time;
extern int64_t kqemu_ret_int_count;
extern int64_t kqemu_ret_excp_count;
extern int64_t kqemu_ret_intr_count;
#endif

#ifdef VBOX
void tb_invalidate_virt(CPUState *env, uint32_t eip);
#endif /* VBOX */

#endif /* CPU_ALL_H */