VirtualBox

source: vbox/trunk/src/VBox/Additions/x11/x11include/xorg-server-1.3.0.0/compiler.h@ 34721

Last change on this file since 34721 was 25078, checked in by vboxsync, 15 years ago

Additions/x11/x11include: exported and set eol-style on new headers

  • Property svn:eol-style set to native
File size: 49.9 KB
1/*
2 * Copyright 1990,91 by Thomas Roell, Dinkelscherben, Germany.
3 *
4 * Permission to use, copy, modify, distribute, and sell this software and its
5 * documentation for any purpose is hereby granted without fee, provided that
6 * the above copyright notice appear in all copies and that both that
7 * copyright notice and this permission notice appear in supporting
8 * documentation, and that the name of Thomas Roell not be used in
9 * advertising or publicity pertaining to distribution of the software without
10 * specific, written prior permission. Thomas Roell makes no representations
11 * about the suitability of this software for any purpose. It is provided
12 * "as is" without express or implied warranty.
13 *
14 * THOMAS ROELL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
15 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
16 * EVENT SHALL THOMAS ROELL BE LIABLE FOR ANY SPECIAL, INDIRECT OR
17 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
18 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
19 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
20 * PERFORMANCE OF THIS SOFTWARE.
21 *
22 */
23/*
24 * Copyright (c) 1994-2003 by The XFree86 Project, Inc.
25 *
26 * Permission is hereby granted, free of charge, to any person obtaining a
27 * copy of this software and associated documentation files (the "Software"),
28 * to deal in the Software without restriction, including without limitation
29 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
30 * and/or sell copies of the Software, and to permit persons to whom the
31 * Software is furnished to do so, subject to the following conditions:
32 *
33 * The above copyright notice and this permission notice shall be included in
34 * all copies or substantial portions of the Software.
35 *
36 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
37 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
38 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
39 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
40 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
41 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
42 * OTHER DEALINGS IN THE SOFTWARE.
43 *
44 * Except as contained in this notice, the name of the copyright holder(s)
45 * and author(s) shall not be used in advertising or otherwise to promote
46 * the sale, use or other dealings in this Software without prior written
47 * authorization from the copyright holder(s) and author(s).
48 */
49
50/* $XConsortium: compiler.h /main/16 1996/10/25 15:38:34 kaleb $ */
51
52#ifndef _COMPILER_H
53
54# define _COMPILER_H
55
56#if defined(__SUNPRO_C)
57# define DO_PROTOTYPES
58#endif
59
60/* Allow drivers to use the GCC-supported __inline__ and/or __inline. */
61# ifndef __inline__
62# if defined(__GNUC__)
63 /* gcc has __inline__ */
64# elif defined(__HIGHC__)
65# define __inline__ _Inline
66# else
67# define __inline__ /**/
68# endif
69# endif /* __inline__ */
70# ifndef __inline
71# if defined(__GNUC__)
72 /* gcc has __inline */
73# elif defined(__HIGHC__)
74# define __inline _Inline
75# else
76# define __inline /**/
77# endif
78# endif /* __inline */
79
80# if defined(IODEBUG) && defined(__GNUC__)
81# define outb RealOutb
82# define outw RealOutw
83# define outl RealOutl
84# define inb RealInb
85# define inw RealInw
86# define inl RealInl
87# endif
88
89# if defined(QNX4) /* Do this for now to keep Watcom happy */
90# define outb outp
91# define outw outpw
92# define outl outpd
93# define inb inp
94# define inw inpw
95# define inl inpd
96
97/* Define the ffs function for inlining */
98extern int ffs(unsigned long);
99# pragma aux ffs_ = \
100 "bsf edx, eax" \
101 "jnz bits_set" \
102 "xor eax, eax" \
103 "jmp exit1" \
104 "bits_set:" \
105 "mov eax, edx" \
106 "inc eax" \
107 "exit1:" \
108 __parm [eax] \
109 __modify [eax edx] \
110 __value [eax] \
111 ;
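/*
 * Illustrative sketch (not part of the original header): the semantics the
 * Watcom "pragma aux" above implements, written as plain C for reference.
 * ffs() returns the 1-based index of the least significant set bit, or 0
 * when no bit is set.  The name ffs_c_reference is hypothetical.
 */
static __inline__ int ffs_c_reference(unsigned long mask)
{
    int bit;
    if (mask == 0)
        return 0;
    for (bit = 1; !(mask & 1); bit++)
        mask >>= 1;
    return bit;
}
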
112# endif
113
114# if defined(__SUNPRO_C)
115# define DO_PROTOTYPES
116# endif
117
118# if defined(NO_INLINE) || defined(DO_PROTOTYPES)
119
120# if !defined(__arm__)
121# if !defined(__sparc__) && !defined(__sparc) && !defined(__arm32__) \
122 && !(defined(__alpha__) && defined(linux)) \
123 && !(defined(__ia64__) && defined(linux)) \
124
125extern void outb(unsigned short, unsigned char);
126extern void outw(unsigned short, unsigned short);
127extern void outl(unsigned short, unsigned int);
128extern unsigned int inb(unsigned short);
129extern unsigned int inw(unsigned short);
130extern unsigned int inl(unsigned short);
131
132# else /* __sparc__, __arm32__, __alpha__*/
133
134extern void outb(unsigned long, unsigned char);
135extern void outw(unsigned long, unsigned short);
136extern void outl(unsigned long, unsigned int);
137extern unsigned int inb(unsigned long);
138extern unsigned int inw(unsigned long);
139extern unsigned int inl(unsigned long);
140
141# endif /* __sparc__, __arm32__, __alpha__ */
142# endif /* __arm__ */
143
144extern unsigned long ldq_u(unsigned long *);
145extern unsigned long ldl_u(unsigned int *);
146extern unsigned long ldw_u(unsigned short *);
147extern void stq_u(unsigned long, unsigned long *);
148extern void stl_u(unsigned long, unsigned int *);
149extern void stw_u(unsigned long, unsigned short *);
150extern void mem_barrier(void);
151extern void write_mem_barrier(void);
152extern void stl_brx(unsigned long, volatile unsigned char *, int);
153extern void stw_brx(unsigned short, volatile unsigned char *, int);
154extern unsigned long ldl_brx(volatile unsigned char *, int);
155extern unsigned short ldw_brx(volatile unsigned char *, int);
156
157# endif
158
159# ifndef NO_INLINE
160# ifdef __GNUC__
161# if (defined(linux) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)) && (defined(__alpha__))
162
163# ifdef linux
164/* for Linux on Alpha, we use the LIBC _inx/_outx routines */
165/* note that the appropriate setup via "ioperm" needs to be done */
166/* *before* any inx/outx is done. */
167
168extern void (*_alpha_outb)(char val, unsigned long port);
169static __inline__ void
170outb(unsigned long port, unsigned char val)
171{
172 _alpha_outb(val, port);
173}
174
175extern void (*_alpha_outw)(short val, unsigned long port);
176static __inline__ void
177outw(unsigned long port, unsigned short val)
178{
179 _alpha_outw(val, port);
180}
181
182extern void (*_alpha_outl)(int val, unsigned long port);
183static __inline__ void
184outl(unsigned long port, unsigned int val)
185{
186 _alpha_outl(val, port);
187}
188
189extern unsigned int (*_alpha_inb)(unsigned long port);
190static __inline__ unsigned int
191inb(unsigned long port)
192{
193 return _alpha_inb(port);
194}
195
196extern unsigned int (*_alpha_inw)(unsigned long port);
197static __inline__ unsigned int
198inw(unsigned long port)
199{
200 return _alpha_inw(port);
201}
202
203extern unsigned int (*_alpha_inl)(unsigned long port);
204static __inline__ unsigned int
205inl(unsigned long port)
206{
207 return _alpha_inl(port);
208}
209
210# endif /* linux */
211
212# if (defined(__FreeBSD__) || defined(__OpenBSD__)) \
213 && !defined(DO_PROTOTYPES)
214
215/* for FreeBSD and OpenBSD on Alpha, we use the libio (resp. libalpha) */
216/* inx/outx routines */
217/* note that the appropriate setup via "ioperm" needs to be done */
218/* *before* any inx/outx is done. */
219
220extern void outb(unsigned int port, unsigned char val);
221extern void outw(unsigned int port, unsigned short val);
222extern void outl(unsigned int port, unsigned int val);
223extern unsigned char inb(unsigned int port);
224extern unsigned short inw(unsigned int port);
225extern unsigned int inl(unsigned int port);
226
227# endif /* (__FreeBSD__ || __OpenBSD__ ) && !DO_PROTOTYPES */
228
229
230#if defined(__NetBSD__)
231#include <machine/pio.h>
232#endif /* __NetBSD__ */
233
234/*
235 * inline functions to do unaligned accesses
236 * from linux/include/asm-alpha/unaligned.h
237 */
238
239/*
240 * EGCS 1.1 knows about arbitrary unaligned loads. Define some
241 * packed structures to talk about such things with.
242 */
243
244struct __una_u64 { unsigned long x __attribute__((packed)); };
245struct __una_u32 { unsigned int x __attribute__((packed)); };
246struct __una_u16 { unsigned short x __attribute__((packed)); };
247
248/*
249 * Elemental unaligned loads
250 */
251/* let's try making these things static */
252
253static __inline__ unsigned long ldq_u(unsigned long * r11)
254{
255# if defined(__GNUC__) && ((__GNUC__ > 2) || (__GNUC_MINOR__ >= 91))
256 const struct __una_u64 *ptr = (const struct __una_u64 *) r11;
257 return ptr->x;
258# else
259 unsigned long r1,r2;
260 __asm__("ldq_u %0,%3\n\t"
261 "ldq_u %1,%4\n\t"
262 "extql %0,%2,%0\n\t"
263 "extqh %1,%2,%1"
264 :"=&r" (r1), "=&r" (r2)
265 :"r" (r11),
266 "m" (*r11),
267 "m" (*(const unsigned long *)(7+(char *) r11)));
268 return r1 | r2;
269# endif
270}
271
272static __inline__ unsigned long ldl_u(unsigned int * r11)
273{
274# if defined(__GNUC__) && ((__GNUC__ > 2) || (__GNUC_MINOR__ >= 91))
275 const struct __una_u32 *ptr = (const struct __una_u32 *) r11;
276 return ptr->x;
277# else
278 unsigned long r1,r2;
279 __asm__("ldq_u %0,%3\n\t"
280 "ldq_u %1,%4\n\t"
281 "extll %0,%2,%0\n\t"
282 "extlh %1,%2,%1"
283 :"=&r" (r1), "=&r" (r2)
284 :"r" (r11),
285 "m" (*r11),
286 "m" (*(const unsigned long *)(3+(char *) r11)));
287 return r1 | r2;
288# endif
289}
290
291static __inline__ unsigned long ldw_u(unsigned short * r11)
292{
293# if defined(__GNUC__) && ((__GNUC__ > 2) || (__GNUC_MINOR__ >= 91))
294 const struct __una_u16 *ptr = (const struct __una_u16 *) r11;
295 return ptr->x;
296# else
297 unsigned long r1,r2;
298 __asm__("ldq_u %0,%3\n\t"
299 "ldq_u %1,%4\n\t"
300 "extwl %0,%2,%0\n\t"
301 "extwh %1,%2,%1"
302 :"=&r" (r1), "=&r" (r2)
303 :"r" (r11),
304 "m" (*r11),
305 "m" (*(const unsigned long *)(1+(char *) r11)));
306 return r1 | r2;
307# endif
308}
309
310/*
311 * Elemental unaligned stores
312 */
313
314static __inline__ void stq_u(unsigned long r5, unsigned long * r11)
315{
316# if defined(__GNUC__) && ((__GNUC__ > 2) || (__GNUC_MINOR__ >= 91))
317 struct __una_u64 *ptr = (struct __una_u64 *) r11;
318 ptr->x = r5;
319# else
320 unsigned long r1,r2,r3,r4;
321
322 __asm__("ldq_u %3,%1\n\t"
323 "ldq_u %2,%0\n\t"
324 "insqh %6,%7,%5\n\t"
325 "insql %6,%7,%4\n\t"
326 "mskqh %3,%7,%3\n\t"
327 "mskql %2,%7,%2\n\t"
328 "bis %3,%5,%3\n\t"
329 "bis %2,%4,%2\n\t"
330 "stq_u %3,%1\n\t"
331 "stq_u %2,%0"
332 :"=m" (*r11),
333 "=m" (*(unsigned long *)(7+(char *) r11)),
334 "=&r" (r1), "=&r" (r2), "=&r" (r3), "=&r" (r4)
335 :"r" (r5), "r" (r11));
336# endif
337}
338
339static __inline__ void stl_u(unsigned long r5, unsigned int * r11)
340{
341# if defined(__GNUC__) && ((__GNUC__ > 2) || (__GNUC_MINOR__ >= 91))
342 struct __una_u32 *ptr = (struct __una_u32 *) r11;
343 ptr->x = r5;
344# else
345 unsigned long r1,r2,r3,r4;
346
347 __asm__("ldq_u %3,%1\n\t"
348 "ldq_u %2,%0\n\t"
349 "inslh %6,%7,%5\n\t"
350 "insll %6,%7,%4\n\t"
351 "msklh %3,%7,%3\n\t"
352 "mskll %2,%7,%2\n\t"
353 "bis %3,%5,%3\n\t"
354 "bis %2,%4,%2\n\t"
355 "stq_u %3,%1\n\t"
356 "stq_u %2,%0"
357 :"=m" (*r11),
358 "=m" (*(unsigned long *)(3+(char *) r11)),
359 "=&r" (r1), "=&r" (r2), "=&r" (r3), "=&r" (r4)
360 :"r" (r5), "r" (r11));
361# endif
362}
363
364static __inline__ void stw_u(unsigned long r5, unsigned short * r11)
365{
366# if defined(__GNUC__) && ((__GNUC__ > 2) || (__GNUC_MINOR__ >= 91))
367 struct __una_u16 *ptr = (struct __una_u16 *) r11;
368 ptr->x = r5;
369# else
370 unsigned long r1,r2,r3,r4;
371
372 __asm__("ldq_u %3,%1\n\t"
373 "ldq_u %2,%0\n\t"
374 "inswh %6,%7,%5\n\t"
375 "inswl %6,%7,%4\n\t"
376 "mskwh %3,%7,%3\n\t"
377 "mskwl %2,%7,%2\n\t"
378 "bis %3,%5,%3\n\t"
379 "bis %2,%4,%2\n\t"
380 "stq_u %3,%1\n\t"
381 "stq_u %2,%0"
382 :"=m" (*r11),
383 "=m" (*(unsigned long *)(1+(char *) r11)),
384 "=&r" (r1), "=&r" (r2), "=&r" (r3), "=&r" (r4)
385 :"r" (r5), "r" (r11));
386# endif
387}
388
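/*
 * Illustrative sketch (not part of the original header): how a caller might
 * use the unaligned helpers above to copy a 32-bit value between two
 * possibly misaligned locations.  The name xf86ExampleCopyUnaligned32 is
 * hypothetical.
 */
static __inline__ void
xf86ExampleCopyUnaligned32(unsigned int *dst, unsigned int *src)
{
    stl_u(ldl_u(src), dst);    /* load and store without alignment traps */
}
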
389/* to flush the I-cache before jumping to code which just got loaded */
390# define PAL_imb 134
391# define istream_mem_barrier() \
392 __asm__ __volatile__("call_pal %0 #imb" : : "i" (PAL_imb) : "memory")
393# define mem_barrier() __asm__ __volatile__("mb" : : : "memory")
394# ifdef __ELF__
395# define write_mem_barrier() __asm__ __volatile__("wmb" : : : "memory")
396# else /* ECOFF gas 2.6 doesn't know "wmb" :-( */
397# define write_mem_barrier() mem_barrier()
398# endif
399
400
401# elif defined(linux) && defined(__ia64__)
402
403# include <inttypes.h>
404
405# include <sys/io.h>
406
407struct __una_u64 { uint64_t x __attribute__((packed)); };
408struct __una_u32 { uint32_t x __attribute__((packed)); };
409struct __una_u16 { uint16_t x __attribute__((packed)); };
410
411static __inline__ unsigned long
412__uldq (const unsigned long * r11)
413{
414 const struct __una_u64 *ptr = (const struct __una_u64 *) r11;
415 return ptr->x;
416}
417
418static __inline__ unsigned long
419__uldl (const unsigned int * r11)
420{
421 const struct __una_u32 *ptr = (const struct __una_u32 *) r11;
422 return ptr->x;
423}
424
425static __inline__ unsigned long
426__uldw (const unsigned short * r11)
427{
428 const struct __una_u16 *ptr = (const struct __una_u16 *) r11;
429 return ptr->x;
430}
431
432static __inline__ void
433__ustq (unsigned long r5, unsigned long * r11)
434{
435 struct __una_u64 *ptr = (struct __una_u64 *) r11;
436 ptr->x = r5;
437}
438
439static __inline__ void
440__ustl (unsigned long r5, unsigned int * r11)
441{
442 struct __una_u32 *ptr = (struct __una_u32 *) r11;
443 ptr->x = r5;
444}
445
446static __inline__ void
447__ustw (unsigned long r5, unsigned short * r11)
448{
449 struct __una_u16 *ptr = (struct __una_u16 *) r11;
450 ptr->x = r5;
451}
452
453# define ldq_u(p) __uldq(p)
454# define ldl_u(p) __uldl(p)
455# define ldw_u(p) __uldw(p)
456# define stq_u(v,p) __ustq(v,p)
457# define stl_u(v,p) __ustl(v,p)
458# define stw_u(v,p) __ustw(v,p)
459
460# ifndef __INTEL_COMPILER
461# define mem_barrier() __asm__ __volatile__ ("mf" ::: "memory")
462# define write_mem_barrier() __asm__ __volatile__ ("mf" ::: "memory")
463# else
464# include "ia64intrin.h"
465# define mem_barrier() __mf()
466# define write_mem_barrier() __mf()
467# endif
468
469/*
470 * This is overkill, but for different reasons depending on where it is used.
471 * This is thus general enough to be used everywhere cache flushes are needed.
472 * It doesn't handle memory access serialisation by other processors, though.
473 */
474# ifndef __INTEL_COMPILER
475# define ia64_flush_cache(Addr) \
476 __asm__ __volatile__ ( \
477 "fc.i %0;;;" \
478 "sync.i;;;" \
479 "mf;;;" \
480 "srlz.i;;;" \
481 :: "r"(Addr) : "memory")
482# else
483# define ia64_flush_cache(Addr) { \
484 __fc(Addr);\
485 __synci();\
486 __mf();\
487 __isrlz();\
488 }
489# endif
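/*
 * Illustrative sketch (not part of the original header): flushing a single
 * cache line after patching data that will later be executed.  A real caller
 * would step over every line of the modified range; the line size is
 * implementation dependent.  The name ia64_example_flush_patched is
 * hypothetical.
 */
static __inline__ void
ia64_example_flush_patched(void *addr)
{
    ia64_flush_cache(addr);    /* fc.i / sync.i / mf / srlz.i (see above) */
}
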
490# undef outb
491# undef outw
492# undef outl
493# undef inb
494# undef inw
495# undef inl
496extern void outb(unsigned long port, unsigned char val);
497extern void outw(unsigned long port, unsigned short val);
498extern void outl(unsigned long port, unsigned int val);
499extern unsigned int inb(unsigned long port);
500extern unsigned int inw(unsigned long port);
501extern unsigned int inl(unsigned long port);
502
503# elif defined(linux) && defined(__amd64__)
504
505# include <inttypes.h>
506
507# define ldq_u(p) (*((unsigned long *)(p)))
508# define ldl_u(p) (*((unsigned int *)(p)))
509# define ldw_u(p) (*((unsigned short *)(p)))
510# define stq_u(v,p) (*(unsigned long *)(p)) = (v)
511# define stl_u(v,p) (*(unsigned int *)(p)) = (v)
512# define stw_u(v,p) (*(unsigned short *)(p)) = (v)
513
514# define mem_barrier() \
515 __asm__ __volatile__ ("lock; addl $0,0(%%rsp)": : :"memory")
516# define write_mem_barrier() \
517 __asm__ __volatile__ ("": : :"memory")
518
519
520static __inline__ void
521outb(unsigned short port, unsigned char val)
522{
523 __asm__ __volatile__("outb %0,%1" : :"a" (val), "d" (port));
524}
525
526
527static __inline__ void
528outw(unsigned short port, unsigned short val)
529{
530 __asm__ __volatile__("outw %0,%1" : :"a" (val), "d" (port));
531}
532
533static __inline__ void
534outl(unsigned short port, unsigned int val)
535{
536 __asm__ __volatile__("outl %0,%1" : :"a" (val), "d" (port));
537}
538
539static __inline__ unsigned int
540inb(unsigned short port)
541{
542 unsigned char ret;
543 __asm__ __volatile__("inb %1,%0" :
544 "=a" (ret) :
545 "d" (port));
546 return ret;
547}
548
549static __inline__ unsigned int
550inw(unsigned short port)
551{
552 unsigned short ret;
553 __asm__ __volatile__("inw %1,%0" :
554 "=a" (ret) :
555 "d" (port));
556 return ret;
557}
558
559static __inline__ unsigned int
560inl(unsigned short port)
561{
562 unsigned int ret;
563 __asm__ __volatile__("inl %1,%0" :
564 "=a" (ret) :
565 "d" (port));
566 return ret;
567}
568
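/*
 * Illustrative sketch (not part of the original header): reading a VGA CRTC
 * register through the port helpers above.  0x3D4/0x3D5 are the standard
 * colour CRTC index/data ports; the helper name is hypothetical.
 */
static __inline__ unsigned int
xf86ExampleReadCrtc(unsigned char index)
{
    outb(0x3D4, index);        /* select the CRTC register */
    return inb(0x3D5);         /* read its current value   */
}
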
569# elif (defined(linux) || defined(Lynx) || defined(sun) || defined(__OpenBSD__) || defined(__FreeBSD__)) && defined(__sparc__)
570
571# if !defined(Lynx)
572# ifndef ASI_PL
573# define ASI_PL 0x88
574# endif
575
576# define barrier() __asm__ __volatile__(".word 0x8143e00a": : :"memory")
577
578static __inline__ void
579outb(unsigned long port, unsigned char val)
580{
581 __asm__ __volatile__("stba %0, [%1] %2"
582 : /* No outputs */
583 : "r" (val), "r" (port), "i" (ASI_PL));
584 barrier();
585}
586
587static __inline__ void
588outw(unsigned long port, unsigned short val)
589{
590 __asm__ __volatile__("stha %0, [%1] %2"
591 : /* No outputs */
592 : "r" (val), "r" (port), "i" (ASI_PL));
593 barrier();
594}
595
596static __inline__ void
597outl(unsigned long port, unsigned int val)
598{
599 __asm__ __volatile__("sta %0, [%1] %2"
600 : /* No outputs */
601 : "r" (val), "r" (port), "i" (ASI_PL));
602 barrier();
603}
604
605static __inline__ unsigned int
606inb(unsigned long port)
607{
608 unsigned int ret;
609 __asm__ __volatile__("lduba [%1] %2, %0"
610 : "=r" (ret)
611 : "r" (port), "i" (ASI_PL));
612 return ret;
613}
614
615static __inline__ unsigned int
616inw(unsigned long port)
617{
618 unsigned int ret;
619 __asm__ __volatile__("lduha [%1] %2, %0"
620 : "=r" (ret)
621 : "r" (port), "i" (ASI_PL));
622 return ret;
623}
624
625static __inline__ unsigned int
626inl(unsigned long port)
627{
628 unsigned int ret;
629 __asm__ __volatile__("lda [%1] %2, %0"
630 : "=r" (ret)
631 : "r" (port), "i" (ASI_PL));
632 return ret;
633}
634
635static __inline__ unsigned char
636xf86ReadMmio8(__volatile__ void *base, const unsigned long offset)
637{
638 unsigned long addr = ((unsigned long)base) + offset;
639 unsigned char ret;
640
641 __asm__ __volatile__("lduba [%1] %2, %0"
642 : "=r" (ret)
643 : "r" (addr), "i" (ASI_PL));
644 return ret;
645}
646
647static __inline__ unsigned short
648xf86ReadMmio16Be(__volatile__ void *base, const unsigned long offset)
649{
650 unsigned long addr = ((unsigned long)base) + offset;
651 unsigned short ret;
652
653 __asm__ __volatile__("lduh [%1], %0"
654 : "=r" (ret)
655 : "r" (addr));
656 return ret;
657}
658
659static __inline__ unsigned short
660xf86ReadMmio16Le(__volatile__ void *base, const unsigned long offset)
661{
662 unsigned long addr = ((unsigned long)base) + offset;
663 unsigned short ret;
664
665 __asm__ __volatile__("lduha [%1] %2, %0"
666 : "=r" (ret)
667 : "r" (addr), "i" (ASI_PL));
668 return ret;
669}
670
671static __inline__ unsigned int
672xf86ReadMmio32Be(__volatile__ void *base, const unsigned long offset)
673{
674 unsigned long addr = ((unsigned long)base) + offset;
675 unsigned int ret;
676
677 __asm__ __volatile__("ld [%1], %0"
678 : "=r" (ret)
679 : "r" (addr));
680 return ret;
681}
682
683static __inline__ unsigned int
684xf86ReadMmio32Le(__volatile__ void *base, const unsigned long offset)
685{
686 unsigned long addr = ((unsigned long)base) + offset;
687 unsigned int ret;
688
689 __asm__ __volatile__("lda [%1] %2, %0"
690 : "=r" (ret)
691 : "r" (addr), "i" (ASI_PL));
692 return ret;
693}
694
695static __inline__ void
696xf86WriteMmio8(__volatile__ void *base, const unsigned long offset,
697 const unsigned int val)
698{
699 unsigned long addr = ((unsigned long)base) + offset;
700
701 __asm__ __volatile__("stba %0, [%1] %2"
702 : /* No outputs */
703 : "r" (val), "r" (addr), "i" (ASI_PL));
704 barrier();
705}
706
707static __inline__ void
708xf86WriteMmio16Be(__volatile__ void *base, const unsigned long offset,
709 const unsigned int val)
710{
711 unsigned long addr = ((unsigned long)base) + offset;
712
713 __asm__ __volatile__("sth %0, [%1]"
714 : /* No outputs */
715 : "r" (val), "r" (addr));
716 barrier();
717}
718
719static __inline__ void
720xf86WriteMmio16Le(__volatile__ void *base, const unsigned long offset,
721 const unsigned int val)
722{
723 unsigned long addr = ((unsigned long)base) + offset;
724
725 __asm__ __volatile__("stha %0, [%1] %2"
726 : /* No outputs */
727 : "r" (val), "r" (addr), "i" (ASI_PL));
728 barrier();
729}
730
731static __inline__ void
732xf86WriteMmio32Be(__volatile__ void *base, const unsigned long offset,
733 const unsigned int val)
734{
735 unsigned long addr = ((unsigned long)base) + offset;
736
737 __asm__ __volatile__("st %0, [%1]"
738 : /* No outputs */
739 : "r" (val), "r" (addr));
740 barrier();
741}
742
743static __inline__ void
744xf86WriteMmio32Le(__volatile__ void *base, const unsigned long offset,
745 const unsigned int val)
746{
747 unsigned long addr = ((unsigned long)base) + offset;
748
749 __asm__ __volatile__("sta %0, [%1] %2"
750 : /* No outputs */
751 : "r" (val), "r" (addr), "i" (ASI_PL));
752 barrier();
753}
754
755static __inline__ void
756xf86WriteMmio8NB(__volatile__ void *base, const unsigned long offset,
757 const unsigned int val)
758{
759 unsigned long addr = ((unsigned long)base) + offset;
760
761 __asm__ __volatile__("stba %0, [%1] %2"
762 : /* No outputs */
763 : "r" (val), "r" (addr), "i" (ASI_PL));
764}
765
766static __inline__ void
767xf86WriteMmio16BeNB(__volatile__ void *base, const unsigned long offset,
768 const unsigned int val)
769{
770 unsigned long addr = ((unsigned long)base) + offset;
771
772 __asm__ __volatile__("sth %0, [%1]"
773 : /* No outputs */
774 : "r" (val), "r" (addr));
775}
776
777static __inline__ void
778xf86WriteMmio16LeNB(__volatile__ void *base, const unsigned long offset,
779 const unsigned int val)
780{
781 unsigned long addr = ((unsigned long)base) + offset;
782
783 __asm__ __volatile__("stha %0, [%1] %2"
784 : /* No outputs */
785 : "r" (val), "r" (addr), "i" (ASI_PL));
786}
787
788static __inline__ void
789xf86WriteMmio32BeNB(__volatile__ void *base, const unsigned long offset,
790 const unsigned int val)
791{
792 unsigned long addr = ((unsigned long)base) + offset;
793
794 __asm__ __volatile__("st %0, [%1]"
795 : /* No outputs */
796 : "r" (val), "r" (addr));
797}
798
799static __inline__ void
800xf86WriteMmio32LeNB(__volatile__ void *base, const unsigned long offset,
801 const unsigned int val)
802{
803 unsigned long addr = ((unsigned long)base) + offset;
804
805 __asm__ __volatile__("sta %0, [%1] %2"
806 : /* No outputs */
807 : "r" (val), "r" (addr), "i" (ASI_PL));
808}
809
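/*
 * Illustrative sketch (not part of the original header): a read-modify-write
 * of a (hypothetical) little-endian 32-bit device register using the MMIO
 * helpers above.  The helper name and register offset are examples only.
 */
static __inline__ unsigned int
xf86ExampleToggleBit0(__volatile__ void *mmioBase, unsigned long regOffset)
{
    unsigned int old = xf86ReadMmio32Le(mmioBase, regOffset);
    xf86WriteMmio32Le(mmioBase, regOffset, old ^ 1);    /* barrier() inside */
    return old;
}
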
810# endif /* !Lynx */
811
812/*
813 * EGCS 1.1 knows about arbitrary unaligned loads. Define some
814 * packed structures to talk about such things with.
815 */
816
817# if defined(__arch64__) || defined(__sparcv9)
818struct __una_u64 { unsigned long x __attribute__((packed)); };
819# endif
820struct __una_u32 { unsigned int x __attribute__((packed)); };
821struct __una_u16 { unsigned short x __attribute__((packed)); };
822
823static __inline__ unsigned long ldq_u(unsigned long *p)
824{
825# if defined(__GNUC__) && ((__GNUC__ > 2) || (__GNUC_MINOR__ >= 91))
826# if defined(__arch64__) || defined(__sparcv9)
827 const struct __una_u64 *ptr = (const struct __una_u64 *) p;
828# else
829 const struct __una_u32 *ptr = (const struct __una_u32 *) p;
830# endif
831 return ptr->x;
832# else
833 unsigned long ret;
834 memmove(&ret, p, sizeof(*p));
835 return ret;
836# endif
837}
838
839static __inline__ unsigned long ldl_u(unsigned int *p)
840{
841# if defined(__GNUC__) && ((__GNUC__ > 2) || (__GNUC_MINOR__ >= 91))
842 const struct __una_u32 *ptr = (const struct __una_u32 *) p;
843 return ptr->x;
844# else
845 unsigned int ret;
846 memmove(&ret, p, sizeof(*p));
847 return ret;
848# endif
849}
850
851static __inline__ unsigned long ldw_u(unsigned short *p)
852{
853# if defined(__GNUC__) && ((__GNUC__ > 2) || (__GNUC_MINOR__ >= 91))
854 const struct __una_u16 *ptr = (const struct __una_u16 *) p;
855 return ptr->x;
856# else
857 unsigned short ret;
858 memmove(&ret, p, sizeof(*p));
859 return ret;
860# endif
861}
862
863static __inline__ void stq_u(unsigned long val, unsigned long *p)
864{
865# if defined(__GNUC__) && ((__GNUC__ > 2) || (__GNUC_MINOR__ >= 91))
866# if defined(__arch64__) || defined(__sparcv9)
867 struct __una_u64 *ptr = (struct __una_u64 *) p;
868# else
869 struct __una_u32 *ptr = (struct __una_u32 *) p;
870# endif
871 ptr->x = val;
872# else
873 unsigned long tmp = val;
874 memmove(p, &tmp, sizeof(*p));
875# endif
876}
877
878static __inline__ void stl_u(unsigned long val, unsigned int *p)
879{
880# if defined(__GNUC__) && ((__GNUC__ > 2) || (__GNUC_MINOR__ >= 91))
881 struct __una_u32 *ptr = (struct __una_u32 *) p;
882 ptr->x = val;
883# else
884 unsigned int tmp = val;
885 memmove(p, &tmp, sizeof(*p));
886# endif
887}
888
889static __inline__ void stw_u(unsigned long val, unsigned short *p)
890{
891# if defined(__GNUC__) && ((__GNUC__ > 2) || (__GNUC_MINOR__ >= 91))
892 struct __una_u16 *ptr = (struct __una_u16 *) p;
893 ptr->x = val;
894# else
895 unsigned short tmp = val;
896 memmove(p, &tmp, sizeof(*p));
897# endif
898}
899
900# define mem_barrier() /* XXX: nop for now */
901# define write_mem_barrier() /* XXX: nop for now */
902
903# elif defined(__mips__) || (defined(__arm32__) && !defined(__linux__))
904# ifdef __arm32__
905# define PORT_SIZE long
906# else
907# define PORT_SIZE short
908# endif
909
910unsigned int IOPortBase; /* Memory mapped I/O port area */
911
912static __inline__ void
913outb(unsigned PORT_SIZE port, unsigned char val)
914{
915 *(volatile unsigned char*)(((unsigned PORT_SIZE)(port))+IOPortBase) = val;
916}
917
918static __inline__ void
919outw(unsigned PORT_SIZE port, unsigned short val)
920{
921 *(volatile unsigned short*)(((unsigned PORT_SIZE)(port))+IOPortBase) = val;
922}
923
924static __inline__ void
925outl(unsigned PORT_SIZE port, unsigned int val)
926{
927 *(volatile unsigned int*)(((unsigned PORT_SIZE)(port))+IOPortBase) = val;
928}
929
930static __inline__ unsigned int
931inb(unsigned PORT_SIZE port)
932{
933 return *(volatile unsigned char*)(((unsigned PORT_SIZE)(port))+IOPortBase);
934}
935
936static __inline__ unsigned int
937inw(unsigned PORT_SIZE port)
938{
939 return *(volatile unsigned short*)(((unsigned PORT_SIZE)(port))+IOPortBase);
940}
941
942static __inline__ unsigned int
943inl(unsigned PORT_SIZE port)
944{
945 return *(volatile unsigned int*)(((unsigned PORT_SIZE)(port))+IOPortBase);
946}
947
948
949# if defined(__mips__)
950static __inline__ unsigned long ldq_u(unsigned long * r11)
951{
952 unsigned long r1;
953 __asm__("lwr %0,%2\n\t"
954 "lwl %0,%3\n\t"
955 :"=&r" (r1)
956 :"r" (r11),
957 "m" (*r11),
958 "m" (*(unsigned long *)(3+(char *) r11)));
959 return r1;
960}
961
962static __inline__ unsigned long ldl_u(unsigned int * r11)
963{
964 unsigned long r1;
965 __asm__("lwr %0,%2\n\t"
966 "lwl %0,%3\n\t"
967 :"=&r" (r1)
968 :"r" (r11),
969 "m" (*r11),
970 "m" (*(unsigned long *)(3+(char *) r11)));
971 return r1;
972}
973
974static __inline__ unsigned long ldw_u(unsigned short * r11)
975{
976 unsigned long r1;
977 __asm__("lwr %0,%2\n\t"
978 "lwl %0,%3\n\t"
979 :"=&r" (r1)
980 :"r" (r11),
981 "m" (*r11),
982 "m" (*(unsigned long *)(1+(char *) r11)));
983 return r1;
984}
985
986# ifdef linux /* don't mess with other OSs */
987
988/*
989 * EGCS 1.1 knows about arbitrary unaligned loads (and we don't support older
990 * versions anyway). Define some packed structures to talk about such things
991 * with.
992 */
993
994struct __una_u32 { unsigned int x __attribute__((packed)); };
995struct __una_u16 { unsigned short x __attribute__((packed)); };
996
997static __inline__ void stw_u(unsigned long val, unsigned short *p)
998{
999 struct __una_u16 *ptr = (struct __una_u16 *) p;
1000 ptr->x = val;
1001}
1002
1003static __inline__ void stl_u(unsigned long val, unsigned int *p)
1004{
1005 struct __una_u32 *ptr = (struct __una_u32 *) p;
1006 ptr->x = val;
1007}
1008
1009# if X_BYTE_ORDER == X_BIG_ENDIAN
1010static __inline__ unsigned int
1011xf86ReadMmio32Be(__volatile__ void *base, const unsigned long offset)
1012{
1013 unsigned long addr = ((unsigned long)base) + offset;
1014 unsigned int ret;
1015
1016 __asm__ __volatile__("lw %0, 0(%1)"
1017 : "=r" (ret)
1018 : "r" (addr));
1019 return ret;
1020}
1021
1022static __inline__ void
1023xf86WriteMmio32Be(__volatile__ void *base, const unsigned long offset,
1024 const unsigned int val)
1025{
1026 unsigned long addr = ((unsigned long)base) + offset;
1027
1028 __asm__ __volatile__("sw %0, 0(%1)"
1029 : /* No outputs */
1030 : "r" (val), "r" (addr));
1031}
1032# endif
1033
1034# define mem_barrier() \
1035 __asm__ __volatile__( \
1036 "# prevent instructions being moved around\n\t" \
1037 ".set\tnoreorder\n\t" \
1038 "# 8 nops to fool the R4400 pipeline\n\t" \
1039 "nop;nop;nop;nop;nop;nop;nop;nop\n\t" \
1040 ".set\treorder" \
1041 : /* no output */ \
1042 : /* no input */ \
1043 : "memory")
1044# define write_mem_barrier() mem_barrier()
1045
1046# else /* !linux */
1047
1048# define stq_u(v,p) stl_u(v,p)
1049# define stl_u(v,p) (*(unsigned char *)(p)) = (v); \
1050 (*((unsigned char *)(p)+1)) = ((v) >> 8); \
1051 (*((unsigned char *)(p)+2)) = ((v) >> 16); \
1052 (*((unsigned char *)(p)+3)) = ((v) >> 24)
1053
1054# define stw_u(v,p) (*(unsigned char *)(p)) = (v); \
1055 (*((unsigned char *)(p)+1)) = ((v) >> 8)
1056
1057# define mem_barrier() /* NOP */
1058# endif /* !linux */
1059# endif /* __mips__ */
1060
1061# if defined(__arm32__)
1062# define ldq_u(p) (*((unsigned long *)(p)))
1063# define ldl_u(p) (*((unsigned int *)(p)))
1064# define ldw_u(p) (*((unsigned short *)(p)))
1065# define stq_u(v,p) (*(unsigned long *)(p)) = (v)
1066# define stl_u(v,p) (*(unsigned int *)(p)) = (v)
1067# define stw_u(v,p) (*(unsigned short *)(p)) = (v)
1068# define mem_barrier() /* NOP */
1069# define write_mem_barrier() /* NOP */
1070# endif /* __arm32__ */
1071
1072# elif (defined(Lynx) || defined(linux) || defined(__OpenBSD__) || defined(__NetBSD__) || defined(__FreeBSD__)) && defined(__powerpc__)
1073
1074# ifndef MAP_FAILED
1075# define MAP_FAILED ((void *)-1)
1076# endif
1077
1078extern volatile unsigned char *ioBase;
1079
1080#if defined(linux) && defined(__powerpc64__)
1081# include <linux/version.h>
1082# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
1083# include <asm/memory.h>
1084# endif
1085#endif /* defined(linux) && defined(__powerpc64__) */
1086#ifndef eieio /* We deal with arch-specific eieio() routines above... */
1087# define eieio() __asm__ __volatile__ ("eieio" ::: "memory")
1088#endif /* eieio */
1089
1090static __inline__ unsigned char
1091xf86ReadMmio8(__volatile__ void *base, const unsigned long offset)
1092{
1093 register unsigned char val;
1094 __asm__ __volatile__(
1095 "lbzx %0,%1,%2\n\t"
1096 "eieio"
1097 : "=r" (val)
1098 : "b" (base), "r" (offset),
1099 "m" (*((volatile unsigned char *)base+offset)));
1100 return val;
1101}
1102
1103static __inline__ unsigned short
1104xf86ReadMmio16Be(__volatile__ void *base, const unsigned long offset)
1105{
1106 register unsigned short val;
1107 __asm__ __volatile__(
1108 "lhzx %0,%1,%2\n\t"
1109 "eieio"
1110 : "=r" (val)
1111 : "b" (base), "r" (offset),
1112 "m" (*((volatile unsigned char *)base+offset)));
1113 return val;
1114}
1115
1116static __inline__ unsigned short
1117xf86ReadMmio16Le(__volatile__ void *base, const unsigned long offset)
1118{
1119 register unsigned short val;
1120 __asm__ __volatile__(
1121 "lhbrx %0,%1,%2\n\t"
1122 "eieio"
1123 : "=r" (val)
1124 : "b" (base), "r" (offset),
1125 "m" (*((volatile unsigned char *)base+offset)));
1126 return val;
1127}
1128
1129static __inline__ unsigned int
1130xf86ReadMmio32Be(__volatile__ void *base, const unsigned long offset)
1131{
1132 register unsigned int val;
1133 __asm__ __volatile__(
1134 "lwzx %0,%1,%2\n\t"
1135 "eieio"
1136 : "=r" (val)
1137 : "b" (base), "r" (offset),
1138 "m" (*((volatile unsigned char *)base+offset)));
1139 return val;
1140}
1141
1142static __inline__ unsigned int
1143xf86ReadMmio32Le(__volatile__ void *base, const unsigned long offset)
1144{
1145 register unsigned int val;
1146 __asm__ __volatile__(
1147 "lwbrx %0,%1,%2\n\t"
1148 "eieio"
1149 : "=r" (val)
1150 : "b" (base), "r" (offset),
1151 "m" (*((volatile unsigned char *)base+offset)));
1152 return val;
1153}
1154
1155static __inline__ void
1156xf86WriteMmioNB8(__volatile__ void *base, const unsigned long offset,
1157 const unsigned char val)
1158{
1159 __asm__ __volatile__(
1160 "stbx %1,%2,%3\n\t"
1161 : "=m" (*((volatile unsigned char *)base+offset))
1162 : "r" (val), "b" (base), "r" (offset));
1163}
1164
1165static __inline__ void
1166xf86WriteMmioNB16Le(__volatile__ void *base, const unsigned long offset,
1167 const unsigned short val)
1168{
1169 __asm__ __volatile__(
1170 "sthbrx %1,%2,%3\n\t"
1171 : "=m" (*((volatile unsigned char *)base+offset))
1172 : "r" (val), "b" (base), "r" (offset));
1173}
1174
1175static __inline__ void
1176xf86WriteMmioNB16Be(__volatile__ void *base, const unsigned long offset,
1177 const unsigned short val)
1178{
1179 __asm__ __volatile__(
1180 "sthx %1,%2,%3\n\t"
1181 : "=m" (*((volatile unsigned char *)base+offset))
1182 : "r" (val), "b" (base), "r" (offset));
1183}
1184
1185static __inline__ void
1186xf86WriteMmioNB32Le(__volatile__ void *base, const unsigned long offset,
1187 const unsigned int val)
1188{
1189 __asm__ __volatile__(
1190 "stwbrx %1,%2,%3\n\t"
1191 : "=m" (*((volatile unsigned char *)base+offset))
1192 : "r" (val), "b" (base), "r" (offset));
1193}
1194
1195static __inline__ void
1196xf86WriteMmioNB32Be(__volatile__ void *base, const unsigned long offset,
1197 const unsigned int val)
1198{
1199 __asm__ __volatile__(
1200 "stwx %1,%2,%3\n\t"
1201 : "=m" (*((volatile unsigned char *)base+offset))
1202 : "r" (val), "b" (base), "r" (offset));
1203}
1204
1205static __inline__ void
1206xf86WriteMmio8(__volatile__ void *base, const unsigned long offset,
1207 const unsigned char val)
1208{
1209 xf86WriteMmioNB8(base, offset, val);
1210 eieio();
1211}
1212
1213static __inline__ void
1214xf86WriteMmio16Le(__volatile__ void *base, const unsigned long offset,
1215 const unsigned short val)
1216{
1217 xf86WriteMmioNB16Le(base, offset, val);
1218 eieio();
1219}
1220
1221static __inline__ void
1222xf86WriteMmio16Be(__volatile__ void *base, const unsigned long offset,
1223 const unsigned short val)
1224{
1225 xf86WriteMmioNB16Be(base, offset, val);
1226 eieio();
1227}
1228
1229static __inline__ void
1230xf86WriteMmio32Le(__volatile__ void *base, const unsigned long offset,
1231 const unsigned int val)
1232{
1233 xf86WriteMmioNB32Le(base, offset, val);
1234 eieio();
1235}
1236
1237static __inline__ void
1238xf86WriteMmio32Be(__volatile__ void *base, const unsigned long offset,
1239 const unsigned int val)
1240{
1241 xf86WriteMmioNB32Be(base, offset, val);
1242 eieio();
1243}
1244
1245
1246static __inline__ void
1247outb(unsigned short port, unsigned char value)
1248{
1249 if(ioBase == MAP_FAILED) return;
1250 xf86WriteMmio8((void *)ioBase, port, value);
1251}
1252
1253static __inline__ void
1254outw(unsigned short port, unsigned short value)
1255{
1256 if(ioBase == MAP_FAILED) return;
1257 xf86WriteMmio16Le((void *)ioBase, port, value);
1258}
1259
1260static __inline__ void
1261outl(unsigned short port, unsigned int value)
1262{
1263 if(ioBase == MAP_FAILED) return;
1264 xf86WriteMmio32Le((void *)ioBase, port, value);
1265}
1266
1267static __inline__ unsigned int
1268inb(unsigned short port)
1269{
1270 if(ioBase == MAP_FAILED) return 0;
1271 return xf86ReadMmio8((void *)ioBase, port);
1272}
1273
1274static __inline__ unsigned int
1275inw(unsigned short port)
1276{
1277 if(ioBase == MAP_FAILED) return 0;
1278 return xf86ReadMmio16Le((void *)ioBase, port);
1279}
1280
1281static __inline__ unsigned int
1282inl(unsigned short port)
1283{
1284 if(ioBase == MAP_FAILED) return 0;
1285 return xf86ReadMmio32Le((void *)ioBase, port);
1286}
1287
1288# define ldq_u(p) ldl_u(p)
1289# define ldl_u(p) ((*(unsigned char *)(p)) | \
1290 (*((unsigned char *)(p)+1)<<8) | \
1291 (*((unsigned char *)(p)+2)<<16) | \
1292 (*((unsigned char *)(p)+3)<<24))
1293# define ldw_u(p) ((*(unsigned char *)(p)) | \
1294 (*((unsigned char *)(p)+1)<<8))
1295
1296# define stq_u(v,p) stl_u(v,p)
1297# define stl_u(v,p) (*(unsigned char *)(p)) = (v); \
1298 (*((unsigned char *)(p)+1)) = ((v) >> 8); \
1299 (*((unsigned char *)(p)+2)) = ((v) >> 16); \
1300 (*((unsigned char *)(p)+3)) = ((v) >> 24)
1301# define stw_u(v,p) (*(unsigned char *)(p)) = (v); \
1302 (*((unsigned char *)(p)+1)) = ((v) >> 8)
1303
1304# define mem_barrier() eieio()
1305# define write_mem_barrier() eieio()
1306
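/*
 * Illustrative sketch (not part of the original header): a posted register
 * write followed by a read-back, relying on the eieio() issued inside the
 * helpers above to keep the accesses ordered.  The helper name, offset and
 * value are examples only.
 */
static __inline__ unsigned int
xf86ExamplePpcWriteThenRead(__volatile__ void *mmioBase)
{
    xf86WriteMmio32Le(mmioBase, 0x04, 0xdeadbeef);    /* write, then eieio() */
    return xf86ReadMmio32Le(mmioBase, 0x04);          /* ordered read-back   */
}
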
1307#elif defined(__arm__) && defined(__linux__)
1308
1309#define ldq_u(p) (*((unsigned long *)(p)))
1310#define ldl_u(p) (*((unsigned int *)(p)))
1311#define ldw_u(p) (*((unsigned short *)(p)))
1312#define stq_u(v,p) (*(unsigned long *)(p)) = (v)
1313#define stl_u(v,p) (*(unsigned int *)(p)) = (v)
1314#define stw_u(v,p) (*(unsigned short *)(p)) = (v)
1315#define mem_barrier() /* NOP */
1316#define write_mem_barrier() /* NOP */
1317
1318/* for Linux on ARM, we use the LIBC inx/outx routines */
1319/* note that the appropriate setup via "ioperm" needs to be done */
1320/* *before* any inx/outx is done. */
1321
1322#include <sys/io.h>
1323
1324static __inline__ void
1325xf_outb(unsigned short port, unsigned char val)
1326{
1327 outb(val, port);
1328}
1329
1330static __inline__ void
1331xf_outw(unsigned short port, unsigned short val)
1332{
1333 outw(val, port);
1334}
1335
1336static __inline__ void
1337xf_outl(unsigned short port, unsigned int val)
1338{
1339 outl(val, port);
1340}
1341
1342#define outb xf_outb
1343#define outw xf_outw
1344#define outl xf_outl
1345
1346#define arm_flush_cache(addr) \
1347do { \
1348 register unsigned long _beg __asm ("a1") = (unsigned long) (addr); \
1349 register unsigned long _end __asm ("a2") = (unsigned long) (addr) + 4;\
1350 register unsigned long _flg __asm ("a3") = 0; \
1351 __asm __volatile ("swi 0x9f0002 @ sys_cacheflush" \
1352 : "=r" (_beg) \
1353 : "0" (_beg), "r" (_end), "r" (_flg)); \
1354} while (0)
1355
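/*
 * Illustrative sketch (not part of the original header): patching a single
 * instruction word and flushing it with the macro above so the CPU does not
 * execute a stale copy from the instruction cache.  The helper name is
 * hypothetical.
 */
static __inline__ void
arm_example_patch_word(unsigned int *insn, unsigned int newval)
{
    *insn = newval;
    arm_flush_cache(insn);    /* sys_cacheflush over [insn, insn + 4) */
}
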
1356# else /* ix86 */
1357
1358# define ldq_u(p) (*((unsigned long *)(p)))
1359# define ldl_u(p) (*((unsigned int *)(p)))
1360# define ldw_u(p) (*((unsigned short *)(p)))
1361# define stq_u(v,p) (*(unsigned long *)(p)) = (v)
1362# define stl_u(v,p) (*(unsigned int *)(p)) = (v)
1363# define stw_u(v,p) (*(unsigned short *)(p)) = (v)
1364# define mem_barrier() /* NOP */
1365# define write_mem_barrier() /* NOP */
1366
1367# if !defined(__SUNPRO_C)
1368# if !defined(FAKEIT) && !defined(__mc68000__) && !defined(__arm__) && !defined(__sh__) && !defined(__hppa__)
1369# ifdef GCCUSESGAS
1370
1371/*
1372 * If gcc uses gas rather than the native assembler, the syntax of these
1373 * inlines has to be different. DHD
1374 */
1375
1376static __inline__ void
1377outb(unsigned short port, unsigned char val)
1378{
1379 __asm__ __volatile__("outb %0,%1" : :"a" (val), "d" (port));
1380}
1381
1382
1383static __inline__ void
1384outw(unsigned short port, unsigned short val)
1385{
1386 __asm__ __volatile__("outw %0,%1" : :"a" (val), "d" (port));
1387}
1388
1389static __inline__ void
1390outl(unsigned short port, unsigned int val)
1391{
1392 __asm__ __volatile__("outl %0,%1" : :"a" (val), "d" (port));
1393}
1394
1395static __inline__ unsigned int
1396inb(unsigned short port)
1397{
1398 unsigned char ret;
1399 __asm__ __volatile__("inb %1,%0" :
1400 "=a" (ret) :
1401 "d" (port));
1402 return ret;
1403}
1404
1405static __inline__ unsigned int
1406inw(unsigned short port)
1407{
1408 unsigned short ret;
1409 __asm__ __volatile__("inw %1,%0" :
1410 "=a" (ret) :
1411 "d" (port));
1412 return ret;
1413}
1414
1415static __inline__ unsigned int
1416inl(unsigned short port)
1417{
1418 unsigned int ret;
1419 __asm__ __volatile__("inl %1,%0" :
1420 "=a" (ret) :
1421 "d" (port));
1422 return ret;
1423}
1424
1425# else /* GCCUSESGAS */
1426
1427static __inline__ void
1428outb(unsigned short port, unsigned char val)
1429{
1430 __asm__ __volatile__("out%B0 (%1)" : :"a" (val), "d" (port));
1431}
1432
1433static __inline__ void
1434outw(unsigned short port, unsigned short val)
1435{
1436 __asm__ __volatile__("out%W0 (%1)" : :"a" (val), "d" (port));
1437}
1438
1439static __inline__ void
1440outl(unsigned short port, unsigned int val)
1441{
1442 __asm__ __volatile__("out%L0 (%1)" : :"a" (val), "d" (port));
1443}
1444
1445static __inline__ unsigned int
1446inb(unsigned short port)
1447{
1448 unsigned char ret;
1449 __asm__ __volatile__("in%B0 (%1)" :
1450 "=a" (ret) :
1451 "d" (port));
1452 return ret;
1453}
1454
1455static __inline__ unsigned int
1456inw(unsigned short port)
1457{
1458 unsigned short ret;
1459 __asm__ __volatile__("in%W0 (%1)" :
1460 "=a" (ret) :
1461 "d" (port));
1462 return ret;
1463}
1464
1465static __inline__ unsigned int
1466inl(unsigned short port)
1467{
1468 unsigned int ret;
1469 __asm__ __volatile__("in%L0 (%1)" :
1470 "=a" (ret) :
1471 "d" (port));
1472 return ret;
1473}
1474
1475# endif /* GCCUSESGAS */
1476
1477# else /* !defined(FAKEIT) && !defined(__mc68000__) && !defined(__arm__) && !defined(__sh__) && !defined(__hppa__)*/
1478
1479static __inline__ void
1480outb(unsigned short port, unsigned char val)
1481{
1482}
1483
1484static __inline__ void
1485outw(unsigned short port, unsigned short val)
1486{
1487}
1488
1489static __inline__ void
1490outl(unsigned short port, unsigned int val)
1491{
1492}
1493
1494static __inline__ unsigned int
1495inb(unsigned short port)
1496{
1497 return 0;
1498}
1499
1500static __inline__ unsigned int
1501inw(unsigned short port)
1502{
1503 return 0;
1504}
1505
1506static __inline__ unsigned int
1507inl(unsigned short port)
1508{
1509 return 0;
1510}
1511
1512# endif /* FAKEIT */
1513# endif /* __SUNPRO_C */
1514
1515# endif /* ix86 */
1516
1517# else /* !GNUC */
1518# if !defined(QNX4)
1519# if defined(__STDC__) && (__STDC__ == 1)
1520# ifndef asm
1521# define asm __asm
1522# endif
1523# endif
1524# ifndef SCO325
1525# if defined(__UNIXWARE__)
1526# /* avoid including <sys/types.h> for <sys/inline.h> on UnixWare */
1527# define ushort unsigned short
1528# define ushort_t unsigned short
1529# define ulong unsigned long
1530# define ulong_t unsigned long
1531# define uint_t unsigned int
1532# define uchar_t unsigned char
1533# endif /* __UNIXWARE__ */
1534# if !defined(sgi) && !defined(__SUNPRO_C)
1535# include <sys/inline.h>
1536# endif
1537# else
1538# include "scoasm.h"
1539# endif
1540# if (!defined(__HIGHC__) && !defined(sgi) && !defined(__SUNPRO_C)) || \
1541 defined(__USLC__)
1542# pragma asm partial_optimization outl
1543# pragma asm partial_optimization outw
1544# pragma asm partial_optimization outb
1545# pragma asm partial_optimization inl
1546# pragma asm partial_optimization inw
1547# pragma asm partial_optimization inb
1548# endif
1549# endif
1550# define ldq_u(p) (*((unsigned long *)(p)))
1551# define ldl_u(p) (*((unsigned int *)(p)))
1552# define ldw_u(p) (*((unsigned short *)(p)))
1553# define stq_u(v,p) (*(unsigned long *)(p)) = (v)
1554# define stl_u(v,p) (*(unsigned int *)(p)) = (v)
1555# define stw_u(v,p) (*(unsigned short *)(p)) = (v)
1556# define mem_barrier() /* NOP */
1557# define write_mem_barrier() /* NOP */
1558# endif /* __GNUC__ */
1559
1560# if defined(QNX4)
1561# include <sys/types.h>
1562extern unsigned inb(unsigned port);
1563extern unsigned inw(unsigned port);
1564extern unsigned inl(unsigned port);
1565extern void outb(unsigned port, unsigned val);
1566extern void outw(unsigned port, unsigned val);
1567extern void outl(unsigned port, unsigned val);
1568# endif /* QNX4 */
1569
1570# if defined(IODEBUG) && defined(__GNUC__)
1571# undef inb
1572# undef inw
1573# undef inl
1574# undef outb
1575# undef outw
1576# undef outl
1577# define inb(a) __extension__ ({unsigned char __c=RealInb(a); ErrorF("inb(0x%03x) = 0x%02x\t@ line %4d, file %s\n", a, __c, __LINE__, __FILE__);__c;})
1578# define inw(a) __extension__ ({unsigned short __c=RealInw(a); ErrorF("inw(0x%03x) = 0x%04x\t@ line %4d, file %s\n", a, __c, __LINE__, __FILE__);__c;})
1579# define inl(a) __extension__ ({unsigned int __c=RealInl(a); ErrorF("inl(0x%03x) = 0x%08x\t@ line %4d, file %s\n", a, __c, __LINE__, __FILE__);__c;})
1580
1581# define outb(a,b) (ErrorF("outb(0x%03x, 0x%02x)\t@ line %4d, file %s\n", a, b, __LINE__, __FILE__),RealOutb(a,b))
1582# define outw(a,b) (ErrorF("outw(0x%03x, 0x%04x)\t@ line %4d, file %s\n", a, b, __LINE__, __FILE__),RealOutw(a,b))
1583# define outl(a,b) (ErrorF("outl(0x%03x, 0x%08x)\t@ line %4d, file %s\n", a, b, __LINE__, __FILE__),RealOutl(a,b))
1584# endif
1585
1586# endif /* NO_INLINE */
1587
1588# ifdef __alpha__
1589/* entry points for Mmio memory access routines */
1590extern int (*xf86ReadMmio8)(void *, unsigned long);
1591extern int (*xf86ReadMmio16)(void *, unsigned long);
1592# ifndef STANDALONE_MMIO
1593extern int (*xf86ReadMmio32)(void *, unsigned long);
1594# else
1595/* Some DRI 3D drivers need MMIO_IN32. */
1596static __inline__ int
1597xf86ReadMmio32(void *Base, unsigned long Offset)
1598{
1599 __asm__ __volatile__("mb" : : : "memory");
1600 return *(volatile unsigned int*)((unsigned long)Base+(Offset));
1601}
1602# endif
1603extern void (*xf86WriteMmio8)(int, void *, unsigned long);
1604extern void (*xf86WriteMmio16)(int, void *, unsigned long);
1605extern void (*xf86WriteMmio32)(int, void *, unsigned long);
1606extern void (*xf86WriteMmioNB8)(int, void *, unsigned long);
1607extern void (*xf86WriteMmioNB16)(int, void *, unsigned long);
1608extern void (*xf86WriteMmioNB32)(int, void *, unsigned long);
1609extern void xf86JensenMemToBus(char *, long, long, int);
1610extern void xf86JensenBusToMem(char *, char *, unsigned long, int);
1611extern void xf86SlowBCopyFromBus(unsigned char *, unsigned char *, int);
1612extern void xf86SlowBCopyToBus(unsigned char *, unsigned char *, int);
1613
1614/* Some macros to hide the system dependencies for MMIO accesses */
1615/* Changed to kill noise generated by gcc's -Wcast-align */
1616# define MMIO_IN8(base, offset) (*xf86ReadMmio8)(base, offset)
1617# define MMIO_IN16(base, offset) (*xf86ReadMmio16)(base, offset)
1618# ifndef STANDALONE_MMIO
1619# define MMIO_IN32(base, offset) (*xf86ReadMmio32)(base, offset)
1620# else
1621# define MMIO_IN32(base, offset) xf86ReadMmio32(base, offset)
1622# endif
1623
1624# if defined (JENSEN_SUPPORT)
1625# define MMIO_OUT32(base, offset, val) \
1626 (*xf86WriteMmio32)((CARD32)(val), base, offset)
1627# define MMIO_ONB32(base, offset, val) \
1628 (*xf86WriteMmioNB32)((CARD32)(val), base, offset)
1629# else
1630# define MMIO_OUT32(base, offset, val) \
1631 do { \
1632 write_mem_barrier(); \
1633 *(volatile CARD32 *)(void *)(((CARD8*)(base)) + (offset)) = (val); \
1634 } while (0)
1635# define MMIO_ONB32(base, offset, val) \
1636 *(volatile CARD32 *)(void *)(((CARD8*)(base)) + (offset)) = (val)
1637# endif
1638
1639# define MMIO_OUT8(base, offset, val) \
1640 (*xf86WriteMmio8)((CARD8)(val), base, offset)
1641# define MMIO_OUT16(base, offset, val) \
1642 (*xf86WriteMmio16)((CARD16)(val), base, offset)
1643# define MMIO_ONB8(base, offset, val) \
1644 (*xf86WriteMmioNB8)((CARD8)(val), base, offset)
1645# define MMIO_ONB16(base, offset, val) \
1646 (*xf86WriteMmioNB16)((CARD16)(val), base, offset)
1647# define MMIO_MOVE32(base, offset, val) \
1648 MMIO_OUT32(base, offset, val)
1649
1650# elif defined(__powerpc__)
1651 /*
1652 * We provide both byteswapping and non-byteswapping functions here,
1653 * with byteswapping as the default; drivers that don't need
1654 * byteswapping should define PPC_MMIO_IS_BE.
1655 */
1656# define MMIO_IN8(base, offset) xf86ReadMmio8(base, offset)
1657# define MMIO_OUT8(base, offset, val) \
1658 xf86WriteMmio8(base, offset, (CARD8)(val))
1659# define MMIO_ONB8(base, offset, val) \
1660 xf86WriteMmioNB8(base, offset, (CARD8)(val))
1661
1662# if defined(PPC_MMIO_IS_BE) /* No byteswapping */
1663# define MMIO_IN16(base, offset) xf86ReadMmio16Be(base, offset)
1664# define MMIO_IN32(base, offset) xf86ReadMmio32Be(base, offset)
1665# define MMIO_OUT16(base, offset, val) \
1666 xf86WriteMmio16Be(base, offset, (CARD16)(val))
1667# define MMIO_OUT32(base, offset, val) \
1668 xf86WriteMmio32Be(base, offset, (CARD32)(val))
1669# define MMIO_ONB16(base, offset, val) \
1670 xf86WriteMmioNB16Be(base, offset, (CARD16)(val))
1671# define MMIO_ONB32(base, offset, val) \
1672 xf86WriteMmioNB32Be(base, offset, (CARD32)(val))
1673# else /* byteswapping is the default */
1674# define MMIO_IN16(base, offset) xf86ReadMmio16Le(base, offset)
1675# define MMIO_IN32(base, offset) xf86ReadMmio32Le(base, offset)
1676# define MMIO_OUT16(base, offset, val) \
1677 xf86WriteMmio16Le(base, offset, (CARD16)(val))
1678# define MMIO_OUT32(base, offset, val) \
1679 xf86WriteMmio32Le(base, offset, (CARD32)(val))
1680# define MMIO_ONB16(base, offset, val) \
1681 xf86WriteMmioNB16Le(base, offset, (CARD16)(val))
1682# define MMIO_ONB32(base, offset, val) \
1683 xf86WriteMmioNB32Le(base, offset, (CARD32)(val))
1684# endif
1685
1686# define MMIO_MOVE32(base, offset, val) \
1687 xf86WriteMmio32Be(base, offset, (CARD32)(val))
1688
1689static __inline__ void ppc_flush_icache(char *addr)
1690{
1691 __asm__ volatile (
1692 "dcbf 0,%0;"
1693 "sync;"
1694 "icbi 0,%0;"
1695 "sync;"
1696 "isync;"
1697 : : "r"(addr) : "memory");
1698}
1699
1700# elif defined(__sparc__) || defined(sparc) || defined(__sparc)
1701 /*
1702 * Like powerpc, we provide both byteswapping and non-byteswapping functions
1703 * here, with byteswapping as the default; drivers that don't need byteswapping
1704 * should define SPARC_MMIO_IS_BE.  (Perhaps a generic macro could be created
1705 * so that drivers do not need to test both PPC_MMIO_IS_BE and the sparc
1706 * equivalent in the same places.)
1707 */
1708# define MMIO_IN8(base, offset) xf86ReadMmio8(base, offset)
1709# define MMIO_OUT8(base, offset, val) \
1710 xf86WriteMmio8(base, offset, (CARD8)(val))
1711# define MMIO_ONB8(base, offset, val) \
1712 xf86WriteMmio8NB(base, offset, (CARD8)(val))
1713
1714# if defined(SPARC_MMIO_IS_BE) /* No byteswapping */
1715# define MMIO_IN16(base, offset) xf86ReadMmio16Be(base, offset)
1716# define MMIO_IN32(base, offset) xf86ReadMmio32Be(base, offset)
1717# define MMIO_OUT16(base, offset, val) \
1718 xf86WriteMmio16Be(base, offset, (CARD16)(val))
1719# define MMIO_OUT32(base, offset, val) \
1720 xf86WriteMmio32Be(base, offset, (CARD32)(val))
1721# define MMIO_ONB16(base, offset, val) \
1722 xf86WriteMmio16BeNB(base, offset, (CARD16)(val))
1723# define MMIO_ONB32(base, offset, val) \
1724 xf86WriteMmio32BeNB(base, offset, (CARD32)(val))
1725# else /* byteswapping is the default */
1726# define MMIO_IN16(base, offset) xf86ReadMmio16Le(base, offset)
1727# define MMIO_IN32(base, offset) xf86ReadMmio32Le(base, offset)
1728# define MMIO_OUT16(base, offset, val) \
1729 xf86WriteMmio16Le(base, offset, (CARD16)(val))
1730# define MMIO_OUT32(base, offset, val) \
1731 xf86WriteMmio32Le(base, offset, (CARD32)(val))
1732# define MMIO_ONB16(base, offset, val) \
1733 xf86WriteMmio16LeNB(base, offset, (CARD16)(val))
1734# define MMIO_ONB32(base, offset, val) \
1735 xf86WriteMmio32LeNB(base, offset, (CARD32)(val))
1736# endif
1737
1738# define MMIO_MOVE32(base, offset, val) \
1739 xf86WriteMmio32Be(base, offset, (CARD32)(val))
1740
1741# else /* !__alpha__ && !__powerpc__ && !__sparc__ */
1742
1743# define MMIO_IN8(base, offset) \
1744 *(volatile CARD8 *)(((CARD8*)(base)) + (offset))
1745# define MMIO_IN16(base, offset) \
1746 *(volatile CARD16 *)(void *)(((CARD8*)(base)) + (offset))
1747# define MMIO_IN32(base, offset) \
1748 *(volatile CARD32 *)(void *)(((CARD8*)(base)) + (offset))
1749# define MMIO_OUT8(base, offset, val) \
1750 *(volatile CARD8 *)(((CARD8*)(base)) + (offset)) = (val)
1751# define MMIO_OUT16(base, offset, val) \
1752 *(volatile CARD16 *)(void *)(((CARD8*)(base)) + (offset)) = (val)
1753# define MMIO_OUT32(base, offset, val) \
1754 *(volatile CARD32 *)(void *)(((CARD8*)(base)) + (offset)) = (val)
1755# define MMIO_ONB8(base, offset, val) MMIO_OUT8(base, offset, val)
1756# define MMIO_ONB16(base, offset, val) MMIO_OUT16(base, offset, val)
1757# define MMIO_ONB32(base, offset, val) MMIO_OUT32(base, offset, val)
1758
1759# define MMIO_MOVE32(base, offset, val) MMIO_OUT32(base, offset, val)
1760
1761# endif /* __alpha__ */
1762
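/*
 * Illustrative sketch (not part of the original header): how a driver uses
 * the MMIO_* wrappers regardless of architecture.  It assumes the CARD32
 * type from the X11 type headers is already in scope, as it is for the
 * drivers that include this file; the helper name, offset and value are
 * examples only.
 */
static __inline__ CARD32
xf86ExampleMmioRoundTrip(void *mmioBase)
{
    MMIO_OUT32(mmioBase, 0x10, 0x12345678);    /* barriered/wrapped store   */
    return MMIO_IN32(mmioBase, 0x10);          /* matching 32-bit read-back */
}
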
1763/*
1764 * With Intel, the version in os-support/misc/SlowBcopy.s is used.
1765 * This avoids port I/O during the copy (which causes problems with
1766 * some hardware).
1767 */
1768# ifdef __alpha__
1769# define slowbcopy_tobus(src,dst,count) xf86SlowBCopyToBus(src,dst,count)
1770# define slowbcopy_frombus(src,dst,count) xf86SlowBCopyFromBus(src,dst,count)
1771# else /* __alpha__ */
1772# define slowbcopy_tobus(src,dst,count) xf86SlowBcopy(src,dst,count)
1773# define slowbcopy_frombus(src,dst,count) xf86SlowBcopy(src,dst,count)
1774# endif /* __alpha__ */
1775
1776#endif /* _COMPILER_H */