VirtualBox

source: vbox/trunk/src/VBox/Additions/x11/x11include/xorg-server-1.6.99-20090831/compiler.h@ 29743

Last change on this file since 29743 was 22658, checked in by vboxsync, 15 years ago

export Xorg 1.6.99 headers to OSE

  • Property svn:eol-style set to native
File size: 47.2 KB
Line 
1/*
2 * Copyright 1990,91 by Thomas Roell, Dinkelscherben, Germany.
3 *
4 * Permission to use, copy, modify, distribute, and sell this software and its
5 * documentation for any purpose is hereby granted without fee, provided that
6 * the above copyright notice appear in all copies and that both that
7 * copyright notice and this permission notice appear in supporting
8 * documentation, and that the name of Thomas Roell not be used in
9 * advertising or publicity pertaining to distribution of the software without
10 * specific, written prior permission. Thomas Roell makes no representations
11 * about the suitability of this software for any purpose. It is provided
12 * "as is" without express or implied warranty.
13 *
14 * THOMAS ROELL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
15 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
16 * EVENT SHALL THOMAS ROELL BE LIABLE FOR ANY SPECIAL, INDIRECT OR
17 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
18 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
19 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
20 * PERFORMANCE OF THIS SOFTWARE.
21 *
22 */
23/*
24 * Copyright (c) 1994-2003 by The XFree86 Project, Inc.
25 *
26 * Permission is hereby granted, free of charge, to any person obtaining a
27 * copy of this software and associated documentation files (the "Software"),
28 * to deal in the Software without restriction, including without limitation
29 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
30 * and/or sell copies of the Software, and to permit persons to whom the
31 * Software is furnished to do so, subject to the following conditions:
32 *
33 * The above copyright notice and this permission notice shall be included in
34 * all copies or substantial portions of the Software.
35 *
36 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
37 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
38 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
39 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
40 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
41 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
42 * OTHER DEALINGS IN THE SOFTWARE.
43 *
44 * Except as contained in this notice, the name of the copyright holder(s)
45 * and author(s) shall not be used in advertising or otherwise to promote
46 * the sale, use or other dealings in this Software without prior written
47 * authorization from the copyright holder(s) and author(s).
48 */
49
/* Multiple-inclusion guard for the whole header. */
#ifndef _COMPILER_H

# define _COMPILER_H

/* Sun Studio cannot use the GCC inline-asm implementations below, so ask
 * for the out-of-line prototypes instead. */
#if defined(__SUNPRO_C)
# define DO_PROTOTYPES
#endif

/* _X_EXPORT (symbol-visibility annotation) comes from <X11/Xfuncproto.h>;
 * pull it in only if the includer has not already done so. */
#ifndef _X_EXPORT
# include <X11/Xfuncproto.h>
#endif

/* Allow drivers to use the GCC-supported __inline__ and/or __inline. */
# ifndef __inline__
# if defined(__GNUC__)
    /* gcc has __inline__ */
# elif defined(__HIGHC__)
# define __inline__ _Inline
# else
# define __inline__ /**/
# endif
# endif /* __inline__ */
# ifndef __inline
# if defined(__GNUC__)
    /* gcc has __inline */
# elif defined(__HIGHC__)
# define __inline _Inline
# else
# define __inline /**/
# endif
# endif /* __inline */

/* Support gcc's __FUNCTION__ for people using other compilers */
#if !defined(__GNUC__) && !defined(__FUNCTION__)
# define __FUNCTION__ __func__ /* C99 */
#endif
86
# if defined(NO_INLINE) || defined(DO_PROTOTYPES)

/* Out-of-line prototypes for the port-I/O primitives, used when inlining
 * is disabled (NO_INLINE) or the compiler wants plain prototypes
 * (DO_PROTOTYPES).  Most platforms pass a 16-bit port number; the
 * architectures in the #else branch use a full unsigned long. */
# if !defined(__arm__)
# if !defined(__sparc__) && !defined(__sparc) && !defined(__arm32__) \
	&& !(defined(__alpha__) && defined(linux)) \
	&& !(defined(__ia64__) && defined(linux)) \

extern _X_EXPORT void outb(unsigned short, unsigned char);
extern _X_EXPORT void outw(unsigned short, unsigned short);
extern _X_EXPORT void outl(unsigned short, unsigned int);
extern _X_EXPORT unsigned int inb(unsigned short);
extern _X_EXPORT unsigned int inw(unsigned short);
extern _X_EXPORT unsigned int inl(unsigned short);

# else /* __sparc__, __arm32__, __alpha__*/

/* These platforms take the port as an unsigned long. */
extern _X_EXPORT void outb(unsigned long, unsigned char);
extern _X_EXPORT void outw(unsigned long, unsigned short);
extern _X_EXPORT void outl(unsigned long, unsigned int);
extern _X_EXPORT unsigned int inb(unsigned long);
extern _X_EXPORT unsigned int inw(unsigned long);
extern _X_EXPORT unsigned int inl(unsigned long);

# endif /* __sparc__, __arm32__, __alpha__ */
# endif /* __arm__ */

/* PowerPC also gets out-of-line unaligned-access, barrier, and
 * byte-reversed (brx) helpers. */
# if defined(__powerpc__) && !defined(__OpenBSD__)
extern unsigned long ldq_u(unsigned long *);
extern unsigned long ldl_u(unsigned int *);
extern unsigned long ldw_u(unsigned short *);
extern void stq_u(unsigned long, unsigned long *);
extern void stl_u(unsigned long, unsigned int *);
extern void stw_u(unsigned long, unsigned short *);
extern void mem_barrier(void);
extern void write_mem_barrier(void);
extern void stl_brx(unsigned long, volatile unsigned char *, int);
extern void stw_brx(unsigned short, volatile unsigned char *, int);
extern unsigned long ldl_brx(volatile unsigned char *, int);
extern unsigned short ldw_brx(volatile unsigned char *, int);
# endif /* __powerpc__ && !__OpenBSD */

# endif /* NO_INLINE || DO_PROTOTYPES */
129
/* Inline implementations start here, selected per OS/CPU combination. */
# ifndef NO_INLINE
# ifdef __GNUC__
# if (defined(linux) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)) && (defined(__alpha__))

# ifdef linux
/* for Linux on Alpha, we use the LIBC _inx/_outx routines */
/* note that the appropriate setup via "ioperm" needs to be done */
/* *before* any inx/outx is done. */

/* Each wrapper below only swaps the (port, val) argument order into
 * libc's (val, port) convention via the _alpha_* function pointers. */
extern _X_EXPORT void (*_alpha_outb)(char val, unsigned long port);
static __inline__ void
outb(unsigned long port, unsigned char val)
{
    _alpha_outb(val, port);
}

extern _X_EXPORT void (*_alpha_outw)(short val, unsigned long port);
static __inline__ void
outw(unsigned long port, unsigned short val)
{
    _alpha_outw(val, port);
}

extern _X_EXPORT void (*_alpha_outl)(int val, unsigned long port);
static __inline__ void
outl(unsigned long port, unsigned int val)
{
    _alpha_outl(val, port);
}

extern _X_EXPORT unsigned int (*_alpha_inb)(unsigned long port);
static __inline__ unsigned int
inb(unsigned long port)
{
    return _alpha_inb(port);
}

extern _X_EXPORT unsigned int (*_alpha_inw)(unsigned long port);
static __inline__ unsigned int
inw(unsigned long port)
{
    return _alpha_inw(port);
}

extern _X_EXPORT unsigned int (*_alpha_inl)(unsigned long port);
static __inline__ unsigned int
inl(unsigned long port)
{
    return _alpha_inl(port);
}

# endif /* linux */
182
# if (defined(__FreeBSD__) || defined(__OpenBSD__)) \
	&& !defined(DO_PROTOTYPES)

/* for FreeBSD and OpenBSD on Alpha, we use the libio (resp. libalpha) */
/* inx/outx routines */
/* note that the appropriate setup via "ioperm" needs to be done */
/* *before* any inx/outx is done. */

extern _X_EXPORT void outb(unsigned int port, unsigned char val);
extern _X_EXPORT void outw(unsigned int port, unsigned short val);
extern _X_EXPORT void outl(unsigned int port, unsigned int val);
extern _X_EXPORT unsigned char inb(unsigned int port);
extern _X_EXPORT unsigned short inw(unsigned int port);
extern _X_EXPORT unsigned int inl(unsigned int port);

# endif /* (__FreeBSD__ || __OpenBSD__ ) && !DO_PROTOTYPES */


/* NetBSD supplies the port-I/O primitives in a system header. */
#if defined(__NetBSD__)
#include <machine/pio.h>
#endif /* __NetBSD__ */
204
/*
 * inline functions to do unaligned accesses
 * from linux/include/asm-alpha/unaligned.h
 */

/*
 * EGCS 1.1 knows about arbitrary unaligned loads. Define some
 * packed structures to talk about such things with.
 */

/* Single-member packed wrappers: accessing .x through one of these tells
 * gcc the address may be unaligned, so it emits alignment-safe code. */
struct __una_u64 { unsigned long x __attribute__((packed)); };
struct __una_u32 { unsigned int x __attribute__((packed)); };
struct __una_u16 { unsigned short x __attribute__((packed)); };

/*
 * Elemental unaligned loads
 */
/* let's try making these things static */
223
/* Unaligned 64-bit load from *r11 (alpha). */
static __inline__ unsigned long ldq_u(unsigned long * r11)
{
# if defined(__GNUC__)
    /* gcc: the packed struct member makes the unaligned access safe. */
    const struct __una_u64 *ptr = (const struct __una_u64 *) r11;
    return ptr->x;
# else
    unsigned long r1,r2;
    /* Load the two aligned quadwords covering the datum and merge the
     * relevant bytes with extql/extqh. */
    __asm__("ldq_u %0,%3\n\t"
	    "ldq_u %1,%4\n\t"
	    "extql %0,%2,%0\n\t"
	    "extqh %1,%2,%1"
	    :"=&r" (r1), "=&r" (r2)
	    :"r" (r11),
	     "m" (*r11),
	     "m" (*(const unsigned long *)(7+(char *) r11)));
    return r1 | r2;
# endif
}
242
/* Unaligned 32-bit load from *r11, zero/sign handling per extll/extlh. */
static __inline__ unsigned long ldl_u(unsigned int * r11)
{
# if defined(__GNUC__)
    /* gcc: the packed struct member makes the unaligned access safe. */
    const struct __una_u32 *ptr = (const struct __una_u32 *) r11;
    return ptr->x;
# else
    unsigned long r1,r2;
    /* Two covering aligned loads, merged with extll/extlh. */
    __asm__("ldq_u %0,%3\n\t"
	    "ldq_u %1,%4\n\t"
	    "extll %0,%2,%0\n\t"
	    "extlh %1,%2,%1"
	    :"=&r" (r1), "=&r" (r2)
	    :"r" (r11),
	     "m" (*r11),
	     "m" (*(const unsigned long *)(3+(char *) r11)));
    return r1 | r2;
# endif
}
261
/* Unaligned 16-bit load from *r11. */
static __inline__ unsigned long ldw_u(unsigned short * r11)
{
# if defined(__GNUC__)
    /* gcc: the packed struct member makes the unaligned access safe. */
    const struct __una_u16 *ptr = (const struct __una_u16 *) r11;
    return ptr->x;
# else
    unsigned long r1,r2;
    /* Two covering aligned loads, merged with extwl/extwh. */
    __asm__("ldq_u %0,%3\n\t"
	    "ldq_u %1,%4\n\t"
	    "extwl %0,%2,%0\n\t"
	    "extwh %1,%2,%1"
	    :"=&r" (r1), "=&r" (r2)
	    :"r" (r11),
	     "m" (*r11),
	     "m" (*(const unsigned long *)(1+(char *) r11)));
    return r1 | r2;
# endif
}
280
281/*
282 * Elemental unaligned stores
283 */
284
/* Unaligned 64-bit store of r5 to *r11 (alpha). */
static __inline__ void stq_u(unsigned long r5, unsigned long * r11)
{
# if defined(__GNUC__)
    /* gcc: the packed struct member makes the unaligned store safe. */
    struct __una_u64 *ptr = (struct __una_u64 *) r11;
    ptr->x = r5;
# else
    unsigned long r1,r2,r3,r4;

    /* Read-modify-write the two aligned quadwords overlapping the
     * destination: insq*/
    __asm__("ldq_u %3,%1\n\t"
	    "ldq_u %2,%0\n\t"
	    "insqh %6,%7,%5\n\t"
	    "insql %6,%7,%4\n\t"
	    "mskqh %3,%7,%3\n\t"
	    "mskql %2,%7,%2\n\t"
	    "bis %3,%5,%3\n\t"
	    "bis %2,%4,%2\n\t"
	    "stq_u %3,%1\n\t"
	    "stq_u %2,%0"
	    :"=m" (*r11),
	     "=m" (*(unsigned long *)(7+(char *) r11)),
	     "=&r" (r1), "=&r" (r2), "=&r" (r3), "=&r" (r4)
	    :"r" (r5), "r" (r11));
# endif
}
309
/* Unaligned 32-bit store of r5 to *r11 (alpha). */
static __inline__ void stl_u(unsigned long r5, unsigned int * r11)
{
# if defined(__GNUC__)
    /* gcc: the packed struct member makes the unaligned store safe. */
    struct __una_u32 *ptr = (struct __una_u32 *) r11;
    ptr->x = r5;
# else
    unsigned long r1,r2,r3,r4;

    /* Read-modify-write of the two covering aligned quadwords, merging
     * the value with insl*/
    __asm__("ldq_u %3,%1\n\t"
	    "ldq_u %2,%0\n\t"
	    "inslh %6,%7,%5\n\t"
	    "insll %6,%7,%4\n\t"
	    "msklh %3,%7,%3\n\t"
	    "mskll %2,%7,%2\n\t"
	    "bis %3,%5,%3\n\t"
	    "bis %2,%4,%2\n\t"
	    "stq_u %3,%1\n\t"
	    "stq_u %2,%0"
	    :"=m" (*r11),
	     "=m" (*(unsigned long *)(3+(char *) r11)),
	     "=&r" (r1), "=&r" (r2), "=&r" (r3), "=&r" (r4)
	    :"r" (r5), "r" (r11));
# endif
}
334
/* Unaligned 16-bit store of r5 to *r11 (alpha). */
static __inline__ void stw_u(unsigned long r5, unsigned short * r11)
{
# if defined(__GNUC__)
    /* gcc: the packed struct member makes the unaligned store safe. */
    struct __una_u16 *ptr = (struct __una_u16 *) r11;
    ptr->x = r5;
# else
    unsigned long r1,r2,r3,r4;

    /* Read-modify-write of the two covering aligned quadwords, merging
     * the value with insw*/
    __asm__("ldq_u %3,%1\n\t"
	    "ldq_u %2,%0\n\t"
	    "inswh %6,%7,%5\n\t"
	    "inswl %6,%7,%4\n\t"
	    "mskwh %3,%7,%3\n\t"
	    "mskwl %2,%7,%2\n\t"
	    "bis %3,%5,%3\n\t"
	    "bis %2,%4,%2\n\t"
	    "stq_u %3,%1\n\t"
	    "stq_u %2,%0"
	    :"=m" (*r11),
	     "=m" (*(unsigned long *)(1+(char *) r11)),
	     "=&r" (r1), "=&r" (r2), "=&r" (r3), "=&r" (r4)
	    :"r" (r5), "r" (r11));
# endif
}
359
/* Alpha memory barriers: mb orders all accesses, wmb orders stores. */
# define mem_barrier() __asm__ __volatile__("mb" : : : "memory")
# define write_mem_barrier() __asm__ __volatile__("wmb" : : : "memory")

# elif defined(linux) && defined(__ia64__)

# include <inttypes.h>

/* <sys/io.h> provides the glibc in*/out* macros (undef'd further below). */
# include <sys/io.h>
368
/* Packed single-member wrappers: reading or writing .x through one of
 * these tells the compiler the address may be unaligned, so it emits an
 * alignment-safe access sequence. */
struct __una_u64 { uint64_t x __attribute__((packed)); };
struct __una_u32 { uint32_t x __attribute__((packed)); };
struct __una_u16 { uint16_t x __attribute__((packed)); };

/* Unaligned 64-bit load. */
static __inline__ unsigned long
__uldq (const unsigned long * p)
{
    return ((const struct __una_u64 *) p)->x;
}

/* Unaligned 32-bit load. */
static __inline__ unsigned long
__uldl (const unsigned int * p)
{
    return ((const struct __una_u32 *) p)->x;
}

/* Unaligned 16-bit load. */
static __inline__ unsigned long
__uldw (const unsigned short * p)
{
    return ((const struct __una_u16 *) p)->x;
}

/* Unaligned 64-bit store. */
static __inline__ void
__ustq (unsigned long v, unsigned long * p)
{
    ((struct __una_u64 *) p)->x = v;
}

/* Unaligned 32-bit store. */
static __inline__ void
__ustl (unsigned long v, unsigned int * p)
{
    ((struct __una_u32 *) p)->x = v;
}

/* Unaligned 16-bit store. */
static __inline__ void
__ustw (unsigned long v, unsigned short * p)
{
    ((struct __una_u16 *) p)->x = v;
}
414
/* Map the generic unaligned-access names onto the ia64 helpers above. */
# define ldq_u(p) __uldq(p)
# define ldl_u(p) __uldl(p)
# define ldw_u(p) __uldw(p)
# define stq_u(v,p) __ustq(v,p)
# define stl_u(v,p) __ustl(v,p)
# define stw_u(v,p) __ustw(v,p)

/* ia64 "mf" is a full memory fence; icc uses its __mf() intrinsic. */
# ifndef __INTEL_COMPILER
# define mem_barrier() __asm__ __volatile__ ("mf" ::: "memory")
# define write_mem_barrier() __asm__ __volatile__ ("mf" ::: "memory")
# else
# include "ia64intrin.h"
# define mem_barrier() __mf()
# define write_mem_barrier() __mf()
# endif

/*
 * This is overkill, but for different reasons depending on where it is used.
 * This is thus general enough to be used everywhere cache flushes are needed.
 * It doesn't handle memory access serialisation by other processors, though.
 */
# ifndef __INTEL_COMPILER
# define ia64_flush_cache(Addr) \
	__asm__ __volatile__ ( \
		"fc.i %0;;;" \
		"sync.i;;;" \
		"mf;;;" \
		"srlz.i;;;" \
		:: "r"(Addr) : "memory")
# else
# define ia64_flush_cache(Addr) { \
	__fc(Addr);\
	__synci();\
	__mf();\
	__isrlz();\
       }
# endif
/* <sys/io.h> defines these as macros; undef so the function versions
 * declared below take effect. */
# undef outb
# undef outw
# undef outl
# undef inb
# undef inw
# undef inl
extern _X_EXPORT void outb(unsigned long port, unsigned char val);
extern _X_EXPORT void outw(unsigned long port, unsigned short val);
extern _X_EXPORT void outl(unsigned long port, unsigned int val);
extern _X_EXPORT unsigned int inb(unsigned long port);
extern _X_EXPORT unsigned int inw(unsigned long port);
extern _X_EXPORT unsigned int inl(unsigned long port);
464
# elif (defined(linux) || defined(__FreeBSD__)) && defined(__amd64__)

# include <inttypes.h>

/* amd64 handles unaligned accesses in hardware, so plain dereferences
 * suffice for the unaligned-access helpers. */
# define ldq_u(p) (*((unsigned long *)(p)))
# define ldl_u(p) (*((unsigned int *)(p)))
# define ldw_u(p) (*((unsigned short *)(p)))
# define stq_u(v,p) (*(unsigned long *)(p)) = (v)
# define stl_u(v,p) (*(unsigned int *)(p)) = (v)
# define stw_u(v,p) (*(unsigned short *)(p)) = (v)

/* Full barrier: a locked RMW on the stack top serialises memory access.
 * The write barrier is compiler-only (an empty asm with a "memory"
 * clobber) — presumably relying on x86 store ordering; confirm. */
# define mem_barrier() \
    __asm__ __volatile__ ("lock; addl $0,0(%%rsp)": : :"memory")
# define write_mem_barrier() \
    __asm__ __volatile__ ("": : :"memory")
480
481
/* amd64 port I/O via the in/out instructions; the port number goes in
 * %dx and the value in %al/%ax/%eax. */
static __inline__ void
outb(unsigned short port, unsigned char val)
{
   __asm__ __volatile__("outb %0,%1" : :"a" (val), "d" (port));
}


static __inline__ void
outw(unsigned short port, unsigned short val)
{
   __asm__ __volatile__("outw %0,%1" : :"a" (val), "d" (port));
}

static __inline__ void
outl(unsigned short port, unsigned int val)
{
   __asm__ __volatile__("outl %0,%1" : :"a" (val), "d" (port));
}

/* Reads return the value zero-extended to unsigned int. */
static __inline__ unsigned int
inb(unsigned short port)
{
   unsigned char ret;
   __asm__ __volatile__("inb %1,%0" :
       "=a" (ret) :
       "d" (port));
   return ret;
}

static __inline__ unsigned int
inw(unsigned short port)
{
   unsigned short ret;
   __asm__ __volatile__("inw %1,%0" :
       "=a" (ret) :
       "d" (port));
   return ret;
}

static __inline__ unsigned int
inl(unsigned short port)
{
   unsigned int ret;
   __asm__ __volatile__("inl %1,%0" :
       "=a" (ret) :
       "d" (port));
   return ret;
}
530
# elif (defined(linux) || defined(sun) || defined(__OpenBSD__) || defined(__FreeBSD__)) && defined(__sparc__)

/* ASI_PL: sparc address-space identifier used below for little-endian
 * alternate-space loads/stores. */
# ifndef ASI_PL
# define ASI_PL 0x88
# endif

/* NOTE(review): the .word is a hand-encoded barrier instruction —
 * presumably membar; verify against the SPARC ISA. */
# define barrier() __asm__ __volatile__(".word 0x8143e00a": : :"memory")
538
/* sparc port I/O: alternate-space stores/loads with ASI_PL give
 * little-endian access; each store is followed by barrier(). */
static __inline__ void
outb(unsigned long port, unsigned char val)
{
    __asm__ __volatile__("stba %0, [%1] %2"
			 : /* No outputs */
			 : "r" (val), "r" (port), "i" (ASI_PL));
    barrier();
}

static __inline__ void
outw(unsigned long port, unsigned short val)
{
    __asm__ __volatile__("stha %0, [%1] %2"
			 : /* No outputs */
			 : "r" (val), "r" (port), "i" (ASI_PL));
    barrier();
}

static __inline__ void
outl(unsigned long port, unsigned int val)
{
    __asm__ __volatile__("sta %0, [%1] %2"
			 : /* No outputs */
			 : "r" (val), "r" (port), "i" (ASI_PL));
    barrier();
}

/* Reads return the value zero-extended to unsigned int. */
static __inline__ unsigned int
inb(unsigned long port)
{
    unsigned int ret;
    __asm__ __volatile__("lduba [%1] %2, %0"
			 : "=r" (ret)
			 : "r" (port), "i" (ASI_PL));
    return ret;
}

static __inline__ unsigned int
inw(unsigned long port)
{
    unsigned int ret;
    __asm__ __volatile__("lduha [%1] %2, %0"
			 : "=r" (ret)
			 : "r" (port), "i" (ASI_PL));
    return ret;
}

static __inline__ unsigned int
inl(unsigned long port)
{
    unsigned int ret;
    __asm__ __volatile__("lda [%1] %2, %0"
			 : "=r" (ret)
			 : "r" (port), "i" (ASI_PL));
    return ret;
}
595
/* sparc MMIO reads at base+offset.  The Le variants use the ASI_PL
 * alternate space for little-endian byte order; the Be variants use
 * ordinary (big-endian) loads. */
static __inline__ unsigned char
xf86ReadMmio8(__volatile__ void *base, const unsigned long offset)
{
    unsigned long addr = ((unsigned long)base) + offset;
    unsigned char ret;

    __asm__ __volatile__("lduba [%1] %2, %0"
			 : "=r" (ret)
			 : "r" (addr), "i" (ASI_PL));
    return ret;
}

static __inline__ unsigned short
xf86ReadMmio16Be(__volatile__ void *base, const unsigned long offset)
{
    unsigned long addr = ((unsigned long)base) + offset;
    unsigned short ret;

    __asm__ __volatile__("lduh [%1], %0"
			 : "=r" (ret)
			 : "r" (addr));
    return ret;
}

static __inline__ unsigned short
xf86ReadMmio16Le(__volatile__ void *base, const unsigned long offset)
{
    unsigned long addr = ((unsigned long)base) + offset;
    unsigned short ret;

    __asm__ __volatile__("lduha [%1] %2, %0"
			 : "=r" (ret)
			 : "r" (addr), "i" (ASI_PL));
    return ret;
}

static __inline__ unsigned int
xf86ReadMmio32Be(__volatile__ void *base, const unsigned long offset)
{
    unsigned long addr = ((unsigned long)base) + offset;
    unsigned int ret;

    __asm__ __volatile__("ld [%1], %0"
			 : "=r" (ret)
			 : "r" (addr));
    return ret;
}

static __inline__ unsigned int
xf86ReadMmio32Le(__volatile__ void *base, const unsigned long offset)
{
    unsigned long addr = ((unsigned long)base) + offset;
    unsigned int ret;

    __asm__ __volatile__("lda [%1] %2, %0"
			 : "=r" (ret)
			 : "r" (addr), "i" (ASI_PL));
    return ret;
}
655
/* sparc MMIO writes.  The plain variants issue barrier() after the
 * store; the *NB ("no barrier") variants below are identical stores
 * without the trailing barrier(). */
static __inline__ void
xf86WriteMmio8(__volatile__ void *base, const unsigned long offset,
	       const unsigned int val)
{
    unsigned long addr = ((unsigned long)base) + offset;

    __asm__ __volatile__("stba %0, [%1] %2"
			 : /* No outputs */
			 : "r" (val), "r" (addr), "i" (ASI_PL));
    barrier();
}

static __inline__ void
xf86WriteMmio16Be(__volatile__ void *base, const unsigned long offset,
		  const unsigned int val)
{
    unsigned long addr = ((unsigned long)base) + offset;

    __asm__ __volatile__("sth %0, [%1]"
			 : /* No outputs */
			 : "r" (val), "r" (addr));
    barrier();
}

static __inline__ void
xf86WriteMmio16Le(__volatile__ void *base, const unsigned long offset,
		  const unsigned int val)
{
    unsigned long addr = ((unsigned long)base) + offset;

    __asm__ __volatile__("stha %0, [%1] %2"
			 : /* No outputs */
			 : "r" (val), "r" (addr), "i" (ASI_PL));
    barrier();
}

static __inline__ void
xf86WriteMmio32Be(__volatile__ void *base, const unsigned long offset,
		  const unsigned int val)
{
    unsigned long addr = ((unsigned long)base) + offset;

    __asm__ __volatile__("st %0, [%1]"
			 : /* No outputs */
			 : "r" (val), "r" (addr));
    barrier();
}

static __inline__ void
xf86WriteMmio32Le(__volatile__ void *base, const unsigned long offset,
		  const unsigned int val)
{
    unsigned long addr = ((unsigned long)base) + offset;

    __asm__ __volatile__("sta %0, [%1] %2"
			 : /* No outputs */
			 : "r" (val), "r" (addr), "i" (ASI_PL));
    barrier();
}

/* No-barrier variants: same stores, no trailing barrier(). */
static __inline__ void
xf86WriteMmio8NB(__volatile__ void *base, const unsigned long offset,
		 const unsigned int val)
{
    unsigned long addr = ((unsigned long)base) + offset;

    __asm__ __volatile__("stba %0, [%1] %2"
			 : /* No outputs */
			 : "r" (val), "r" (addr), "i" (ASI_PL));
}

static __inline__ void
xf86WriteMmio16BeNB(__volatile__ void *base, const unsigned long offset,
		    const unsigned int val)
{
    unsigned long addr = ((unsigned long)base) + offset;

    __asm__ __volatile__("sth %0, [%1]"
			 : /* No outputs */
			 : "r" (val), "r" (addr));
}

static __inline__ void
xf86WriteMmio16LeNB(__volatile__ void *base, const unsigned long offset,
		    const unsigned int val)
{
    unsigned long addr = ((unsigned long)base) + offset;

    __asm__ __volatile__("stha %0, [%1] %2"
			 : /* No outputs */
			 : "r" (val), "r" (addr), "i" (ASI_PL));
}

static __inline__ void
xf86WriteMmio32BeNB(__volatile__ void *base, const unsigned long offset,
		    const unsigned int val)
{
    unsigned long addr = ((unsigned long)base) + offset;

    __asm__ __volatile__("st %0, [%1]"
			 : /* No outputs */
			 : "r" (val), "r" (addr));
}

static __inline__ void
xf86WriteMmio32LeNB(__volatile__ void *base, const unsigned long offset,
		    const unsigned int val)
{
    unsigned long addr = ((unsigned long)base) + offset;

    __asm__ __volatile__("sta %0, [%1] %2"
			 : /* No outputs */
			 : "r" (val), "r" (addr), "i" (ASI_PL));
}
770
771
772/*
773 * EGCS 1.1 knows about arbitrary unaligned loads. Define some
774 * packed structures to talk about such things with.
775 */
776
/* Packed structs for unaligned access; the 64-bit one exists only on
 * 64-bit sparc (arch64/sparcv9). */
# if defined(__arch64__) || defined(__sparcv9)
struct __una_u64 { unsigned long x __attribute__((packed)); };
# endif
struct __una_u32 { unsigned int x __attribute__((packed)); };
struct __una_u16 { unsigned short x __attribute__((packed)); };

/* Unaligned "quad" load: 64-bit on sparcv9, otherwise 32-bit (long is
 * 32-bit there).  Non-gcc compilers fall back to memmove. */
static __inline__ unsigned long ldq_u(unsigned long *p)
{
# if defined(__GNUC__)
# if defined(__arch64__) || defined(__sparcv9)
    const struct __una_u64 *ptr = (const struct __una_u64 *) p;
# else
    const struct __una_u32 *ptr = (const struct __una_u32 *) p;
# endif
    return ptr->x;
# else
    unsigned long ret;
    memmove(&ret, p, sizeof(*p));
    return ret;
# endif
}

static __inline__ unsigned long ldl_u(unsigned int *p)
{
# if defined(__GNUC__)
    const struct __una_u32 *ptr = (const struct __una_u32 *) p;
    return ptr->x;
# else
    unsigned int ret;
    memmove(&ret, p, sizeof(*p));
    return ret;
# endif
}

static __inline__ unsigned long ldw_u(unsigned short *p)
{
# if defined(__GNUC__)
    const struct __una_u16 *ptr = (const struct __una_u16 *) p;
    return ptr->x;
# else
    unsigned short ret;
    memmove(&ret, p, sizeof(*p));
    return ret;
# endif
}

/* Unaligned stores, mirroring the loads above. */
static __inline__ void stq_u(unsigned long val, unsigned long *p)
{
# if defined(__GNUC__)
# if defined(__arch64__) || defined(__sparcv9)
    struct __una_u64 *ptr = (struct __una_u64 *) p;
# else
    struct __una_u32 *ptr = (struct __una_u32 *) p;
# endif
    ptr->x = val;
# else
    unsigned long tmp = val;
    memmove(p, &tmp, sizeof(*p));
# endif
}

static __inline__ void stl_u(unsigned long val, unsigned int *p)
{
# if defined(__GNUC__)
    struct __una_u32 *ptr = (struct __una_u32 *) p;
    ptr->x = val;
# else
    unsigned int tmp = val;
    memmove(p, &tmp, sizeof(*p));
# endif
}

static __inline__ void stw_u(unsigned long val, unsigned short *p)
{
# if defined(__GNUC__)
    struct __una_u16 *ptr = (struct __una_u16 *) p;
    ptr->x = val;
# else
    unsigned short tmp = val;
    memmove(p, &tmp, sizeof(*p));
# endif
}
859
# define mem_barrier() /* XXX: nop for now */
# define write_mem_barrier() /* XXX: nop for now */

# elif defined(__mips__) || (defined(__arm32__) && !defined(__linux__))
/* Port width for the memory-mapped I/O below: long on arm32, short on mips. */
# ifdef __arm32__
# define PORT_SIZE long
# else
# define PORT_SIZE short
# endif

/* NOTE(review): a tentative definition in a header — every includer gets
 * one; relies on common-symbol linking. */
_X_EXPORT unsigned int IOPortBase; /* Memory mapped I/O port area */
871
/* Port I/O emulated via memory-mapped accesses at IOPortBase + port. */
static __inline__ void
outb(unsigned PORT_SIZE port, unsigned char val)
{
    *(volatile unsigned char*)(((unsigned PORT_SIZE)(port))+IOPortBase) = val;
}

static __inline__ void
outw(unsigned PORT_SIZE port, unsigned short val)
{
    *(volatile unsigned short*)(((unsigned PORT_SIZE)(port))+IOPortBase) = val;
}

static __inline__ void
outl(unsigned PORT_SIZE port, unsigned int val)
{
    *(volatile unsigned int*)(((unsigned PORT_SIZE)(port))+IOPortBase) = val;
}

static __inline__ unsigned int
inb(unsigned PORT_SIZE port)
{
    return *(volatile unsigned char*)(((unsigned PORT_SIZE)(port))+IOPortBase);
}

static __inline__ unsigned int
inw(unsigned PORT_SIZE port)
{
    return *(volatile unsigned short*)(((unsigned PORT_SIZE)(port))+IOPortBase);
}

static __inline__ unsigned int
inl(unsigned PORT_SIZE port)
{
    return *(volatile unsigned int*)(((unsigned PORT_SIZE)(port))+IOPortBase);
}
907
908
# if defined(__mips__)
/* MIPS unaligned loads built from the lwr/lwl pair, which together
 * assemble a word from two partial (unaligned-safe) accesses. */
static __inline__ unsigned long ldq_u(unsigned long * r11)
{
    unsigned long r1;
    __asm__("lwr %0,%2\n\t"
	    "lwl %0,%3\n\t"
	    :"=&r" (r1)
	    :"r" (r11),
	     "m" (*r11),
	     "m" (*(unsigned long *)(3+(char *) r11)));
    return r1;
}

static __inline__ unsigned long ldl_u(unsigned int * r11)
{
    unsigned long r1;
    __asm__("lwr %0,%2\n\t"
	    "lwl %0,%3\n\t"
	    :"=&r" (r1)
	    :"r" (r11),
	     "m" (*r11),
	     "m" (*(unsigned long *)(3+(char *) r11)));
    return r1;
}

static __inline__ unsigned long ldw_u(unsigned short * r11)
{
    unsigned long r1;
    __asm__("lwr %0,%2\n\t"
	    "lwl %0,%3\n\t"
	    :"=&r" (r1)
	    :"r" (r11),
	     "m" (*r11),
	     "m" (*(unsigned long *)(1+(char *) r11)));
    return r1;
}
945
# ifdef linux /* don't mess with other OSs */

/*
 * EGCS 1.1 knows about arbitrary unaligned loads (and we don't support older
 * versions anyway. Define some packed structures to talk about such things
 * with.
 */

struct __una_u32 { unsigned int x __attribute__((packed)); };
struct __una_u16 { unsigned short x __attribute__((packed)); };

/* Unaligned 16-bit store via the packed struct member. */
static __inline__ void stw_u(unsigned long val, unsigned short *p)
{
    struct __una_u16 *ptr = (struct __una_u16 *) p;
    ptr->x = val;
}

/* Unaligned 32-bit store via the packed struct member. */
static __inline__ void stl_u(unsigned long val, unsigned int *p)
{
    struct __una_u32 *ptr = (struct __una_u32 *) p;
    ptr->x = val;
}
968
/* Big-endian MMIO word access, only needed on big-endian MIPS builds. */
# if X_BYTE_ORDER == X_BIG_ENDIAN
static __inline__ unsigned int
xf86ReadMmio32Be(__volatile__ void *base, const unsigned long offset)
{
    unsigned long addr = ((unsigned long)base) + offset;
    unsigned int ret;

    __asm__ __volatile__("lw %0, 0(%1)"
			 : "=r" (ret)
			 : "r" (addr));
    return ret;
}

static __inline__ void
xf86WriteMmio32Be(__volatile__ void *base, const unsigned long offset,
		  const unsigned int val)
{
    unsigned long addr = ((unsigned long)base) + offset;

    __asm__ __volatile__("sw %0, 0(%1)"
			 : /* No outputs */
			 : "r" (val), "r" (addr));
}
# endif
993
/* mips/linux barrier: disable assembler reordering and pad with nops to
 * drain the R4400 pipeline (see inline asm comments). */
# define mem_barrier() \
	__asm__ __volatile__( \
		"# prevent instructions being moved around\n\t" \
		".set\tnoreorder\n\t" \
		"# 8 nops to fool the R4400 pipeline\n\t" \
		"nop;nop;nop;nop;nop;nop;nop;nop\n\t" \
		".set\treorder" \
		: /* no output */ \
		: /* no input */ \
		: "memory")
# define write_mem_barrier() mem_barrier()

# else /* !linux */
/* Unaligned store helpers for non-Linux MIPS: write the value one byte at
 * a time, least-significant byte first, so no alignment trap can occur.
 * stq_u aliases stl_u (a "long" here is 32 bits).
 *
 * Fix: the original wrote (*(unsigned char *)(p)+1) = ..., which
 * dereferences p first and then adds 1 to the loaded *value* — not an
 * lvalue, so it cannot compile.  The pointer must be offset before the
 * dereference: *((unsigned char *)(p)+1).  The macros are also wrapped in
 * do { } while (0) so they behave as single statements after `if`. */
# define stq_u(v,p) stl_u(v,p)
# define stl_u(v,p) do { \
		(*((unsigned char *)(p))) = (v); \
		(*((unsigned char *)(p)+1)) = ((v) >> 8); \
		(*((unsigned char *)(p)+2)) = ((v) >> 16); \
		(*((unsigned char *)(p)+3)) = ((v) >> 24); \
	} while (0)

# define stw_u(v,p) do { \
		(*((unsigned char *)(p))) = (v); \
		(*((unsigned char *)(p)+1)) = ((v) >> 8); \
	} while (0)
1016
# define mem_barrier() /* NOP */
# endif /* !linux */
# endif /* __mips__ */

/* arm32 (non-Linux): unaligned helpers are plain dereferences and the
 * barriers are no-ops on this path. */
# if defined(__arm32__)
# define ldq_u(p) (*((unsigned long *)(p)))
# define ldl_u(p) (*((unsigned int *)(p)))
# define ldw_u(p) (*((unsigned short *)(p)))
# define stq_u(v,p) (*(unsigned long *)(p)) = (v)
# define stl_u(v,p) (*(unsigned int *)(p)) = (v)
# define stw_u(v,p) (*(unsigned short *)(p)) = (v)
# define mem_barrier() /* NOP */
# define write_mem_barrier() /* NOP */
# endif /* __arm32__ */
1031
# elif (defined(linux) || defined(__OpenBSD__) || defined(__NetBSD__) || defined(__FreeBSD__)) && defined(__powerpc__)

# ifndef MAP_FAILED
# define MAP_FAILED ((void *)-1)
# endif

/* Base of the mapped I/O region used by the PowerPC port code. */
extern _X_EXPORT volatile unsigned char *ioBase;

#if defined(linux) && defined(__powerpc64__)
# include <linux/version.h>
# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
# include <asm/memory.h>
# endif
#endif /* defined(linux) && defined(__powerpc64__) */
/* eieio: PowerPC "enforce in-order execution of I/O" barrier. */
#ifndef eieio /* We deal with arch-specific eieio() routines above... */
# define eieio() __asm__ __volatile__ ("eieio" ::: "memory")
#endif /* eieio */
1049
/* PowerPC MMIO reads from base+offset.  Le variants use the
 * byte-reversed loads (lhbrx/lwbrx); each read is followed by eieio to
 * order the access. */
static __inline__ unsigned char
xf86ReadMmio8(__volatile__ void *base, const unsigned long offset)
{
    register unsigned char val;
    __asm__ __volatile__(
	    "lbzx %0,%1,%2\n\t"
	    "eieio"
	    : "=r" (val)
	    : "b" (base), "r" (offset),
	      "m" (*((volatile unsigned char *)base+offset)));
    return val;
}

static __inline__ unsigned short
xf86ReadMmio16Be(__volatile__ void *base, const unsigned long offset)
{
    register unsigned short val;
    __asm__ __volatile__(
	    "lhzx %0,%1,%2\n\t"
	    "eieio"
	    : "=r" (val)
	    : "b" (base), "r" (offset),
	      "m" (*((volatile unsigned char *)base+offset)));
    return val;
}

static __inline__ unsigned short
xf86ReadMmio16Le(__volatile__ void *base, const unsigned long offset)
{
    register unsigned short val;
    __asm__ __volatile__(
	    "lhbrx %0,%1,%2\n\t"
	    "eieio"
	    : "=r" (val)
	    : "b" (base), "r" (offset),
	      "m" (*((volatile unsigned char *)base+offset)));
    return val;
}

static __inline__ unsigned int
xf86ReadMmio32Be(__volatile__ void *base, const unsigned long offset)
{
    register unsigned int val;
    __asm__ __volatile__(
	    "lwzx %0,%1,%2\n\t"
	    "eieio"
	    : "=r" (val)
	    : "b" (base), "r" (offset),
	      "m" (*((volatile unsigned char *)base+offset)));
    return val;
}

static __inline__ unsigned int
xf86ReadMmio32Le(__volatile__ void *base, const unsigned long offset)
{
    register unsigned int val;
    __asm__ __volatile__(
	    "lwbrx %0,%1,%2\n\t"
	    "eieio"
	    : "=r" (val)
	    : "b" (base), "r" (offset),
	      "m" (*((volatile unsigned char *)base+offset)));
    return val;
}
1114
/*
 * Write one byte to MMIO space with No Barrier ("NB"): no eieio is
 * issued, so the caller is responsible for ordering when it matters.
 */
static __inline__ void
xf86WriteMmioNB8(__volatile__ void *base, const unsigned long offset,
                 const unsigned char val)
{
    __asm__ __volatile__(
            "stbx %1,%2,%3\n\t"
            : "=m" (*((volatile unsigned char *)base+offset))
            : "r" (val), "b" (base), "r" (offset));
}
1124
/*
 * Write a 16-bit value little-endian ("sthbrx" stores byte-reversed),
 * with no ordering barrier.
 */
static __inline__ void
xf86WriteMmioNB16Le(__volatile__ void *base, const unsigned long offset,
                    const unsigned short val)
{
    __asm__ __volatile__(
            "sthbrx %1,%2,%3\n\t"
            : "=m" (*((volatile unsigned char *)base+offset))
            : "r" (val), "b" (base), "r" (offset));
}
1134
/*
 * Write a 16-bit value big-endian (native byte order), with no ordering
 * barrier.
 */
static __inline__ void
xf86WriteMmioNB16Be(__volatile__ void *base, const unsigned long offset,
                    const unsigned short val)
{
    __asm__ __volatile__(
            "sthx %1,%2,%3\n\t"
            : "=m" (*((volatile unsigned char *)base+offset))
            : "r" (val), "b" (base), "r" (offset));
}
1144
/*
 * Write a 32-bit value little-endian ("stwbrx" stores byte-reversed),
 * with no ordering barrier.
 */
static __inline__ void
xf86WriteMmioNB32Le(__volatile__ void *base, const unsigned long offset,
                    const unsigned int val)
{
    __asm__ __volatile__(
            "stwbrx %1,%2,%3\n\t"
            : "=m" (*((volatile unsigned char *)base+offset))
            : "r" (val), "b" (base), "r" (offset));
}
1154
/*
 * Write a 32-bit value big-endian (native byte order), with no ordering
 * barrier.
 */
static __inline__ void
xf86WriteMmioNB32Be(__volatile__ void *base, const unsigned long offset,
                    const unsigned int val)
{
    __asm__ __volatile__(
            "stwx %1,%2,%3\n\t"
            : "=m" (*((volatile unsigned char *)base+offset))
            : "r" (val), "b" (base), "r" (offset));
}
1164
/*
 * Barriered 8-bit MMIO write: perform the store, then "eieio" so it is
 * ordered before any subsequent device access.
 */
static __inline__ void
xf86WriteMmio8(__volatile__ void *base, const unsigned long offset,
               const unsigned char val)
{
    xf86WriteMmioNB8(base, offset, val);
    eieio();
}
1172
/*
 * Barriered 16-bit little-endian MMIO write (store + eieio).
 */
static __inline__ void
xf86WriteMmio16Le(__volatile__ void *base, const unsigned long offset,
                  const unsigned short val)
{
    xf86WriteMmioNB16Le(base, offset, val);
    eieio();
}
1180
/*
 * Barriered 16-bit big-endian MMIO write (store + eieio).
 */
static __inline__ void
xf86WriteMmio16Be(__volatile__ void *base, const unsigned long offset,
                  const unsigned short val)
{
    xf86WriteMmioNB16Be(base, offset, val);
    eieio();
}
1188
/*
 * Barriered 32-bit little-endian MMIO write (store + eieio).
 */
static __inline__ void
xf86WriteMmio32Le(__volatile__ void *base, const unsigned long offset,
                  const unsigned int val)
{
    xf86WriteMmioNB32Le(base, offset, val);
    eieio();
}
1196
/*
 * Barriered 32-bit big-endian MMIO write (store + eieio).
 */
static __inline__ void
xf86WriteMmio32Be(__volatile__ void *base, const unsigned long offset,
                  const unsigned int val)
{
    xf86WriteMmioNB32Be(base, offset, val);
    eieio();
}
1204
1205
1206static __inline__ void
1207outb(unsigned short port, unsigned char value)
1208{
1209 if(ioBase == MAP_FAILED) return;
1210 xf86WriteMmio8((void *)ioBase, port, value);
1211}
1212
1213static __inline__ void
1214outw(unsigned short port, unsigned short value)
1215{
1216 if(ioBase == MAP_FAILED) return;
1217 xf86WriteMmio16Le((void *)ioBase, port, value);
1218}
1219
1220static __inline__ void
1221outl(unsigned short port, unsigned int value)
1222{
1223 if(ioBase == MAP_FAILED) return;
1224 xf86WriteMmio32Le((void *)ioBase, port, value);
1225}
1226
1227static __inline__ unsigned int
1228inb(unsigned short port)
1229{
1230 if(ioBase == MAP_FAILED) return 0;
1231 return xf86ReadMmio8((void *)ioBase, port);
1232}
1233
1234static __inline__ unsigned int
1235inw(unsigned short port)
1236{
1237 if(ioBase == MAP_FAILED) return 0;
1238 return xf86ReadMmio16Le((void *)ioBase, port);
1239}
1240
1241static __inline__ unsigned int
1242inl(unsigned short port)
1243{
1244 if(ioBase == MAP_FAILED) return 0;
1245 return xf86ReadMmio32Le((void *)ioBase, port);
1246}
1247
/*
 * Unaligned little-endian load/store helpers: move data one byte at a
 * time, since PowerPC may fault on unaligned accesses to non-cached
 * memory.  NOTE(review): ldq_u/stq_u only move 32 bits here, matching
 * the historical XFree86 definitions -- presumably no caller needs a
 * true 64-bit unaligned access; verify before relying on them.
 */
# define ldq_u(p)	ldl_u(p)
# define ldl_u(p)	((*(unsigned char *)(p))	| \
			(*((unsigned char *)(p)+1)<<8)	| \
			(*((unsigned char *)(p)+2)<<16)	| \
			(*((unsigned char *)(p)+3)<<24))
# define ldw_u(p)	((*(unsigned char *)(p)) | \
			(*((unsigned char *)(p)+1)<<8))

# define stq_u(v,p)	stl_u(v,p)
/*
 * Wrapped in do { } while (0) so the multi-statement expansion acts as
 * a single statement: the previous unbraced form would only execute its
 * first byte store under "if (cond) stl_u(v,p);".
 */
# define stl_u(v,p)	do { \
			(*(unsigned char *)(p)) = (v); \
			(*((unsigned char *)(p)+1)) = ((v) >> 8); \
			(*((unsigned char *)(p)+2)) = ((v) >> 16); \
			(*((unsigned char *)(p)+3)) = ((v) >> 24); \
			} while (0)
# define stw_u(v,p)	do { \
			(*(unsigned char *)(p)) = (v); \
			(*((unsigned char *)(p)+1)) = ((v) >> 8); \
			} while (0)

/* Full barrier for reads and writes; write-only barrier. */
# define mem_barrier()		eieio()
# define write_mem_barrier()	eieio()
1266
1267#elif defined(__arm__) && defined(__linux__)
1268
/*
 * Direct accesses; NOTE(review): these assume p is suitably aligned for
 * the access width -- confirm callers guarantee that on ARM.  No memory
 * barriers are used on this platform (defined as no-ops below).
 */
#define ldq_u(p)	(*((unsigned long  *)(p)))
#define ldl_u(p)	(*((unsigned int   *)(p)))
#define ldw_u(p)	(*((unsigned short *)(p)))
#define stq_u(v,p)	(*(unsigned long  *)(p)) = (v)
#define stl_u(v,p)	(*(unsigned int   *)(p)) = (v)
#define stw_u(v,p)	(*(unsigned short *)(p)) = (v)
#define mem_barrier()   /* NOP */
#define write_mem_barrier()   /* NOP */
1277
1278/* for Linux on ARM, we use the LIBC inx/outx routines */
1279/* note that the appropriate setup via "ioperm" needs to be done */
1280/* *before* any inx/outx is done. */
1281
1282#include <sys/io.h>
1283
/*
 * Adapt libc's outb(value, port) argument order to the X server's
 * (port, value) convention.
 */
static __inline__ void
xf_outb(unsigned short port, unsigned char val)
{
    outb(val, port);
}
1289
/*
 * Adapt libc's outw(value, port) argument order to the X server's
 * (port, value) convention.
 */
static __inline__ void
xf_outw(unsigned short port, unsigned short val)
{
    outw(val, port);
}
1295
/*
 * Adapt libc's outl(value, port) argument order to the X server's
 * (port, value) convention.
 */
static __inline__ void
xf_outl(unsigned short port, unsigned int val)
{
    outl(val, port);
}
1301
/* Route the server's port-output names through the argument-swapping
 * wrappers above. */
#define outb xf_outb
#define outw xf_outw
#define outl xf_outl

/*
 * Flush/invalidate the cache for the 4 bytes at addr via the Linux/ARM
 * sys_cacheflush syscall (swi 0x9f0002), so newly written code becomes
 * visible to instruction fetch.  Arguments are pinned to the EABI
 * argument registers a1-a3.
 */
#define arm_flush_cache(addr)                                           \
do {                                                                    \
	register unsigned long _beg __asm ("a1") = (unsigned long) (addr);  \
	register unsigned long _end __asm ("a2") = (unsigned long) (addr) + 4;\
	register unsigned long _flg __asm ("a3") = 0;                        \
	__asm __volatile ("swi 0x9f0002 @ sys_cacheflush"               \
		: "=r" (_beg)                                           \
		: "0" (_beg), "r" (_end), "r" (_flg));                  \
} while (0)
1315
1316# else /* ix86 */
1317
/*
 * ix86 handles unaligned accesses in hardware, so plain dereferences
 * suffice; the memory barriers are defined away as no-ops here.
 */
# define ldq_u(p)	(*((unsigned long  *)(p)))
# define ldl_u(p)	(*((unsigned int   *)(p)))
# define ldw_u(p)	(*((unsigned short *)(p)))
# define stq_u(v,p)	(*(unsigned long  *)(p)) = (v)
# define stl_u(v,p)	(*(unsigned int   *)(p)) = (v)
# define stw_u(v,p)	(*(unsigned short *)(p)) = (v)
# define mem_barrier()   /* NOP */
# define write_mem_barrier()   /* NOP */
1326
1327# if !defined(__SUNPRO_C)
1328# if !defined(FAKEIT) && !defined(__mc68000__) && !defined(__arm__) && !defined(__sh__) && !defined(__hppa__) && !defined(__s390__) && !defined(__m32r__)
1329# ifdef GCCUSESGAS
1330
1331/*
1332 * If gcc uses gas rather than the native assembler, the syntax of these
1333 * inlines has to be different. DHD
1334 */
1335
/* AT&T/gas syntax: write the byte in %al to the port in %dx. */
static __inline__ void
outb(unsigned short port, unsigned char val)
{
   __asm__ __volatile__("outb %0,%1" : :"a" (val), "d" (port));
}
1341
1342
/* AT&T/gas syntax: write the word in %ax to the port in %dx. */
static __inline__ void
outw(unsigned short port, unsigned short val)
{
   __asm__ __volatile__("outw %0,%1" : :"a" (val), "d" (port));
}
1348
/* AT&T/gas syntax: write the doubleword in %eax to the port in %dx. */
static __inline__ void
outl(unsigned short port, unsigned int val)
{
   __asm__ __volatile__("outl %0,%1" : :"a" (val), "d" (port));
}
1354
/* AT&T/gas syntax: read a byte from the port in %dx into %al. */
static __inline__ unsigned int
inb(unsigned short port)
{
   unsigned char ret;
   __asm__ __volatile__("inb %1,%0" :
       "=a" (ret) :
       "d" (port));
   return ret;
}
1364
/* AT&T/gas syntax: read a word from the port in %dx into %ax. */
static __inline__ unsigned int
inw(unsigned short port)
{
   unsigned short ret;
   __asm__ __volatile__("inw %1,%0" :
       "=a" (ret) :
       "d" (port));
   return ret;
}
1374
/* AT&T/gas syntax: read a doubleword from the port in %dx into %eax. */
static __inline__ unsigned int
inl(unsigned short port)
{
   unsigned int ret;
   __asm__ __volatile__("inl %1,%0" :
       "=a" (ret) :
       "d" (port));
   return ret;
}
1384
1385# else /* GCCUSESGAS */
1386
/* Native-assembler syntax (gcc without gas); %B0 selects the byte form. */
static __inline__ void
outb(unsigned short port, unsigned char val)
{
   __asm__ __volatile__("out%B0 (%1)" : :"a" (val), "d" (port));
}
1392
/* Native-assembler syntax; %W0 selects the word form. */
static __inline__ void
outw(unsigned short port, unsigned short val)
{
   __asm__ __volatile__("out%W0 (%1)" : :"a" (val), "d" (port));
}
1398
/* Native-assembler syntax; %L0 selects the long (32-bit) form. */
static __inline__ void
outl(unsigned short port, unsigned int val)
{
   __asm__ __volatile__("out%L0 (%1)" : :"a" (val), "d" (port));
}
1404
/* Native-assembler syntax: read a byte from the given port into %al. */
static __inline__ unsigned int
inb(unsigned short port)
{
   unsigned char ret;
   __asm__ __volatile__("in%B0 (%1)" :
       "=a" (ret) :
       "d" (port));
   return ret;
}
1414
/* Native-assembler syntax: read a word from the given port into %ax. */
static __inline__ unsigned int
inw(unsigned short port)
{
   unsigned short ret;
   __asm__ __volatile__("in%W0 (%1)" :
       "=a" (ret) :
       "d" (port));
   return ret;
}
1424
/* Native-assembler syntax: read a doubleword from the given port into %eax. */
static __inline__ unsigned int
inl(unsigned short port)
{
   unsigned int ret;
   __asm__ __volatile__("in%L0 (%1)" :
       "=a" (ret) :
       "d" (port));
   return ret;
}
1434
1435# endif /* GCCUSESGAS */
1436
1437# else /* !defined(FAKEIT) && !defined(__mc68000__) && !defined(__arm__) && !defined(__sh__) && !defined(__hppa__) && !defined(__m32r__) */
1438
/*
 * FAKEIT stub: port I/O is unavailable on this platform; the write is a
 * no-op.  The (void) casts silence -Wunused-parameter in every
 * translation unit that includes this header.
 */
static __inline__ void
outb(unsigned short port, unsigned char val)
{
    (void) port;
    (void) val;
}
1443
/* FAKEIT stub: 16-bit port write is a no-op; casts silence warnings. */
static __inline__ void
outw(unsigned short port, unsigned short val)
{
    (void) port;
    (void) val;
}
1448
/* FAKEIT stub: 32-bit port write is a no-op; casts silence warnings. */
static __inline__ void
outl(unsigned short port, unsigned int val)
{
    (void) port;
    (void) val;
}
1453
/* FAKEIT stub: 8-bit port read always returns 0; cast silences warnings. */
static __inline__ unsigned int
inb(unsigned short port)
{
    (void) port;
    return 0;
}
1459
/* FAKEIT stub: 16-bit port read always returns 0; cast silences warnings. */
static __inline__ unsigned int
inw(unsigned short port)
{
    (void) port;
    return 0;
}
1465
/* FAKEIT stub: 32-bit port read always returns 0; cast silences warnings. */
static __inline__ unsigned int
inl(unsigned short port)
{
    (void) port;
    return 0;
}
1471
1472# endif /* FAKEIT */
1473# endif /* __SUNPRO_C */
1474
1475# endif /* ix86 */
1476
1477# else /* !GNUC */
1478# if defined(__STDC__) && (__STDC__ == 1)
1479# ifndef asm
1480# define asm __asm
1481# endif
1482# endif
1483# ifndef SCO325
1484# if defined(__UNIXWARE__)
1485# /* avoid including <sys/types.h> for <sys/inline.h> on UnixWare */
1486# define ushort unsigned short
1487# define ushort_t unsigned short
1488# define ulong unsigned long
1489# define ulong_t unsigned long
1490# define uint_t unsigned int
1491# define uchar_t unsigned char
1492# endif /* __UNIXWARE__ */
1493# if !defined(__SUNPRO_C)
1494# include <sys/inline.h>
1495# endif
1496# else
1497# include "scoasm.h"
1498# endif
1499# if !defined(__HIGHC__) && !defined(__SUNPRO_C) || \
1500 defined(__USLC__)
1501# pragma asm partial_optimization outl
1502# pragma asm partial_optimization outw
1503# pragma asm partial_optimization outb
1504# pragma asm partial_optimization inl
1505# pragma asm partial_optimization inw
1506# pragma asm partial_optimization inb
1507# endif
/*
 * Non-GCC compilers on ix86-class hosts: unaligned accesses work
 * directly and no barriers are needed (defined as no-ops).
 */
# define ldq_u(p)	(*((unsigned long  *)(p)))
# define ldl_u(p)	(*((unsigned int   *)(p)))
# define ldw_u(p)	(*((unsigned short *)(p)))
# define stq_u(v,p)	(*(unsigned long  *)(p)) = (v)
# define stl_u(v,p)	(*(unsigned int   *)(p)) = (v)
# define stw_u(v,p)	(*(unsigned short *)(p)) = (v)
# define mem_barrier()   /* NOP */
# define write_mem_barrier()   /* NOP */
1516# endif /* __GNUC__ */
1517
1518# endif /* NO_INLINE */
1519
1520# ifdef __alpha__
1521/* entry points for Mmio memory access routines */
1522extern _X_EXPORT int (*xf86ReadMmio8)(void *, unsigned long);
1523extern _X_EXPORT int (*xf86ReadMmio16)(void *, unsigned long);
1524# ifndef STANDALONE_MMIO
1525extern _X_EXPORT int (*xf86ReadMmio32)(void *, unsigned long);
1526# else
1527/* Some DRI 3D drivers need MMIO_IN32. */
/*
 * Standalone 32-bit MMIO read for Alpha: the "mb" (memory barrier)
 * orders this access against earlier memory accesses before the
 * volatile load is performed.
 */
static __inline__ int
xf86ReadMmio32(void *Base, unsigned long Offset)
{
	__asm__ __volatile__("mb"  : : : "memory");
	return *(volatile unsigned int*)((unsigned long)Base+(Offset));
}
1534# endif
1535extern _X_EXPORT void (*xf86WriteMmio8)(int, void *, unsigned long);
1536extern _X_EXPORT void (*xf86WriteMmio16)(int, void *, unsigned long);
1537extern _X_EXPORT void (*xf86WriteMmio32)(int, void *, unsigned long);
1538extern _X_EXPORT void (*xf86WriteMmioNB8)(int, void *, unsigned long);
1539extern _X_EXPORT void (*xf86WriteMmioNB16)(int, void *, unsigned long);
1540extern _X_EXPORT void (*xf86WriteMmioNB32)(int, void *, unsigned long);
1541extern _X_EXPORT void xf86SlowBCopyFromBus(unsigned char *, unsigned char *, int);
1542extern _X_EXPORT void xf86SlowBCopyToBus(unsigned char *, unsigned char *, int);
1543
/* Some macros to hide the system dependencies for MMIO accesses */
/* Changed to kill noise generated by gcc's -Wcast-align */
# define MMIO_IN8(base, offset) (*xf86ReadMmio8)(base, offset)
# define MMIO_IN16(base, offset) (*xf86ReadMmio16)(base, offset)
# ifndef STANDALONE_MMIO
#  define MMIO_IN32(base, offset) (*xf86ReadMmio32)(base, offset)
# else
#  define MMIO_IN32(base, offset) xf86ReadMmio32(base, offset)
# endif

/* 32-bit writes are done inline with an explicit barrier; the narrower
 * writes below go through the function pointers declared above, which
 * take the value as their FIRST argument (val, base, offset). */
# define MMIO_OUT32(base, offset, val) \
    do { \
	write_mem_barrier(); \
	*(volatile CARD32 *)(void *)(((CARD8*)(base)) + (offset)) = (val); \
    } while (0)
# define MMIO_ONB32(base, offset, val) \
	*(volatile CARD32 *)(void *)(((CARD8*)(base)) + (offset)) = (val)

# define MMIO_OUT8(base, offset, val) \
	(*xf86WriteMmio8)((CARD8)(val), base, offset)
# define MMIO_OUT16(base, offset, val) \
	(*xf86WriteMmio16)((CARD16)(val), base, offset)
# define MMIO_ONB8(base, offset, val) \
	(*xf86WriteMmioNB8)((CARD8)(val), base, offset)
# define MMIO_ONB16(base, offset, val) \
	(*xf86WriteMmioNB16)((CARD16)(val), base, offset)
# define MMIO_MOVE32(base, offset, val) \
	MMIO_OUT32(base, offset, val)
1572
1573# elif defined(__powerpc__)
1574 /*
1575 * we provide byteswapping and no byteswapping functions here
1576 * with byteswapping as default,
1577 * drivers that don't need byteswapping should define PPC_MMIO_IS_BE
1578 */
/* 8-bit accesses have no byte order, so one definition serves both the
 * swapped and non-swapped configurations below. */
# define MMIO_IN8(base, offset) xf86ReadMmio8(base, offset)
# define MMIO_OUT8(base, offset, val) \
	xf86WriteMmio8(base, offset, (CARD8)(val))
# define MMIO_ONB8(base, offset, val) \
	xf86WriteMmioNB8(base, offset, (CARD8)(val))
1584
1585# if defined(PPC_MMIO_IS_BE) /* No byteswapping */
1586# define MMIO_IN16(base, offset) xf86ReadMmio16Be(base, offset)
1587# define MMIO_IN32(base, offset) xf86ReadMmio32Be(base, offset)
1588# define MMIO_OUT16(base, offset, val) \
1589 xf86WriteMmio16Be(base, offset, (CARD16)(val))
1590# define MMIO_OUT32(base, offset, val) \
1591 xf86WriteMmio32Be(base, offset, (CARD32)(val))
1592# define MMIO_ONB16(base, offset, val) \
1593 xf86WriteMmioNB16Be(base, offset, (CARD16)(val))
1594# define MMIO_ONB32(base, offset, val) \
1595 xf86WriteMmioNB32Be(base, offset, (CARD32)(val))
1596# else /* byteswapping is the default */
1597# define MMIO_IN16(base, offset) xf86ReadMmio16Le(base, offset)
1598# define MMIO_IN32(base, offset) xf86ReadMmio32Le(base, offset)
1599# define MMIO_OUT16(base, offset, val) \
1600 xf86WriteMmio16Le(base, offset, (CARD16)(val))
1601# define MMIO_OUT32(base, offset, val) \
1602 xf86WriteMmio32Le(base, offset, (CARD32)(val))
1603# define MMIO_ONB16(base, offset, val) \
1604 xf86WriteMmioNB16Le(base, offset, (CARD16)(val))
1605# define MMIO_ONB32(base, offset, val) \
1606 xf86WriteMmioNB32Le(base, offset, (CARD32)(val))
1607# endif
1608
/* NOTE(review): MMIO_MOVE32 always uses the non-swapping (Be) store,
 * even in the byte-swapped default configuration -- presumably for
 * moving already-formatted data; verify against callers. */
# define MMIO_MOVE32(base, offset, val) \
	xf86WriteMmio32Be(base, offset, (CARD32)(val))
1611
/*
 * Make freshly written code at addr visible to instruction fetch:
 * flush the data-cache line to memory, invalidate the matching
 * instruction-cache line, and discard any prefetched instructions.
 */
static __inline__ void ppc_flush_icache(char *addr)
{
	__asm__ volatile (
		"dcbf 0,%0;"	/* flush the dcache line to memory */
		"sync;"		/* wait for the flush to complete */
		"icbi 0,%0;"	/* invalidate the icache line */
		"sync;"
		"isync;"	/* discard prefetched instructions */
		: : "r"(addr) : "memory");
}
1622
1623# elif defined(__sparc__) || defined(sparc) || defined(__sparc)
1624 /*
1625 * Like powerpc, we provide byteswapping and no byteswapping functions
1626 * here with byteswapping as default, drivers that don't need byteswapping
1627 * should define SPARC_MMIO_IS_BE (perhaps create a generic macro so that we
1628 * do not need to use PPC_MMIO_IS_BE and the sparc one in all the same places
1629 * of drivers?).
1630 */
# define MMIO_IN8(base, offset) xf86ReadMmio8(base, offset)
# define MMIO_OUT8(base, offset, val) \
	xf86WriteMmio8(base, offset, (CARD8)(val))
/* NOTE(review): the sparc no-barrier writers use an "...8NB" suffix,
 * unlike the powerpc "NB8" spelling earlier in this file -- confirm
 * this matches the sparc function definitions. */
# define MMIO_ONB8(base, offset, val) \
	xf86WriteMmio8NB(base, offset, (CARD8)(val))
1636
1637# if defined(SPARC_MMIO_IS_BE) /* No byteswapping */
1638# define MMIO_IN16(base, offset) xf86ReadMmio16Be(base, offset)
1639# define MMIO_IN32(base, offset) xf86ReadMmio32Be(base, offset)
1640# define MMIO_OUT16(base, offset, val) \
1641 xf86WriteMmio16Be(base, offset, (CARD16)(val))
1642# define MMIO_OUT32(base, offset, val) \
1643 xf86WriteMmio32Be(base, offset, (CARD32)(val))
1644# define MMIO_ONB16(base, offset, val) \
1645 xf86WriteMmio16BeNB(base, offset, (CARD16)(val))
1646# define MMIO_ONB32(base, offset, val) \
1647 xf86WriteMmio32BeNB(base, offset, (CARD32)(val))
1648# else /* byteswapping is the default */
1649# define MMIO_IN16(base, offset) xf86ReadMmio16Le(base, offset)
1650# define MMIO_IN32(base, offset) xf86ReadMmio32Le(base, offset)
1651# define MMIO_OUT16(base, offset, val) \
1652 xf86WriteMmio16Le(base, offset, (CARD16)(val))
1653# define MMIO_OUT32(base, offset, val) \
1654 xf86WriteMmio32Le(base, offset, (CARD32)(val))
1655# define MMIO_ONB16(base, offset, val) \
1656 xf86WriteMmio16LeNB(base, offset, (CARD16)(val))
1657# define MMIO_ONB32(base, offset, val) \
1658 xf86WriteMmio32LeNB(base, offset, (CARD32)(val))
1659# endif
1660
/* NOTE(review): like the powerpc variant, MMIO_MOVE32 always uses the
 * non-swapping (Be) store even in the byte-swapped configuration. */
# define MMIO_MOVE32(base, offset, val) \
	xf86WriteMmio32Be(base, offset, (CARD32)(val))
1663
1664# else /* !__alpha__ && !__powerpc__ && !__sparc__ */
1665
/*
 * Generic (linear-mapped) MMIO: plain volatile accesses through the
 * byte-offset pointer; no barriers or function-pointer indirection.
 * The intermediate (void *) casts on the wider accesses suppress gcc's
 * -Wcast-align warnings (see the alpha section's comment above).
 */
# define MMIO_IN8(base, offset) \
	*(volatile CARD8 *)(((CARD8*)(base)) + (offset))
# define MMIO_IN16(base, offset) \
	*(volatile CARD16 *)(void *)(((CARD8*)(base)) + (offset))
# define MMIO_IN32(base, offset) \
	*(volatile CARD32 *)(void *)(((CARD8*)(base)) + (offset))
# define MMIO_OUT8(base, offset, val) \
	*(volatile CARD8 *)(((CARD8*)(base)) + (offset)) = (val)
# define MMIO_OUT16(base, offset, val) \
	*(volatile CARD16 *)(void *)(((CARD8*)(base)) + (offset)) = (val)
# define MMIO_OUT32(base, offset, val) \
	*(volatile CARD32 *)(void *)(((CARD8*)(base)) + (offset)) = (val)
# define MMIO_ONB8(base, offset, val) MMIO_OUT8(base, offset, val)
# define MMIO_ONB16(base, offset, val) MMIO_OUT16(base, offset, val)
# define MMIO_ONB32(base, offset, val) MMIO_OUT32(base, offset, val)

# define MMIO_MOVE32(base, offset, val) MMIO_OUT32(base, offset, val)
1683
1684# endif /* __alpha__ */
1685
1686/*
1687 * With Intel, the version in os-support/misc/SlowBcopy.s is used.
1688 * This avoids port I/O during the copy (which causes problems with
1689 * some hardware).
1690 */
# ifdef __alpha__
/* Alpha needs the bus-aware copy routines declared in the alpha section. */
#  define slowbcopy_tobus(src,dst,count) xf86SlowBCopyToBus(src,dst,count)
#  define slowbcopy_frombus(src,dst,count) xf86SlowBCopyFromBus(src,dst,count)
# else /* __alpha__ */
/* Everyone else uses the generic slow copy (see comment above). */
#  define slowbcopy_tobus(src,dst,count) xf86SlowBcopy(src,dst,count)
#  define slowbcopy_frombus(src,dst,count) xf86SlowBcopy(src,dst,count)
# endif /* __alpha__ */
1698
1699#endif /* _COMPILER_H */
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette