VirtualBox

source: vbox/trunk/include/iprt/asm-amd64-x86.h@55420

Last change on this file since 55420 was 55319, checked in by vboxsync, 10 years ago

forward-ported r98914, r98916 from 4.3 to trunk (LogRel and build fix)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 68.1 KB
1/** @file
2 * IPRT - AMD64 and x86 Specific Assembly Functions.
3 */
4
5/*
6 * Copyright (C) 2006-2013 Oracle Corporation
7 *
8 * This file is part of VirtualBox Open Source Edition (OSE), as
9 * available from http://www.virtualbox.org. This file is free software;
10 * you can redistribute it and/or modify it under the terms of the GNU
11 * General Public License (GPL) as published by the Free Software
12 * Foundation, in version 2 as it comes in the "COPYING" file of the
13 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
14 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
15 *
16 * The contents of this file may alternatively be used under the terms
17 * of the Common Development and Distribution License Version 1.0
18 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
19 * VirtualBox OSE distribution, in which case the provisions of the
20 * CDDL are applicable instead of those of the GPL.
21 *
22 * You may elect to license modified versions of this file under the
23 * terms and conditions of either the GPL or the CDDL or both.
24 */
25
26#ifndef ___iprt_asm_amd64_x86_h
27#define ___iprt_asm_amd64_x86_h
28
29#include <iprt/types.h>
30#include <iprt/assert.h>
31#if !defined(RT_ARCH_AMD64) && !defined(RT_ARCH_X86)
32# error "Not on AMD64 or x86"
33#endif
34
35#if defined(_MSC_VER) && RT_INLINE_ASM_USES_INTRIN
36# include <intrin.h>
37 /* Emit the intrinsics at all optimization levels. */
38# pragma intrinsic(_ReadWriteBarrier)
39# pragma intrinsic(__cpuid)
40# pragma intrinsic(_enable)
41# pragma intrinsic(_disable)
42# pragma intrinsic(__rdtsc)
43# pragma intrinsic(__readmsr)
44# pragma intrinsic(__writemsr)
45# pragma intrinsic(__outbyte)
46# pragma intrinsic(__outbytestring)
47# pragma intrinsic(__outword)
48# pragma intrinsic(__outwordstring)
49# pragma intrinsic(__outdword)
50# pragma intrinsic(__outdwordstring)
51# pragma intrinsic(__inbyte)
52# pragma intrinsic(__inbytestring)
53# pragma intrinsic(__inword)
54# pragma intrinsic(__inwordstring)
55# pragma intrinsic(__indword)
56# pragma intrinsic(__indwordstring)
57# pragma intrinsic(__invlpg)
58# pragma intrinsic(__wbinvd)
59# pragma intrinsic(__readcr0)
60# pragma intrinsic(__readcr2)
61# pragma intrinsic(__readcr3)
62# pragma intrinsic(__readcr4)
63# pragma intrinsic(__writecr0)
64# pragma intrinsic(__writecr3)
65# pragma intrinsic(__writecr4)
66# pragma intrinsic(__readdr)
67# pragma intrinsic(__writedr)
68# ifdef RT_ARCH_AMD64
69# pragma intrinsic(__readcr8)
70# pragma intrinsic(__writecr8)
71# endif
72# if RT_INLINE_ASM_USES_INTRIN >= 15
73# pragma intrinsic(__readeflags)
74# pragma intrinsic(__writeeflags)
75# pragma intrinsic(__rdtscp)
76# endif
77#endif
78
79
80
81/** @defgroup grp_rt_asm_amd64_x86 AMD64 and x86 Specific ASM Routines
82 * @ingroup grp_rt_asm
83 * @{
84 */
85
86/** @todo find a more proper place for these structures? */
87
88#pragma pack(1)
89/** IDTR */
90typedef struct RTIDTR
91{
92 /** Size of the IDT. */
93 uint16_t cbIdt;
94 /** Address of the IDT. */
95 uintptr_t pIdt;
96} RTIDTR, *PRTIDTR;
97#pragma pack()
98
99#pragma pack(1)
100/** @internal */
101typedef struct RTIDTRALIGNEDINT
102{
103 /** Alignment padding. */
104 uint8_t au16Padding[ARCH_BITS == 64 ? 3 : 1];
105 /** The IDTR structure. */
106 RTIDTR Idtr;
107} RTIDTRALIGNEDINT;
108#pragma pack()
109
110/** Wrapped RTIDTR for preventing misalignment exceptions. */
111typedef union RTIDTRALIGNED
112{
113 /** Try to make sure this structure has optimal alignment. */
114 uint64_t auAlignmentHack[ARCH_BITS == 64 ? 2 : 1];
115 /** Aligned structure. */
116 RTIDTRALIGNEDINT s;
117} RTIDTRALIGNED;
118AssertCompileSize(RTIDTRALIGNED, ARCH_BITS * 2 / 8);
119/** Pointer to an RTIDTR alignment wrapper. */
120typedef RTIDTRALIGNED *PRIDTRALIGNED;
121
122
123#pragma pack(1)
124/** GDTR */
125typedef struct RTGDTR
126{
127 /** Size of the GDT. */
128 uint16_t cbGdt;
129 /** Address of the GDT. */
130 uintptr_t pGdt;
131} RTGDTR, *PRTGDTR;
132#pragma pack()
133
134#pragma pack(1)
135/** @internal */
136typedef struct RTGDTRALIGNEDINT
137{
138 /** Alignment padding. */
139 uint8_t au16Padding[ARCH_BITS == 64 ? 3 : 1];
140 /** The GDTR structure. */
141 RTGDTR Gdtr;
142} RTGDTRALIGNEDINT;
143#pragma pack()
144
145/** Wrapped RTGDTR for preventing misalignment exceptions. */
146typedef union RTGDTRALIGNED
147{
148 /** Try to make sure this structure has optimal alignment. */
149 uint64_t auAlignmentHack[ARCH_BITS == 64 ? 2 : 1];
150 /** Aligned structure. */
151 RTGDTRALIGNEDINT s;
152} RTGDTRALIGNED;
153AssertCompileSize(RTGDTRALIGNED, ARCH_BITS * 2 / 8);
154/** Pointer to an RTGDTR alignment wrapper. */
155typedef RTGDTRALIGNED *PRGDTRALIGNED;
156
157
158/**
159 * Gets the content of the IDTR CPU register.
160 * @param pIdtr Where to store the IDTR contents.
161 */
162#if RT_INLINE_ASM_EXTERNAL
163DECLASM(void) ASMGetIDTR(PRTIDTR pIdtr);
164#else
165DECLINLINE(void) ASMGetIDTR(PRTIDTR pIdtr)
166{
167# if RT_INLINE_ASM_GNU_STYLE
168 __asm__ __volatile__("sidt %0" : "=m" (*pIdtr));
169# else
170 __asm
171 {
172# ifdef RT_ARCH_AMD64
173 mov rax, [pIdtr]
174 sidt [rax]
175# else
176 mov eax, [pIdtr]
177 sidt [eax]
178# endif
179 }
180# endif
181}
182#endif
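
Usage sketch (editor's addition, not part of the original header): reading the
IDTR into a local variable. For the limit alone, prefer ASMGetIdtrLimit()
below, which uses the RTIDTRALIGNED wrapper so the 6/10-byte SIDT store lands
nicely aligned.

    RTIDTR Idtr;
    ASMGetIDTR(&Idtr);
    /* Idtr.cbIdt is the IDT limit, Idtr.pIdt the linear base address. */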
183
184
185/**
186 * Gets the content of the IDTR.LIMIT CPU register.
187 * @returns IDTR limit.
188 */
189#if RT_INLINE_ASM_EXTERNAL
190DECLASM(uint16_t) ASMGetIdtrLimit(void);
191#else
192DECLINLINE(uint16_t) ASMGetIdtrLimit(void)
193{
194 RTIDTRALIGNED TmpIdtr;
195# if RT_INLINE_ASM_GNU_STYLE
196 __asm__ __volatile__("sidt %0" : "=m" (TmpIdtr.s.Idtr));
197# else
198 __asm
199 {
200 sidt [TmpIdtr.s.Idtr]
201 }
202# endif
203 return TmpIdtr.s.Idtr.cbIdt;
204}
205#endif
206
207
208/**
209 * Sets the content of the IDTR CPU register.
210 * @param pIdtr Where to load the IDTR contents from.
211 */
212#if RT_INLINE_ASM_EXTERNAL
213DECLASM(void) ASMSetIDTR(const RTIDTR *pIdtr);
214#else
215DECLINLINE(void) ASMSetIDTR(const RTIDTR *pIdtr)
216{
217# if RT_INLINE_ASM_GNU_STYLE
218 __asm__ __volatile__("lidt %0" : : "m" (*pIdtr));
219# else
220 __asm
221 {
222# ifdef RT_ARCH_AMD64
223 mov rax, [pIdtr]
224 lidt [rax]
225# else
226 mov eax, [pIdtr]
227 lidt [eax]
228# endif
229 }
230# endif
231}
232#endif
233
234
235/**
236 * Gets the content of the GDTR CPU register.
237 * @param pGdtr Where to store the GDTR contents.
238 */
239#if RT_INLINE_ASM_EXTERNAL
240DECLASM(void) ASMGetGDTR(PRTGDTR pGdtr);
241#else
242DECLINLINE(void) ASMGetGDTR(PRTGDTR pGdtr)
243{
244# if RT_INLINE_ASM_GNU_STYLE
245 __asm__ __volatile__("sgdt %0" : "=m" (*pGdtr));
246# else
247 __asm
248 {
249# ifdef RT_ARCH_AMD64
250 mov rax, [pGdtr]
251 sgdt [rax]
252# else
253 mov eax, [pGdtr]
254 sgdt [eax]
255# endif
256 }
257# endif
258}
259#endif
260
261
262/**
263 * Sets the content of the GDTR CPU register.
264 * @param pGdtr Where to load the GDTR contents from.
265 */
266#if RT_INLINE_ASM_EXTERNAL
267DECLASM(void) ASMSetGDTR(const RTGDTR *pGdtr);
268#else
269DECLINLINE(void) ASMSetGDTR(const RTGDTR *pGdtr)
270{
271# if RT_INLINE_ASM_GNU_STYLE
272 __asm__ __volatile__("lgdt %0" : : "m" (*pGdtr));
273# else
274 __asm
275 {
276# ifdef RT_ARCH_AMD64
277 mov rax, [pGdtr]
278 lgdt [rax]
279# else
280 mov eax, [pGdtr]
281 lgdt [eax]
282# endif
283 }
284# endif
285}
286#endif
287
288
289
290/**
291 * Get the CS register.
292 * @returns CS.
293 */
294#if RT_INLINE_ASM_EXTERNAL
295DECLASM(RTSEL) ASMGetCS(void);
296#else
297DECLINLINE(RTSEL) ASMGetCS(void)
298{
299 RTSEL SelCS;
300# if RT_INLINE_ASM_GNU_STYLE
301 __asm__ __volatile__("movw %%cs, %0\n\t" : "=r" (SelCS));
302# else
303 __asm
304 {
305 mov ax, cs
306 mov [SelCS], ax
307 }
308# endif
309 return SelCS;
310}
311#endif
312
313
314/**
315 * Get the DS register.
316 * @returns DS.
317 */
318#if RT_INLINE_ASM_EXTERNAL
319DECLASM(RTSEL) ASMGetDS(void);
320#else
321DECLINLINE(RTSEL) ASMGetDS(void)
322{
323 RTSEL SelDS;
324# if RT_INLINE_ASM_GNU_STYLE
325 __asm__ __volatile__("movw %%ds, %0\n\t" : "=r" (SelDS));
326# else
327 __asm
328 {
329 mov ax, ds
330 mov [SelDS], ax
331 }
332# endif
333 return SelDS;
334}
335#endif
336
337
338/**
339 * Get the ES register.
340 * @returns ES.
341 */
342#if RT_INLINE_ASM_EXTERNAL
343DECLASM(RTSEL) ASMGetES(void);
344#else
345DECLINLINE(RTSEL) ASMGetES(void)
346{
347 RTSEL SelES;
348# if RT_INLINE_ASM_GNU_STYLE
349 __asm__ __volatile__("movw %%es, %0\n\t" : "=r" (SelES));
350# else
351 __asm
352 {
353 mov ax, es
354 mov [SelES], ax
355 }
356# endif
357 return SelES;
358}
359#endif
360
361
362/**
363 * Get the FS register.
364 * @returns FS.
365 */
366#if RT_INLINE_ASM_EXTERNAL
367DECLASM(RTSEL) ASMGetFS(void);
368#else
369DECLINLINE(RTSEL) ASMGetFS(void)
370{
371 RTSEL SelFS;
372# if RT_INLINE_ASM_GNU_STYLE
373 __asm__ __volatile__("movw %%fs, %0\n\t" : "=r" (SelFS));
374# else
375 __asm
376 {
377 mov ax, fs
378 mov [SelFS], ax
379 }
380# endif
381 return SelFS;
382}
383#endif
384
385
386/**
387 * Get the GS register.
388 * @returns GS.
389 */
390#if RT_INLINE_ASM_EXTERNAL
391DECLASM(RTSEL) ASMGetGS(void);
392#else
393DECLINLINE(RTSEL) ASMGetGS(void)
394{
395 RTSEL SelGS;
396# if RT_INLINE_ASM_GNU_STYLE
397 __asm__ __volatile__("movw %%gs, %0\n\t" : "=r" (SelGS));
398# else
399 __asm
400 {
401 mov ax, gs
402 mov [SelGS], ax
403 }
404# endif
405 return SelGS;
406}
407#endif
408
409
410/**
411 * Get the SS register.
412 * @returns SS.
413 */
414#if RT_INLINE_ASM_EXTERNAL
415DECLASM(RTSEL) ASMGetSS(void);
416#else
417DECLINLINE(RTSEL) ASMGetSS(void)
418{
419 RTSEL SelSS;
420# if RT_INLINE_ASM_GNU_STYLE
421 __asm__ __volatile__("movw %%ss, %0\n\t" : "=r" (SelSS));
422# else
423 __asm
424 {
425 mov ax, ss
426 mov [SelSS], ax
427 }
428# endif
429 return SelSS;
430}
431#endif
432
433
434/**
435 * Get the TR register.
436 * @returns TR.
437 */
438#if RT_INLINE_ASM_EXTERNAL
439DECLASM(RTSEL) ASMGetTR(void);
440#else
441DECLINLINE(RTSEL) ASMGetTR(void)
442{
443 RTSEL SelTR;
444# if RT_INLINE_ASM_GNU_STYLE
445 __asm__ __volatile__("str %w0\n\t" : "=r" (SelTR));
446# else
447 __asm
448 {
449 str ax
450 mov [SelTR], ax
451 }
452# endif
453 return SelTR;
454}
455#endif
456
457
458/**
459 * Get the LDTR register.
460 * @returns LDTR.
461 */
462#if RT_INLINE_ASM_EXTERNAL
463DECLASM(RTSEL) ASMGetLDTR(void);
464#else
465DECLINLINE(RTSEL) ASMGetLDTR(void)
466{
467 RTSEL SelLDTR;
468# if RT_INLINE_ASM_GNU_STYLE
469 __asm__ __volatile__("sldt %w0\n\t" : "=r" (SelLDTR));
470# else
471 __asm
472 {
473 sldt ax
474 mov [SelLDTR], ax
475 }
476# endif
477 return SelLDTR;
478}
479#endif
480
481
482/**
483 * Get the access rights for the segment selector.
484 *
485 * @returns The access rights on success or UINT32_MAX on failure.
486 * @param uSel The selector value.
487 *
488 * @remarks Using UINT32_MAX for failure is chosen because valid access rights
489 * always have bits 0:7 as 0 (on both Intel & AMD).
490 */
491#if RT_INLINE_ASM_EXTERNAL
492DECLASM(uint32_t) ASMGetSegAttr(uint32_t uSel);
493#else
494DECLINLINE(uint32_t) ASMGetSegAttr(uint32_t uSel)
495{
496 uint32_t uAttr;
497 /* LAR accesses only 16 bits of the source operand, but a 32-bit
498 destination register (eax) is required to get the full access rights. */
499# if RT_INLINE_ASM_GNU_STYLE
500 __asm__ __volatile__("lar %1, %%eax\n\t"
501 "jz done%=\n\t"
502 "movl $0xffffffff, %%eax\n\t"
503 "done%=:\n\t"
504 "movl %%eax, %0\n\t"
505 : "=r" (uAttr)
506 : "r" (uSel)
507 : "cc", "%eax");
508# else
509 __asm
510 {
511 lar eax, [uSel]
512 jz done
513 mov eax, 0ffffffffh
514 done:
515 mov [uAttr], eax
516 }
517# endif
518 return uAttr;
519}
520#endif
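
Caller sketch (editor's addition). The bit layout of the returned attributes
follows the architectural LAR result (e.g. bit 15 = present, bits 14:13 =
DPL); that mapping is quoted from the Intel/AMD manuals, not from this
header. RT_BOOL and RT_BIT_32 come from iprt/cdefs.h.

    uint32_t const uSsAttr = ASMGetSegAttr(ASMGetSS());
    if (uSsAttr != UINT32_MAX)
    {
        bool const fPresent = RT_BOOL(uSsAttr & RT_BIT_32(15)); /* descriptor P bit */
        /* ... inspect DPL, type and the other attribute bits as needed ... */
    }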
521
522
523/**
524 * Get the [RE]FLAGS register.
525 * @returns [RE]FLAGS.
526 */
527#if RT_INLINE_ASM_EXTERNAL && RT_INLINE_ASM_USES_INTRIN < 15
528DECLASM(RTCCUINTREG) ASMGetFlags(void);
529#else
530DECLINLINE(RTCCUINTREG) ASMGetFlags(void)
531{
532 RTCCUINTREG uFlags;
533# if RT_INLINE_ASM_GNU_STYLE
534# ifdef RT_ARCH_AMD64
535 __asm__ __volatile__("pushfq\n\t"
536 "popq %0\n\t"
537 : "=r" (uFlags));
538# else
539 __asm__ __volatile__("pushfl\n\t"
540 "popl %0\n\t"
541 : "=r" (uFlags));
542# endif
543# elif RT_INLINE_ASM_USES_INTRIN >= 15
544 uFlags = __readeflags();
545# else
546 __asm
547 {
548# ifdef RT_ARCH_AMD64
549 pushfq
550 pop [uFlags]
551# else
552 pushfd
553 pop [uFlags]
554# endif
555 }
556# endif
557 return uFlags;
558}
559#endif
560
561
562/**
563 * Set the [RE]FLAGS register.
564 * @param uFlags The new [RE]FLAGS value.
565 */
566#if RT_INLINE_ASM_EXTERNAL && RT_INLINE_ASM_USES_INTRIN < 15
567DECLASM(void) ASMSetFlags(RTCCUINTREG uFlags);
568#else
569DECLINLINE(void) ASMSetFlags(RTCCUINTREG uFlags)
570{
571# if RT_INLINE_ASM_GNU_STYLE
572# ifdef RT_ARCH_AMD64
573 __asm__ __volatile__("pushq %0\n\t"
574 "popfq\n\t"
575 : : "g" (uFlags));
576# else
577 __asm__ __volatile__("pushl %0\n\t"
578 "popfl\n\t"
579 : : "g" (uFlags));
580# endif
581# elif RT_INLINE_ASM_USES_INTRIN >= 15
582 __writeeflags(uFlags);
583# else
584 __asm
585 {
586# ifdef RT_ARCH_AMD64
587 push [uFlags]
588 popfq
589# else
590 push [uFlags]
591 popfd
592# endif
593 }
594# endif
595}
596#endif
597
598
599/**
600 * Gets the content of the CPU timestamp counter register.
601 *
602 * @returns TSC.
603 */
604#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
605DECLASM(uint64_t) ASMReadTSC(void);
606#else
607DECLINLINE(uint64_t) ASMReadTSC(void)
608{
609 RTUINT64U u;
610# if RT_INLINE_ASM_GNU_STYLE
611 __asm__ __volatile__("rdtsc\n\t" : "=a" (u.s.Lo), "=d" (u.s.Hi));
612# else
613# if RT_INLINE_ASM_USES_INTRIN
614 u.u = __rdtsc();
615# else
616 __asm
617 {
618 rdtsc
619 mov [u.s.Lo], eax
620 mov [u.s.Hi], edx
621 }
622# endif
623# endif
624 return u.u;
625}
626#endif
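
Rough timing sketch (editor's addition). RDTSC is not a serializing
instruction, so the reads can be reordered around the measured code; treat
the delta as an approximation. MeasuredWork() is a hypothetical placeholder.

    uint64_t const uTscStart = ASMReadTSC();
    MeasuredWork();                              /* hypothetical workload */
    uint64_t const cTscTicks = ASMReadTSC() - uTscStart;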
627
628
629/**
630 * Gets the content of the CPU timestamp counter register and the
631 * associated AUX value.
632 *
633 * @returns TSC.
634 * @param puAux Where to store the AUX value.
635 */
636#if RT_INLINE_ASM_EXTERNAL && RT_INLINE_ASM_USES_INTRIN < 15
637DECLASM(uint64_t) ASMReadTscWithAux(uint32_t *puAux);
638#else
639DECLINLINE(uint64_t) ASMReadTscWithAux(uint32_t *puAux)
640{
641 RTUINT64U u;
642# if RT_INLINE_ASM_GNU_STYLE
643 /* rdtscp is not supported by the ancient Linux build VMs, of course :-( */
644 /*__asm__ __volatile__("rdtscp\n\t" : "=a" (u.s.Lo), "=d" (u.s.Hi), "=c" (*puAux)); */
645 __asm__ __volatile__(".byte 0x0f,0x01,0xf9\n\t" : "=a" (u.s.Lo), "=d" (u.s.Hi), "=c" (*puAux));
646# else
647# if RT_INLINE_ASM_USES_INTRIN >= 15
648 u.u = __rdtscp(puAux);
649# else
650 __asm
651 {
652 rdtscp
653 mov [u.s.Lo], eax
654 mov [u.s.Hi], edx
655 mov eax, [puAux]
656 mov [eax], ecx
657 }
658# endif
659# endif
660 return u.u;
661}
662#endif
663
664
665/**
666 * Performs the cpuid instruction returning all registers.
667 *
668 * @param uOperator CPUID operation (eax).
669 * @param pvEAX Where to store eax.
670 * @param pvEBX Where to store ebx.
671 * @param pvECX Where to store ecx.
672 * @param pvEDX Where to store edx.
673 * @remark We're using void pointers to ease the use of special bitfield structures and such.
674 */
675#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
676DECLASM(void) ASMCpuId(uint32_t uOperator, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX);
677#else
678DECLINLINE(void) ASMCpuId(uint32_t uOperator, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX)
679{
680# if RT_INLINE_ASM_GNU_STYLE
681# ifdef RT_ARCH_AMD64
682 RTCCUINTREG uRAX, uRBX, uRCX, uRDX;
683 __asm__ __volatile__ ("cpuid\n\t"
684 : "=a" (uRAX),
685 "=b" (uRBX),
686 "=c" (uRCX),
687 "=d" (uRDX)
688 : "0" (uOperator), "2" (0));
689 *(uint32_t *)pvEAX = (uint32_t)uRAX;
690 *(uint32_t *)pvEBX = (uint32_t)uRBX;
691 *(uint32_t *)pvECX = (uint32_t)uRCX;
692 *(uint32_t *)pvEDX = (uint32_t)uRDX;
693# else
694 __asm__ __volatile__ ("xchgl %%ebx, %1\n\t"
695 "cpuid\n\t"
696 "xchgl %%ebx, %1\n\t"
697 : "=a" (*(uint32_t *)pvEAX),
698 "=r" (*(uint32_t *)pvEBX),
699 "=c" (*(uint32_t *)pvECX),
700 "=d" (*(uint32_t *)pvEDX)
701 : "0" (uOperator), "2" (0));
702# endif
703
704# elif RT_INLINE_ASM_USES_INTRIN
705 int aInfo[4];
706 __cpuid(aInfo, uOperator);
707 *(uint32_t *)pvEAX = aInfo[0];
708 *(uint32_t *)pvEBX = aInfo[1];
709 *(uint32_t *)pvECX = aInfo[2];
710 *(uint32_t *)pvEDX = aInfo[3];
711
712# else
713 uint32_t uEAX;
714 uint32_t uEBX;
715 uint32_t uECX;
716 uint32_t uEDX;
717 __asm
718 {
719 push ebx
720 mov eax, [uOperator]
721 cpuid
722 mov [uEAX], eax
723 mov [uEBX], ebx
724 mov [uECX], ecx
725 mov [uEDX], edx
726 pop ebx
727 }
728 *(uint32_t *)pvEAX = uEAX;
729 *(uint32_t *)pvEBX = uEBX;
730 *(uint32_t *)pvECX = uECX;
731 *(uint32_t *)pvEDX = uEDX;
732# endif
733}
734#endif
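
Usage sketch (editor's addition): assembling the 12-character vendor string
from leaf 0. The EBX, EDX, ECX ordering is the architectural one (yielding
"GenuineIntel", "AuthenticAMD", etc.); memcpy from <string.h> is used here
for brevity.

    uint32_t uEAX, uEBX, uECX, uEDX;
    char     szVendor[13];
    ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
    memcpy(&szVendor[0], &uEBX, 4);
    memcpy(&szVendor[4], &uEDX, 4);
    memcpy(&szVendor[8], &uECX, 4);
    szVendor[12] = '\0';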
735
736
737/**
738 * Performs the CPUID instruction with EAX and ECX input returning ALL output
739 * registers.
740 *
741 * @param uOperator CPUID operation (eax).
742 * @param uIdxECX ecx index
743 * @param pvEAX Where to store eax.
744 * @param pvEBX Where to store ebx.
745 * @param pvECX Where to store ecx.
746 * @param pvEDX Where to store edx.
747 * @remark We're using void pointers to ease the use of special bitfield structures and such.
748 */
749#if RT_INLINE_ASM_EXTERNAL || RT_INLINE_ASM_USES_INTRIN
750DECLASM(void) ASMCpuId_Idx_ECX(uint32_t uOperator, uint32_t uIdxECX, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX);
751#else
752DECLINLINE(void) ASMCpuId_Idx_ECX(uint32_t uOperator, uint32_t uIdxECX, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX)
753{
754# if RT_INLINE_ASM_GNU_STYLE
755# ifdef RT_ARCH_AMD64
756 RTCCUINTREG uRAX, uRBX, uRCX, uRDX;
757 __asm__ ("cpuid\n\t"
758 : "=a" (uRAX),
759 "=b" (uRBX),
760 "=c" (uRCX),
761 "=d" (uRDX)
762 : "0" (uOperator),
763 "2" (uIdxECX));
764 *(uint32_t *)pvEAX = (uint32_t)uRAX;
765 *(uint32_t *)pvEBX = (uint32_t)uRBX;
766 *(uint32_t *)pvECX = (uint32_t)uRCX;
767 *(uint32_t *)pvEDX = (uint32_t)uRDX;
768# else
769 __asm__ ("xchgl %%ebx, %1\n\t"
770 "cpuid\n\t"
771 "xchgl %%ebx, %1\n\t"
772 : "=a" (*(uint32_t *)pvEAX),
773 "=r" (*(uint32_t *)pvEBX),
774 "=c" (*(uint32_t *)pvECX),
775 "=d" (*(uint32_t *)pvEDX)
776 : "0" (uOperator),
777 "2" (uIdxECX));
778# endif
779
780# elif RT_INLINE_ASM_USES_INTRIN
781 int aInfo[4];
782 __cpuidex(aInfo, uOperator, uIdxECX);
783 *(uint32_t *)pvEAX = aInfo[0];
784 *(uint32_t *)pvEBX = aInfo[1];
785 *(uint32_t *)pvECX = aInfo[2];
786 *(uint32_t *)pvEDX = aInfo[3];
787
788# else
789 uint32_t uEAX;
790 uint32_t uEBX;
791 uint32_t uECX;
792 uint32_t uEDX;
793 __asm
794 {
795 push ebx
796 mov eax, [uOperator]
797 mov ecx, [uIdxECX]
798 cpuid
799 mov [uEAX], eax
800 mov [uEBX], ebx
801 mov [uECX], ecx
802 mov [uEDX], edx
803 pop ebx
804 }
805 *(uint32_t *)pvEAX = uEAX;
806 *(uint32_t *)pvEBX = uEBX;
807 *(uint32_t *)pvECX = uECX;
808 *(uint32_t *)pvEDX = uEDX;
809# endif
810}
811#endif
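
Usage sketch (editor's addition): querying the structured extended feature
flags, leaf 7 sub-leaf 0. Guarding on the maximum standard leaf first is a
recommended pattern, not something this header enforces; the AVX2 bit
position is quoted from the Intel SDM.

    uint32_t uEAX, uEBX, uECX, uEDX;
    if (ASMCpuId_EAX(0) >= 7)
    {
        ASMCpuId_Idx_ECX(7, 0, &uEAX, &uEBX, &uECX, &uEDX);
        bool const fAvx2 = RT_BOOL(uEBX & RT_BIT_32(5)); /* CPUID.7.0:EBX.AVX2 */
    }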
812
813
814/**
815 * CPUID variant that initializes all 4 registers before the CPUID instruction.
816 *
817 * @returns The EAX result value.
818 * @param uOperator CPUID operation (eax).
819 * @param uInitEBX The value to assign EBX prior to the CPUID instruction.
820 * @param uInitECX The value to assign ECX prior to the CPUID instruction.
821 * @param uInitEDX The value to assign EDX prior to the CPUID instruction.
822 * @param pvEAX Where to store eax. Optional.
823 * @param pvEBX Where to store ebx. Optional.
824 * @param pvECX Where to store ecx. Optional.
825 * @param pvEDX Where to store edx. Optional.
826 */
827DECLASM(uint32_t) ASMCpuIdExSlow(uint32_t uOperator, uint32_t uInitEBX, uint32_t uInitECX, uint32_t uInitEDX,
828 void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX);
829
830
831/**
832 * Performs the cpuid instruction returning ecx and edx.
833 *
834 * @param uOperator CPUID operation (eax).
835 * @param pvECX Where to store ecx.
836 * @param pvEDX Where to store edx.
837 * @remark We're using void pointers to ease the use of special bitfield structures and such.
838 */
839#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
840DECLASM(void) ASMCpuId_ECX_EDX(uint32_t uOperator, void *pvECX, void *pvEDX);
841#else
842DECLINLINE(void) ASMCpuId_ECX_EDX(uint32_t uOperator, void *pvECX, void *pvEDX)
843{
844 uint32_t uEBX;
845 ASMCpuId(uOperator, &uOperator, &uEBX, pvECX, pvEDX);
846}
847#endif
848
849
850/**
851 * Performs the cpuid instruction returning eax.
852 *
853 * @param uOperator CPUID operation (eax).
854 * @returns EAX after cpuid operation.
855 */
856#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
857DECLASM(uint32_t) ASMCpuId_EAX(uint32_t uOperator);
858#else
859DECLINLINE(uint32_t) ASMCpuId_EAX(uint32_t uOperator)
860{
861 RTCCUINTREG xAX;
862# if RT_INLINE_ASM_GNU_STYLE
863# ifdef RT_ARCH_AMD64
864 __asm__ ("cpuid"
865 : "=a" (xAX)
866 : "0" (uOperator)
867 : "rbx", "rcx", "rdx");
868# elif (defined(PIC) || defined(__PIC__)) && defined(__i386__)
869 __asm__ ("push %%ebx\n\t"
870 "cpuid\n\t"
871 "pop %%ebx\n\t"
872 : "=a" (xAX)
873 : "0" (uOperator)
874 : "ecx", "edx");
875# else
876 __asm__ ("cpuid"
877 : "=a" (xAX)
878 : "0" (uOperator)
879 : "edx", "ecx", "ebx");
880# endif
881
882# elif RT_INLINE_ASM_USES_INTRIN
883 int aInfo[4];
884 __cpuid(aInfo, uOperator);
885 xAX = aInfo[0];
886
887# else
888 __asm
889 {
890 push ebx
891 mov eax, [uOperator]
892 cpuid
893 mov [xAX], eax
894 pop ebx
895 }
896# endif
897 return (uint32_t)xAX;
898}
899#endif
900
901
902/**
903 * Performs the cpuid instruction returning ebx.
904 *
905 * @param uOperator CPUID operation (eax).
906 * @returns EBX after cpuid operation.
907 */
908#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
909DECLASM(uint32_t) ASMCpuId_EBX(uint32_t uOperator);
910#else
911DECLINLINE(uint32_t) ASMCpuId_EBX(uint32_t uOperator)
912{
913 RTCCUINTREG xBX;
914# if RT_INLINE_ASM_GNU_STYLE
915# ifdef RT_ARCH_AMD64
916 RTCCUINTREG uSpill;
917 __asm__ ("cpuid"
918 : "=a" (uSpill),
919 "=b" (xBX)
920 : "0" (uOperator)
921 : "rdx", "rcx");
922# elif (defined(PIC) || defined(__PIC__)) && defined(__i386__)
923 __asm__ ("push %%ebx\n\t"
924 "cpuid\n\t"
925 "mov %%ebx, %%edx\n\t"
926 "pop %%ebx\n\t"
927 : "=a" (uOperator),
928 "=d" (xBX)
929 : "0" (uOperator)
930 : "ecx");
931# else
932 __asm__ ("cpuid"
933 : "=a" (uOperator),
934 "=b" (xBX)
935 : "0" (uOperator)
936 : "edx", "ecx");
937# endif
938
939# elif RT_INLINE_ASM_USES_INTRIN
940 int aInfo[4];
941 __cpuid(aInfo, uOperator);
942 xBX = aInfo[1];
943
944# else
945 __asm
946 {
947 push ebx
948 mov eax, [uOperator]
949 cpuid
950 mov [xBX], ebx
951 pop ebx
952 }
953# endif
954 return (uint32_t)xBX;
955}
956#endif
957
958
959/**
960 * Performs the cpuid instruction returning ecx.
961 *
962 * @param uOperator CPUID operation (eax).
963 * @returns ECX after cpuid operation.
964 */
965#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
966DECLASM(uint32_t) ASMCpuId_ECX(uint32_t uOperator);
967#else
968DECLINLINE(uint32_t) ASMCpuId_ECX(uint32_t uOperator)
969{
970 RTCCUINTREG xCX;
971# if RT_INLINE_ASM_GNU_STYLE
972# ifdef RT_ARCH_AMD64
973 RTCCUINTREG uSpill;
974 __asm__ ("cpuid"
975 : "=a" (uSpill),
976 "=c" (xCX)
977 : "0" (uOperator)
978 : "rbx", "rdx");
979# elif (defined(PIC) || defined(__PIC__)) && defined(__i386__)
980 __asm__ ("push %%ebx\n\t"
981 "cpuid\n\t"
982 "pop %%ebx\n\t"
983 : "=a" (uOperator),
984 "=c" (xCX)
985 : "0" (uOperator)
986 : "edx");
987# else
988 __asm__ ("cpuid"
989 : "=a" (uOperator),
990 "=c" (xCX)
991 : "0" (uOperator)
992 : "ebx", "edx");
993
994# endif
995
996# elif RT_INLINE_ASM_USES_INTRIN
997 int aInfo[4];
998 __cpuid(aInfo, uOperator);
999 xCX = aInfo[2];
1000
1001# else
1002 __asm
1003 {
1004 push ebx
1005 mov eax, [uOperator]
1006 cpuid
1007 mov [xCX], ecx
1008 pop ebx
1009 }
1010# endif
1011 return (uint32_t)xCX;
1012}
1013#endif
1014
1015
1016/**
1017 * Performs the cpuid instruction returning edx.
1018 *
1019 * @param uOperator CPUID operation (eax).
1020 * @returns EDX after cpuid operation.
1021 */
1022#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1023DECLASM(uint32_t) ASMCpuId_EDX(uint32_t uOperator);
1024#else
1025DECLINLINE(uint32_t) ASMCpuId_EDX(uint32_t uOperator)
1026{
1027 RTCCUINTREG xDX;
1028# if RT_INLINE_ASM_GNU_STYLE
1029# ifdef RT_ARCH_AMD64
1030 RTCCUINTREG uSpill;
1031 __asm__ ("cpuid"
1032 : "=a" (uSpill),
1033 "=d" (xDX)
1034 : "0" (uOperator)
1035 : "rbx", "rcx");
1036# elif (defined(PIC) || defined(__PIC__)) && defined(__i386__)
1037 __asm__ ("push %%ebx\n\t"
1038 "cpuid\n\t"
1039 "pop %%ebx\n\t"
1040 : "=a" (uOperator),
1041 "=d" (xDX)
1042 : "0" (uOperator)
1043 : "ecx");
1044# else
1045 __asm__ ("cpuid"
1046 : "=a" (uOperator),
1047 "=d" (xDX)
1048 : "0" (uOperator)
1049 : "ebx", "ecx");
1050# endif
1051
1052# elif RT_INLINE_ASM_USES_INTRIN
1053 int aInfo[4];
1054 __cpuid(aInfo, uOperator);
1055 xDX = aInfo[3];
1056
1057# else
1058 __asm
1059 {
1060 push ebx
1061 mov eax, [uOperator]
1062 cpuid
1063 mov [xDX], edx
1064 pop ebx
1065 }
1066# endif
1067 return (uint32_t)xDX;
1068}
1069#endif
1070
1071
1072/**
1073 * Checks if the current CPU supports CPUID.
1074 *
1075 * @returns true if CPUID is supported.
1076 */
1077DECLINLINE(bool) ASMHasCpuId(void)
1078{
1079#ifdef RT_ARCH_AMD64
1080 return true; /* ASSUME that all amd64 compatible CPUs have cpuid. */
1081#else /* !RT_ARCH_AMD64 */
1082 bool fRet = false;
1083# if RT_INLINE_ASM_GNU_STYLE
1084 uint32_t u1;
1085 uint32_t u2;
1086 __asm__ ("pushf\n\t"
1087 "pop %1\n\t"
1088 "mov %1, %2\n\t"
1089 "xorl $0x200000, %1\n\t"
1090 "push %1\n\t"
1091 "popf\n\t"
1092 "pushf\n\t"
1093 "pop %1\n\t"
1094 "cmpl %1, %2\n\t"
1095 "setne %0\n\t"
1096 "push %2\n\t"
1097 "popf\n\t"
1098 : "=m" (fRet), "=r" (u1), "=r" (u2));
1099# else
1100 __asm
1101 {
1102 pushfd
1103 pop eax
1104 mov ebx, eax
1105 xor eax, 0200000h
1106 push eax
1107 popfd
1108 pushfd
1109 pop eax
1110 cmp eax, ebx
1111 setne fRet
1112 push ebx
1113 popfd
1114 }
1115# endif
1116 return fRet;
1117#endif /* !RT_ARCH_AMD64 */
1118}
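
Defensive probing sketch (editor's addition): on 32-bit hosts, gate CPUID use
on ASMHasCpuId() and validate the advertised range with ASMIsValidStdRange()
(declared further down) before walking the leaves.

    if (ASMHasCpuId())
    {
        uint32_t uEAX, uEBX, uECX, uEDX;
        ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
        if (ASMIsValidStdRange(uEAX))
        {
            /* Leaves 1 through uEAX can now be queried. */
        }
    }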
1119
1120
1121/**
1122 * Gets the APIC ID of the current CPU.
1123 *
1124 * @returns the APIC ID.
1125 */
1126#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1127DECLASM(uint8_t) ASMGetApicId(void);
1128#else
1129DECLINLINE(uint8_t) ASMGetApicId(void)
1130{
1131 RTCCUINTREG xBX;
1132# if RT_INLINE_ASM_GNU_STYLE
1133# ifdef RT_ARCH_AMD64
1134 RTCCUINTREG uSpill;
1135 __asm__ __volatile__ ("cpuid"
1136 : "=a" (uSpill),
1137 "=b" (xBX)
1138 : "0" (1)
1139 : "rcx", "rdx");
1140# elif (defined(PIC) || defined(__PIC__)) && defined(__i386__)
1141 RTCCUINTREG uSpill;
1142 __asm__ __volatile__ ("mov %%ebx,%1\n\t"
1143 "cpuid\n\t"
1144 "xchgl %%ebx,%1\n\t"
1145 : "=a" (uSpill),
1146 "=rm" (xBX)
1147 : "0" (1)
1148 : "ecx", "edx");
1149# else
1150 RTCCUINTREG uSpill;
1151 __asm__ __volatile__ ("cpuid"
1152 : "=a" (uSpill),
1153 "=b" (xBX)
1154 : "0" (1)
1155 : "ecx", "edx");
1156# endif
1157
1158# elif RT_INLINE_ASM_USES_INTRIN
1159 int aInfo[4];
1160 __cpuid(aInfo, 1);
1161 xBX = aInfo[1];
1162
1163# else
1164 __asm
1165 {
1166 push ebx
1167 mov eax, 1
1168 cpuid
1169 mov [xBX], ebx
1170 pop ebx
1171 }
1172# endif
1173 return (uint8_t)(xBX >> 24);
1174}
1175#endif
1176
1177
1178/**
1179 * Tests if it is a genuine Intel CPU based on the ASMCpuId(0) output.
1180 *
1181 * @returns true/false.
1182 * @param uEBX EBX return from ASMCpuId(0)
1183 * @param uECX ECX return from ASMCpuId(0)
1184 * @param uEDX EDX return from ASMCpuId(0)
1185 */
1186DECLINLINE(bool) ASMIsIntelCpuEx(uint32_t uEBX, uint32_t uECX, uint32_t uEDX)
1187{
1188 return uEBX == UINT32_C(0x756e6547)
1189 && uECX == UINT32_C(0x6c65746e)
1190 && uEDX == UINT32_C(0x49656e69);
1191}
1192
1193
1194/**
1195 * Tests if this is a genuine Intel CPU.
1196 *
1197 * @returns true/false.
1198 * @remarks ASSUMES that cpuid is supported by the CPU.
1199 */
1200DECLINLINE(bool) ASMIsIntelCpu(void)
1201{
1202 uint32_t uEAX, uEBX, uECX, uEDX;
1203 ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
1204 return ASMIsIntelCpuEx(uEBX, uECX, uEDX);
1205}
1206
1207
1208/**
1209 * Tests if it is an authentic AMD CPU based on the ASMCpuId(0) output.
1210 *
1211 * @returns true/false.
1212 * @param uEBX EBX return from ASMCpuId(0)
1213 * @param uECX ECX return from ASMCpuId(0)
1214 * @param uEDX EDX return from ASMCpuId(0)
1215 */
1216DECLINLINE(bool) ASMIsAmdCpuEx(uint32_t uEBX, uint32_t uECX, uint32_t uEDX)
1217{
1218 return uEBX == UINT32_C(0x68747541)
1219 && uECX == UINT32_C(0x444d4163)
1220 && uEDX == UINT32_C(0x69746e65);
1221}
1222
1223
1224/**
1225 * Tests if this is an authentic AMD CPU.
1226 *
1227 * @returns true/false.
1228 * @remarks ASSUMES that cpuid is supported by the CPU.
1229 */
1230DECLINLINE(bool) ASMIsAmdCpu(void)
1231{
1232 uint32_t uEAX, uEBX, uECX, uEDX;
1233 ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
1234 return ASMIsAmdCpuEx(uEBX, uECX, uEDX);
1235}
1236
1237
1238/**
1239 * Tests if it is a centaur hauling VIA CPU based on the ASMCpuId(0) output.
1240 *
1241 * @returns true/false.
1242 * @param uEBX EBX return from ASMCpuId(0).
1243 * @param uECX ECX return from ASMCpuId(0).
1244 * @param uEDX EDX return from ASMCpuId(0).
1245 */
1246DECLINLINE(bool) ASMIsViaCentaurCpuEx(uint32_t uEBX, uint32_t uECX, uint32_t uEDX)
1247{
1248 return uEBX == UINT32_C(0x746e6543)
1249 && uECX == UINT32_C(0x736c7561)
1250 && uEDX == UINT32_C(0x48727561);
1251}
1252
1253
1254/**
1255 * Tests if this is a centaur hauling VIA CPU.
1256 *
1257 * @returns true/false.
1258 * @remarks ASSUMES that cpuid is supported by the CPU.
1259 */
1260DECLINLINE(bool) ASMIsViaCentaurCpu(void)
1261{
1262 uint32_t uEAX, uEBX, uECX, uEDX;
1263 ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
1264 return ASMIsViaCentaurCpuEx(uEBX, uECX, uEDX);
1265}
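
Vendor dispatch sketch (editor's addition), reusing a single leaf 0 query for
all three *Ex predicates instead of calling the convenience wrappers three
times:

    uint32_t uEAX, uEBX, uECX, uEDX;
    ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
    if (ASMIsIntelCpuEx(uEBX, uECX, uEDX))
    {   /* "GenuineIntel" */ }
    else if (ASMIsAmdCpuEx(uEBX, uECX, uEDX))
    {   /* "AuthenticAMD" */ }
    else if (ASMIsViaCentaurCpuEx(uEBX, uECX, uEDX))
    {   /* "CentaurHauls" */ }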
1266
1267
1268/**
1269 * Checks whether ASMCpuId_EAX(0x00000000) indicates a valid range.
1270 *
1271 *
1272 * @returns true/false.
1273 * @param uEAX The EAX value of CPUID leaf 0x00000000.
1274 *
1275 * @note This only succeeds if there are at least two leaves in the range.
1276 * @remarks The upper range limit is just some half reasonable value we've
1277 * picked out of thin air.
1278 */
1279DECLINLINE(bool) ASMIsValidStdRange(uint32_t uEAX)
1280{
1281 return uEAX >= UINT32_C(0x00000001) && uEAX <= UINT32_C(0x000fffff);
1282}
1283
1284
1285/**
1286 * Checks whether ASMCpuId_EAX(0x80000000) indicates a valid range.
1287 *
1288 * This only succeeds if there are at least two leaves in the range.
1289 *
1290 * @returns true/false.
1291 * @param uEAX The EAX value of CPUID leaf 0x80000000.
1292 *
1293 * @note This only succeeds if there are at least two leaves in the range.
1294 * @remarks The upper range limit is just some half reasonable value we've
1295 * picked out of thin air.
1296 */
1297DECLINLINE(bool) ASMIsValidExtRange(uint32_t uEAX)
1298{
1299 return uEAX >= UINT32_C(0x80000001) && uEAX <= UINT32_C(0x800fffff);
1300}
1301
1302
1303/**
1304 * Extracts the CPU family from ASMCpuId(1) or ASMCpuId(0x80000001)
1305 *
1306 * @returns Family.
1307 * @param uEAX EAX return from ASMCpuId(1) or ASMCpuId(0x80000001).
1308 */
1309DECLINLINE(uint32_t) ASMGetCpuFamily(uint32_t uEAX)
1310{
1311 return ((uEAX >> 8) & 0xf) == 0xf
1312 ? ((uEAX >> 20) & 0x7f) + 0xf
1313 : ((uEAX >> 8) & 0xf);
1314}
1315
1316
1317/**
1318 * Extracts the CPU model from ASMCpuId(1) or ASMCpuId(0x80000001), Intel variant.
1319 *
1320 * @returns Model.
1321 * @param uEAX EAX from ASMCpuId(1) or ASMCpuId(0x80000001).
1322 */
1323DECLINLINE(uint32_t) ASMGetCpuModelIntel(uint32_t uEAX)
1324{
1325 return ((uEAX >> 8) & 0xf) == 0xf || (((uEAX >> 8) & 0xf) == 0x6) /* family! */
1326 ? ((uEAX >> 4) & 0xf) | ((uEAX >> 12) & 0xf0)
1327 : ((uEAX >> 4) & 0xf);
1328}
1329
1330
1331/**
1332 * Extracts the CPU model from ASMCpuId(1) or ASMCpuId(0x80000001), AMD variant.
1333 *
1334 * @returns Model.
1335 * @param uEAX EAX from ASMCpuId(1) or ASMCpuId(0x80000001).
1336 */
1337DECLINLINE(uint32_t) ASMGetCpuModelAMD(uint32_t uEAX)
1338{
1339 return ((uEAX >> 8) & 0xf) == 0xf
1340 ? ((uEAX >> 4) & 0xf) | ((uEAX >> 12) & 0xf0)
1341 : ((uEAX >> 4) & 0xf);
1342}
1343
1344
1345/**
1346 * Extracts the CPU model from ASMCpuId(1) or ASMCpuId(0x80000001)
1347 *
1348 * @returns Model.
1349 * @param uEAX EAX from ASMCpuId(1) or ASMCpuId(0x80000001).
1350 * @param fIntel Whether it's an Intel CPU. Use ASMIsIntelCpuEx() or ASMIsIntelCpu().
1351 */
1352DECLINLINE(uint32_t) ASMGetCpuModel(uint32_t uEAX, bool fIntel)
1353{
1354 return ((uEAX >> 8) & 0xf) == 0xf || (((uEAX >> 8) & 0xf) == 0x6 && fIntel) /* family! */
1355 ? ((uEAX >> 4) & 0xf) | ((uEAX >> 12) & 0xf0)
1356 : ((uEAX >> 4) & 0xf);
1357}
1358
1359
1360/**
1361 * Extracts the CPU stepping from ASMCpuId(1) or ASMCpuId(0x80000001)
1362 *
1363 * @returns Stepping.
1364 * @param uEAX EAX from ASMCpuId(1) or ASMCpuId(0x80000001).
1365 */
1366DECLINLINE(uint32_t) ASMGetCpuStepping(uint32_t uEAX)
1367{
1368 return uEAX & 0xf;
1369}
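
Putting the extractors together (editor's sketch): decoding the leaf 1
signature into family, model and stepping.

    uint32_t const uSignature = ASMCpuId_EAX(1);
    uint32_t const uFamily    = ASMGetCpuFamily(uSignature);
    uint32_t const uModel     = ASMGetCpuModel(uSignature, ASMIsIntelCpu());
    uint32_t const uStepping  = ASMGetCpuStepping(uSignature);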
1370
1371
1372/**
1373 * Get cr0.
1374 * @returns cr0.
1375 */
1376#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1377DECLASM(RTCCUINTREG) ASMGetCR0(void);
1378#else
1379DECLINLINE(RTCCUINTREG) ASMGetCR0(void)
1380{
1381 RTCCUINTREG uCR0;
1382# if RT_INLINE_ASM_USES_INTRIN
1383 uCR0 = __readcr0();
1384
1385# elif RT_INLINE_ASM_GNU_STYLE
1386# ifdef RT_ARCH_AMD64
1387 __asm__ __volatile__("movq %%cr0, %0\t\n" : "=r" (uCR0));
1388# else
1389 __asm__ __volatile__("movl %%cr0, %0\t\n" : "=r" (uCR0));
1390# endif
1391# else
1392 __asm
1393 {
1394# ifdef RT_ARCH_AMD64
1395 mov rax, cr0
1396 mov [uCR0], rax
1397# else
1398 mov eax, cr0
1399 mov [uCR0], eax
1400# endif
1401 }
1402# endif
1403 return uCR0;
1404}
1405#endif
1406
1407
1408/**
1409 * Sets the CR0 register.
1410 * @param uCR0 The new CR0 value.
1411 */
1412#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1413DECLASM(void) ASMSetCR0(RTCCUINTREG uCR0);
1414#else
1415DECLINLINE(void) ASMSetCR0(RTCCUINTREG uCR0)
1416{
1417# if RT_INLINE_ASM_USES_INTRIN
1418 __writecr0(uCR0);
1419
1420# elif RT_INLINE_ASM_GNU_STYLE
1421# ifdef RT_ARCH_AMD64
1422 __asm__ __volatile__("movq %0, %%cr0\n\t" :: "r" (uCR0));
1423# else
1424 __asm__ __volatile__("movl %0, %%cr0\n\t" :: "r" (uCR0));
1425# endif
1426# else
1427 __asm
1428 {
1429# ifdef RT_ARCH_AMD64
1430 mov rax, [uCR0]
1431 mov cr0, rax
1432# else
1433 mov eax, [uCR0]
1434 mov cr0, eax
1435# endif
1436 }
1437# endif
1438}
1439#endif
1440
1441
1442/**
1443 * Get cr2.
1444 * @returns cr2.
1445 */
1446#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1447DECLASM(RTCCUINTREG) ASMGetCR2(void);
1448#else
1449DECLINLINE(RTCCUINTREG) ASMGetCR2(void)
1450{
1451 RTCCUINTREG uCR2;
1452# if RT_INLINE_ASM_USES_INTRIN
1453 uCR2 = __readcr2();
1454
1455# elif RT_INLINE_ASM_GNU_STYLE
1456# ifdef RT_ARCH_AMD64
1457 __asm__ __volatile__("movq %%cr2, %0\t\n" : "=r" (uCR2));
1458# else
1459 __asm__ __volatile__("movl %%cr2, %0\t\n" : "=r" (uCR2));
1460# endif
1461# else
1462 __asm
1463 {
1464# ifdef RT_ARCH_AMD64
1465 mov rax, cr2
1466 mov [uCR2], rax
1467# else
1468 mov eax, cr2
1469 mov [uCR2], eax
1470# endif
1471 }
1472# endif
1473 return uCR2;
1474}
1475#endif
1476
1477
1478/**
1479 * Sets the CR2 register.
1480 * @param uCR2 The new CR2 value.
1481 */
1482#if RT_INLINE_ASM_EXTERNAL
1483DECLASM(void) ASMSetCR2(RTCCUINTREG uCR2);
1484#else
1485DECLINLINE(void) ASMSetCR2(RTCCUINTREG uCR2)
1486{
1487# if RT_INLINE_ASM_GNU_STYLE
1488# ifdef RT_ARCH_AMD64
1489 __asm__ __volatile__("movq %0, %%cr2\n\t" :: "r" (uCR2));
1490# else
1491 __asm__ __volatile__("movl %0, %%cr2\n\t" :: "r" (uCR2));
1492# endif
1493# else
1494 __asm
1495 {
1496# ifdef RT_ARCH_AMD64
1497 mov rax, [uCR2]
1498 mov cr2, rax
1499# else
1500 mov eax, [uCR2]
1501 mov cr2, eax
1502# endif
1503 }
1504# endif
1505}
1506#endif
1507
1508
1509/**
1510 * Get cr3.
1511 * @returns cr3.
1512 */
1513#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1514DECLASM(RTCCUINTREG) ASMGetCR3(void);
1515#else
1516DECLINLINE(RTCCUINTREG) ASMGetCR3(void)
1517{
1518 RTCCUINTREG uCR3;
1519# if RT_INLINE_ASM_USES_INTRIN
1520 uCR3 = __readcr3();
1521
1522# elif RT_INLINE_ASM_GNU_STYLE
1523# ifdef RT_ARCH_AMD64
1524 __asm__ __volatile__("movq %%cr3, %0\t\n" : "=r" (uCR3));
1525# else
1526 __asm__ __volatile__("movl %%cr3, %0\t\n" : "=r" (uCR3));
1527# endif
1528# else
1529 __asm
1530 {
1531# ifdef RT_ARCH_AMD64
1532 mov rax, cr3
1533 mov [uCR3], rax
1534# else
1535 mov eax, cr3
1536 mov [uCR3], eax
1537# endif
1538 }
1539# endif
1540 return uCR3;
1541}
1542#endif
1543
1544
1545/**
1546 * Sets the CR3 register.
1547 *
1548 * @param uCR3 New CR3 value.
1549 */
1550#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1551DECLASM(void) ASMSetCR3(RTCCUINTREG uCR3);
1552#else
1553DECLINLINE(void) ASMSetCR3(RTCCUINTREG uCR3)
1554{
1555# if RT_INLINE_ASM_USES_INTRIN
1556 __writecr3(uCR3);
1557
1558# elif RT_INLINE_ASM_GNU_STYLE
1559# ifdef RT_ARCH_AMD64
1560 __asm__ __volatile__("movq %0, %%cr3\n\t" : : "r" (uCR3));
1561# else
1562 __asm__ __volatile__("movl %0, %%cr3\n\t" : : "r" (uCR3));
1563# endif
1564# else
1565 __asm
1566 {
1567# ifdef RT_ARCH_AMD64
1568 mov rax, [uCR3]
1569 mov cr3, rax
1570# else
1571 mov eax, [uCR3]
1572 mov cr3, eax
1573# endif
1574 }
1575# endif
1576}
1577#endif
1578
1579
1580/**
1581 * Reloads the CR3 register.
1582 */
1583#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1584DECLASM(void) ASMReloadCR3(void);
1585#else
1586DECLINLINE(void) ASMReloadCR3(void)
1587{
1588# if RT_INLINE_ASM_USES_INTRIN
1589 __writecr3(__readcr3());
1590
1591# elif RT_INLINE_ASM_GNU_STYLE
1592 RTCCUINTREG u;
1593# ifdef RT_ARCH_AMD64
1594 __asm__ __volatile__("movq %%cr3, %0\n\t"
1595 "movq %0, %%cr3\n\t"
1596 : "=r" (u));
1597# else
1598 __asm__ __volatile__("movl %%cr3, %0\n\t"
1599 "movl %0, %%cr3\n\t"
1600 : "=r" (u));
1601# endif
1602# else
1603 __asm
1604 {
1605# ifdef RT_ARCH_AMD64
1606 mov rax, cr3
1607 mov cr3, rax
1608# else
1609 mov eax, cr3
1610 mov cr3, eax
1611# endif
1612 }
1613# endif
1614}
1615#endif
1616
1617
1618/**
1619 * Get cr4.
1620 * @returns cr4.
1621 */
1622#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1623DECLASM(RTCCUINTREG) ASMGetCR4(void);
1624#else
1625DECLINLINE(RTCCUINTREG) ASMGetCR4(void)
1626{
1627 RTCCUINTREG uCR4;
1628# if RT_INLINE_ASM_USES_INTRIN
1629 uCR4 = __readcr4();
1630
1631# elif RT_INLINE_ASM_GNU_STYLE
1632# ifdef RT_ARCH_AMD64
1633 __asm__ __volatile__("movq %%cr4, %0\t\n" : "=r" (uCR4));
1634# else
1635 __asm__ __volatile__("movl %%cr4, %0\t\n" : "=r" (uCR4));
1636# endif
1637# else
1638 __asm
1639 {
1640# ifdef RT_ARCH_AMD64
1641 mov rax, cr4
1642 mov [uCR4], rax
1643# else
1644 push eax /* just in case */
1645 /*mov eax, cr4*/
1646 _emit 0x0f
1647 _emit 0x20
1648 _emit 0xe0
1649 mov [uCR4], eax
1650 pop eax
1651# endif
1652 }
1653# endif
1654 return uCR4;
1655}
1656#endif
1657
1658
1659/**
1660 * Sets the CR4 register.
1661 *
1662 * @param uCR4 New CR4 value.
1663 */
1664#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1665DECLASM(void) ASMSetCR4(RTCCUINTREG uCR4);
1666#else
1667DECLINLINE(void) ASMSetCR4(RTCCUINTREG uCR4)
1668{
1669# if RT_INLINE_ASM_USES_INTRIN
1670 __writecr4(uCR4);
1671
1672# elif RT_INLINE_ASM_GNU_STYLE
1673# ifdef RT_ARCH_AMD64
1674 __asm__ __volatile__("movq %0, %%cr4\n\t" : : "r" (uCR4));
1675# else
1676 __asm__ __volatile__("movl %0, %%cr4\n\t" : : "r" (uCR4));
1677# endif
1678# else
1679 __asm
1680 {
1681# ifdef RT_ARCH_AMD64
1682 mov rax, [uCR4]
1683 mov cr4, rax
1684# else
1685 mov eax, [uCR4]
1686 _emit 0x0F
1687 _emit 0x22
1688 _emit 0xE0 /* mov cr4, eax */
1689# endif
1690 }
1691# endif
1692}
1693#endif
1694
1695
1696/**
1697 * Get cr8.
1698 * @returns cr8.
1699 * @remark The lock prefix hack for access from non-64-bit modes is NOT used and 0 is returned.
1700 */
1701#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1702DECLASM(RTCCUINTREG) ASMGetCR8(void);
1703#else
1704DECLINLINE(RTCCUINTREG) ASMGetCR8(void)
1705{
1706# ifdef RT_ARCH_AMD64
1707 RTCCUINTREG uCR8;
1708# if RT_INLINE_ASM_USES_INTRIN
1709 uCR8 = __readcr8();
1710
1711# elif RT_INLINE_ASM_GNU_STYLE
1712 __asm__ __volatile__("movq %%cr8, %0\t\n" : "=r" (uCR8));
1713# else
1714 __asm
1715 {
1716 mov rax, cr8
1717 mov [uCR8], rax
1718 }
1719# endif
1720 return uCR8;
1721# else /* !RT_ARCH_AMD64 */
1722 return 0;
1723# endif /* !RT_ARCH_AMD64 */
1724}
1725#endif
1726
1727
1728/**
1729 * Get XCR0 (eXtended feature Control Register 0).
1730 * @returns xcr0.
1731 */
1732DECLASM(uint64_t) ASMGetXcr0(void);
1733
1734/**
1735 * Sets the XCR0 register.
1736 * @param uXcr0 The new XCR0 value.
1737 */
1738DECLASM(void) ASMSetXcr0(uint64_t uXcr0);
1739
1740struct X86XSAVEAREA;
1741/**
1742 * Save extended CPU state.
1743 * @param pXStateArea Where to save the state.
1744 * @param fComponents Which state components to save.
1745 */
1746DECLASM(void) ASMXSave(struct X86XSAVEAREA *pXStateArea, uint64_t fComponents);
1747
1748/**
1749 * Loads extended CPU state.
1750 * @param pXStateArea Where to load the state from.
1751 * @param fComponents Which state components to load.
1752 */
1753DECLASM(void) ASMXRstor(struct X86XSAVEAREA const *pXStateArea, uint64_t fComponents);
1754
1755
1756/**
1757 * Enables interrupts (EFLAGS.IF).
1758 */
1759#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1760DECLASM(void) ASMIntEnable(void);
1761#else
1762DECLINLINE(void) ASMIntEnable(void)
1763{
1764# if RT_INLINE_ASM_GNU_STYLE
1765 __asm("sti\n");
1766# elif RT_INLINE_ASM_USES_INTRIN
1767 _enable();
1768# else
1769 __asm sti
1770# endif
1771}
1772#endif
1773
1774
1775/**
1776 * Disables interrupts (!EFLAGS.IF).
1777 */
1778#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1779DECLASM(void) ASMIntDisable(void);
1780#else
1781DECLINLINE(void) ASMIntDisable(void)
1782{
1783# if RT_INLINE_ASM_GNU_STYLE
1784 __asm("cli\n");
1785# elif RT_INLINE_ASM_USES_INTRIN
1786 _disable();
1787# else
1788 __asm cli
1789# endif
1790}
1791#endif
1792
1793
1794/**
1795 * Disables interrupts and returns previous xFLAGS.
1796 */
1797#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1798DECLASM(RTCCUINTREG) ASMIntDisableFlags(void);
1799#else
1800DECLINLINE(RTCCUINTREG) ASMIntDisableFlags(void)
1801{
1802 RTCCUINTREG xFlags;
1803# if RT_INLINE_ASM_GNU_STYLE
1804# ifdef RT_ARCH_AMD64
1805 __asm__ __volatile__("pushfq\n\t"
1806 "cli\n\t"
1807 "popq %0\n\t"
1808 : "=r" (xFlags));
1809# else
1810 __asm__ __volatile__("pushfl\n\t"
1811 "cli\n\t"
1812 "popl %0\n\t"
1813 : "=r" (xFlags));
1814# endif
1815# elif RT_INLINE_ASM_USES_INTRIN && !defined(RT_ARCH_X86)
1816 xFlags = ASMGetFlags();
1817 _disable();
1818# else
1819 __asm {
1820 pushfd
1821 cli
1822 pop [xFlags]
1823 }
1824# endif
1825 return xFlags;
1826}
1827#endif
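
The canonical pairing (editor's sketch): a short interrupt-free critical
section that restores the caller's previous IF state rather than
unconditionally re-enabling interrupts.

    RTCCUINTREG const fSavedFlags = ASMIntDisableFlags();
    /* ... touch per-CPU state that must not be interrupted ... */
    ASMSetFlags(fSavedFlags);   /* restores the previous EFLAGS.IF */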
1828
1829
1830/**
1831 * Are interrupts enabled?
1832 *
1833 * @returns true / false.
1834 */
1835DECLINLINE(bool) ASMIntAreEnabled(void)
1836{
1837 RTCCUINTREG uFlags = ASMGetFlags();
1838 return uFlags & 0x200 /* X86_EFL_IF */ ? true : false;
1839}
1840
1841
1842/**
1843 * Halts the CPU until interrupted.
1844 */
1845#if RT_INLINE_ASM_EXTERNAL
1846DECLASM(void) ASMHalt(void);
1847#else
1848DECLINLINE(void) ASMHalt(void)
1849{
1850# if RT_INLINE_ASM_GNU_STYLE
1851 __asm__ __volatile__("hlt\n\t");
1852# else
1853 __asm {
1854 hlt
1855 }
1856# endif
1857}
1858#endif
1859
1860
1861/**
1862 * Reads a machine specific register.
1863 *
1864 * @returns Register content.
1865 * @param uRegister Register to read.
1866 */
1867#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1868DECLASM(uint64_t) ASMRdMsr(uint32_t uRegister);
1869#else
1870DECLINLINE(uint64_t) ASMRdMsr(uint32_t uRegister)
1871{
1872 RTUINT64U u;
1873# if RT_INLINE_ASM_GNU_STYLE
1874 __asm__ __volatile__("rdmsr\n\t"
1875 : "=a" (u.s.Lo),
1876 "=d" (u.s.Hi)
1877 : "c" (uRegister));
1878
1879# elif RT_INLINE_ASM_USES_INTRIN
1880 u.u = __readmsr(uRegister);
1881
1882# else
1883 __asm
1884 {
1885 mov ecx, [uRegister]
1886 rdmsr
1887 mov [u.s.Lo], eax
1888 mov [u.s.Hi], edx
1889 }
1890# endif
1891
1892 return u.u;
1893}
1894#endif
1895
1896
1897/**
1898 * Writes a machine specific register.
1899 *
1900 *
1901 * @param uRegister Register to write to.
1902 * @param u64Val Value to write.
1903 */
1904#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1905DECLASM(void) ASMWrMsr(uint32_t uRegister, uint64_t u64Val);
1906#else
1907DECLINLINE(void) ASMWrMsr(uint32_t uRegister, uint64_t u64Val)
1908{
1909 RTUINT64U u;
1910
1911 u.u = u64Val;
1912# if RT_INLINE_ASM_GNU_STYLE
1913 __asm__ __volatile__("wrmsr\n\t"
1914 ::"a" (u.s.Lo),
1915 "d" (u.s.Hi),
1916 "c" (uRegister));
1917
1918# elif RT_INLINE_ASM_USES_INTRIN
1919 __writemsr(uRegister, u.u);
1920
1921# else
1922 __asm
1923 {
1924 mov ecx, [uRegister]
1925 mov edx, [u.s.Hi]
1926 mov eax, [u.s.Lo]
1927 wrmsr
1928 }
1929# endif
1930}
1931#endif
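
Read-modify-write sketch (editor's addition). uMsr and the bit being set are
hypothetical placeholders; MSR access is ring-0 only and an unknown register
raises #GP, which these helpers do not guard against. RT_BIT_64 comes from
iprt/cdefs.h.

    uint64_t uValue = ASMRdMsr(uMsr);   /* uMsr: hypothetical MSR index */
    uValue |= RT_BIT_64(0);             /* set a hypothetical enable bit */
    ASMWrMsr(uMsr, uValue);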
1932
1933
1934/**
1935 * Reads a machine specific register, extended version (for AMD).
1936 *
1937 * @returns Register content.
1938 * @param uRegister Register to read.
1939 * @param uXDI RDI/EDI value.
1940 */
1941#if RT_INLINE_ASM_EXTERNAL
1942DECLASM(uint64_t) ASMRdMsrEx(uint32_t uRegister, RTCCUINTREG uXDI);
1943#else
1944DECLINLINE(uint64_t) ASMRdMsrEx(uint32_t uRegister, RTCCUINTREG uXDI)
1945{
1946 RTUINT64U u;
1947# if RT_INLINE_ASM_GNU_STYLE
1948 __asm__ __volatile__("rdmsr\n\t"
1949 : "=a" (u.s.Lo),
1950 "=d" (u.s.Hi)
1951 : "c" (uRegister),
1952 "D" (uXDI));
1953
1954# else
1955 __asm
1956 {
1957 mov ecx, [uRegister]
1958 xchg edi, [uXDI]
1959 rdmsr
1960 mov [u.s.Lo], eax
1961 mov [u.s.Hi], edx
1962 xchg edi, [uXDI]
1963 }
1964# endif
1965
1966 return u.u;
1967}
1968#endif
1969
1970
1971/**
1972 * Writes a machine specific register, extended version (for AMD).
1973 *
1974 *
1975 * @param uRegister Register to write to.
1976 * @param uXDI RDI/EDI value.
1977 * @param u64Val Value to write.
1978 */
1979#if RT_INLINE_ASM_EXTERNAL
1980DECLASM(void) ASMWrMsrEx(uint32_t uRegister, RTCCUINTREG uXDI, uint64_t u64Val);
1981#else
1982DECLINLINE(void) ASMWrMsrEx(uint32_t uRegister, RTCCUINTREG uXDI, uint64_t u64Val)
1983{
1984 RTUINT64U u;
1985
1986 u.u = u64Val;
1987# if RT_INLINE_ASM_GNU_STYLE
1988 __asm__ __volatile__("wrmsr\n\t"
1989 ::"a" (u.s.Lo),
1990 "d" (u.s.Hi),
1991 "c" (uRegister),
1992 "D" (uXDI));
1993
1994# else
1995 __asm
1996 {
1997 mov ecx, [uRegister]
1998 xchg edi, [uXDI]
1999 mov edx, [u.s.Hi]
2000 mov eax, [u.s.Lo]
2001 wrmsr
2002 xchg edi, [uXDI]
2003 }
2004# endif
2005}
2006#endif
2007
2008
2009
2010/**
2011 * Reads low part of a machine specific register.
2012 *
2013 * @returns Register content.
2014 * @param uRegister Register to read.
2015 */
2016#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2017DECLASM(uint32_t) ASMRdMsr_Low(uint32_t uRegister);
2018#else
2019DECLINLINE(uint32_t) ASMRdMsr_Low(uint32_t uRegister)
2020{
2021 uint32_t u32;
2022# if RT_INLINE_ASM_GNU_STYLE
2023 __asm__ __volatile__("rdmsr\n\t"
2024 : "=a" (u32)
2025 : "c" (uRegister)
2026 : "edx");
2027
2028# elif RT_INLINE_ASM_USES_INTRIN
2029 u32 = (uint32_t)__readmsr(uRegister);
2030
2031# else
2032 __asm
2033 {
2034 mov ecx, [uRegister]
2035 rdmsr
2036 mov [u32], eax
2037 }
2038# endif
2039
2040 return u32;
2041}
2042#endif
2043
2044
2045/**
2046 * Reads high part of a machine specific register.
2047 *
2048 * @returns Register content.
2049 * @param uRegister Register to read.
2050 */
2051#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2052DECLASM(uint32_t) ASMRdMsr_High(uint32_t uRegister);
2053#else
2054DECLINLINE(uint32_t) ASMRdMsr_High(uint32_t uRegister)
2055{
2056 uint32_t u32;
2057# if RT_INLINE_ASM_GNU_STYLE
2058 __asm__ __volatile__("rdmsr\n\t"
2059 : "=d" (u32)
2060 : "c" (uRegister)
2061 : "eax");
2062
2063# elif RT_INLINE_ASM_USES_INTRIN
2064 u32 = (uint32_t)(__readmsr(uRegister) >> 32);
2065
2066# else
2067 __asm
2068 {
2069 mov ecx, [uRegister]
2070 rdmsr
2071 mov [u32], edx
2072 }
2073# endif
2074
2075 return u32;
2076}
2077#endif
2078
2079
2080/**
2081 * Gets dr0.
2082 *
2083 * @returns dr0.
2084 */
2085#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2086DECLASM(RTCCUINTREG) ASMGetDR0(void);
2087#else
2088DECLINLINE(RTCCUINTREG) ASMGetDR0(void)
2089{
2090 RTCCUINTREG uDR0;
2091# if RT_INLINE_ASM_USES_INTRIN
2092 uDR0 = __readdr(0);
2093# elif RT_INLINE_ASM_GNU_STYLE
2094# ifdef RT_ARCH_AMD64
2095 __asm__ __volatile__("movq %%dr0, %0\n\t" : "=r" (uDR0));
2096# else
2097 __asm__ __volatile__("movl %%dr0, %0\n\t" : "=r" (uDR0));
2098# endif
2099# else
2100 __asm
2101 {
2102# ifdef RT_ARCH_AMD64
2103 mov rax, dr0
2104 mov [uDR0], rax
2105# else
2106 mov eax, dr0
2107 mov [uDR0], eax
2108# endif
2109 }
2110# endif
2111 return uDR0;
2112}
2113#endif
2114
2115
2116/**
2117 * Gets dr1.
2118 *
2119 * @returns dr1.
2120 */
2121#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2122DECLASM(RTCCUINTREG) ASMGetDR1(void);
2123#else
2124DECLINLINE(RTCCUINTREG) ASMGetDR1(void)
2125{
2126 RTCCUINTREG uDR1;
2127# if RT_INLINE_ASM_USES_INTRIN
2128 uDR1 = __readdr(1);
2129# elif RT_INLINE_ASM_GNU_STYLE
2130# ifdef RT_ARCH_AMD64
2131 __asm__ __volatile__("movq %%dr1, %0\n\t" : "=r" (uDR1));
2132# else
2133 __asm__ __volatile__("movl %%dr1, %0\n\t" : "=r" (uDR1));
2134# endif
2135# else
2136 __asm
2137 {
2138# ifdef RT_ARCH_AMD64
2139 mov rax, dr1
2140 mov [uDR1], rax
2141# else
2142 mov eax, dr1
2143 mov [uDR1], eax
2144# endif
2145 }
2146# endif
2147 return uDR1;
2148}
2149#endif
2150
2151
2152/**
2153 * Gets dr2.
2154 *
2155 * @returns dr2.
2156 */
2157#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2158DECLASM(RTCCUINTREG) ASMGetDR2(void);
2159#else
2160DECLINLINE(RTCCUINTREG) ASMGetDR2(void)
2161{
2162 RTCCUINTREG uDR2;
2163# if RT_INLINE_ASM_USES_INTRIN
2164 uDR2 = __readdr(2);
2165# elif RT_INLINE_ASM_GNU_STYLE
2166# ifdef RT_ARCH_AMD64
2167 __asm__ __volatile__("movq %%dr2, %0\n\t" : "=r" (uDR2));
2168# else
2169 __asm__ __volatile__("movl %%dr2, %0\n\t" : "=r" (uDR2));
2170# endif
2171# else
2172 __asm
2173 {
2174# ifdef RT_ARCH_AMD64
2175 mov rax, dr2
2176 mov [uDR2], rax
2177# else
2178 mov eax, dr2
2179 mov [uDR2], eax
2180# endif
2181 }
2182# endif
2183 return uDR2;
2184}
2185#endif
2186
2187
2188/**
2189 * Gets dr3.
2190 *
2191 * @returns dr3.
2192 */
2193#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2194DECLASM(RTCCUINTREG) ASMGetDR3(void);
2195#else
2196DECLINLINE(RTCCUINTREG) ASMGetDR3(void)
2197{
2198 RTCCUINTREG uDR3;
2199# if RT_INLINE_ASM_USES_INTRIN
2200 uDR3 = __readdr(3);
2201# elif RT_INLINE_ASM_GNU_STYLE
2202# ifdef RT_ARCH_AMD64
2203 __asm__ __volatile__("movq %%dr3, %0\n\t" : "=r" (uDR3));
2204# else
2205 __asm__ __volatile__("movl %%dr3, %0\n\t" : "=r" (uDR3));
2206# endif
2207# else
2208 __asm
2209 {
2210# ifdef RT_ARCH_AMD64
2211 mov rax, dr3
2212 mov [uDR3], rax
2213# else
2214 mov eax, dr3
2215 mov [uDR3], eax
2216# endif
2217 }
2218# endif
2219 return uDR3;
2220}
2221#endif
2222
2223
2224/**
2225 * Gets dr6.
2226 *
2227 * @returns dr6.
2228 */
2229#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2230DECLASM(RTCCUINTREG) ASMGetDR6(void);
2231#else
2232DECLINLINE(RTCCUINTREG) ASMGetDR6(void)
2233{
2234 RTCCUINTREG uDR6;
2235# if RT_INLINE_ASM_USES_INTRIN
2236 uDR6 = __readdr(6);
2237# elif RT_INLINE_ASM_GNU_STYLE
2238# ifdef RT_ARCH_AMD64
2239 __asm__ __volatile__("movq %%dr6, %0\n\t" : "=r" (uDR6));
2240# else
2241 __asm__ __volatile__("movl %%dr6, %0\n\t" : "=r" (uDR6));
2242# endif
2243# else
2244 __asm
2245 {
2246# ifdef RT_ARCH_AMD64
2247 mov rax, dr6
2248 mov [uDR6], rax
2249# else
2250 mov eax, dr6
2251 mov [uDR6], eax
2252# endif
2253 }
2254# endif
2255 return uDR6;
2256}
2257#endif
2258
2259
2260/**
2261 * Reads and clears DR6.
2262 *
2263 * @returns DR6.
2264 */
2265#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2266DECLASM(RTCCUINTREG) ASMGetAndClearDR6(void);
2267#else
2268DECLINLINE(RTCCUINTREG) ASMGetAndClearDR6(void)
2269{
2270 RTCCUINTREG uDR6;
2271# if RT_INLINE_ASM_USES_INTRIN
2272 uDR6 = __readdr(6);
2273 __writedr(6, 0xffff0ff0U); /* Bits 31:16 and 11:4 are set; bits 15:12, 3:0 and 63:32 are zero. */
2274# elif RT_INLINE_ASM_GNU_STYLE
2275 RTCCUINTREG uNewValue = 0xffff0ff0U; /* Bits 31:16 and 11:4 are set; bits 15:12, 3:0 and 63:32 are zero. */
2276# ifdef RT_ARCH_AMD64
2277 __asm__ __volatile__("movq %%dr6, %0\n\t"
2278 "movq %1, %%dr6\n\t"
2279 : "=r" (uDR6)
2280 : "r" (uNewValue));
2281# else
2282 __asm__ __volatile__("movl %%dr6, %0\n\t"
2283 "movl %1, %%dr6\n\t"
2284 : "=r" (uDR6)
2285 : "r" (uNewValue));
2286# endif
2287# else
2288 __asm
2289 {
2290# ifdef RT_ARCH_AMD64
2291 mov rax, dr6
2292 mov [uDR6], rax
2293 mov rcx, rax
2294 mov ecx, 0ffff0ff0h; /* Bits 31:16 and 11:4 are set; bits 15:12, 3:0 and 63:32 are zero. */
2295 mov dr6, rcx
2296# else
2297 mov eax, dr6
2298 mov [uDR6], eax
2299 mov ecx, 0ffff0ff0h; /* Bits 31:16 and 11:4 are set; bits 15:12 and 3:0 are zero. */
2300 mov dr6, ecx
2301# endif
2302 }
2303# endif
2304 return uDR6;
2305}
2306#endif
2307
2308
2309/**
2310 * Gets dr7.
2311 *
2312 * @returns dr7.
2313 */
2314#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2315DECLASM(RTCCUINTREG) ASMGetDR7(void);
2316#else
2317DECLINLINE(RTCCUINTREG) ASMGetDR7(void)
2318{
2319 RTCCUINTREG uDR7;
2320# if RT_INLINE_ASM_USES_INTRIN
2321 uDR7 = __readdr(7);
2322# elif RT_INLINE_ASM_GNU_STYLE
2323# ifdef RT_ARCH_AMD64
2324 __asm__ __volatile__("movq %%dr7, %0\n\t" : "=r" (uDR7));
2325# else
2326 __asm__ __volatile__("movl %%dr7, %0\n\t" : "=r" (uDR7));
2327# endif
2328# else
2329 __asm
2330 {
2331# ifdef RT_ARCH_AMD64
2332 mov rax, dr7
2333 mov [uDR7], rax
2334# else
2335 mov eax, dr7
2336 mov [uDR7], eax
2337# endif
2338 }
2339# endif
2340 return uDR7;
2341}
2342#endif
2343
2344
2345/**
2346 * Sets dr0.
2347 *
2348 * @param uDRVal Debug register value to write.
2349 */
2350#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2351DECLASM(void) ASMSetDR0(RTCCUINTREG uDRVal);
2352#else
2353DECLINLINE(void) ASMSetDR0(RTCCUINTREG uDRVal)
2354{
2355# if RT_INLINE_ASM_USES_INTRIN
2356 __writedr(0, uDRVal);
2357# elif RT_INLINE_ASM_GNU_STYLE
2358# ifdef RT_ARCH_AMD64
2359 __asm__ __volatile__("movq %0, %%dr0\n\t" : : "r" (uDRVal));
2360# else
2361 __asm__ __volatile__("movl %0, %%dr0\n\t" : : "r" (uDRVal));
2362# endif
2363# else
2364 __asm
2365 {
2366# ifdef RT_ARCH_AMD64
2367 mov rax, [uDRVal]
2368 mov dr0, rax
2369# else
2370 mov eax, [uDRVal]
2371 mov dr0, eax
2372# endif
2373 }
2374# endif
2375}
2376#endif
2377
2378
2379/**
2380 * Sets dr1.
2381 *
2382 * @param uDRVal Debug register value to write.
2383 */
2384#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2385DECLASM(void) ASMSetDR1(RTCCUINTREG uDRVal);
2386#else
2387DECLINLINE(void) ASMSetDR1(RTCCUINTREG uDRVal)
2388{
2389# if RT_INLINE_ASM_USES_INTRIN
2390 __writedr(1, uDRVal);
2391# elif RT_INLINE_ASM_GNU_STYLE
2392# ifdef RT_ARCH_AMD64
2393 __asm__ __volatile__("movq %0, %%dr1\n\t" : : "r" (uDRVal));
2394# else
2395 __asm__ __volatile__("movl %0, %%dr1\n\t" : : "r" (uDRVal));
2396# endif
2397# else
2398 __asm
2399 {
2400# ifdef RT_ARCH_AMD64
2401 mov rax, [uDRVal]
2402 mov dr1, rax
2403# else
2404 mov eax, [uDRVal]
2405 mov dr1, eax
2406# endif
2407 }
2408# endif
2409}
2410#endif
2411
2412
2413/**
2414 * Sets dr2.
2415 *
2416 * @param uDRVal Debug register value to write.
2417 */
2418#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2419DECLASM(void) ASMSetDR2(RTCCUINTREG uDRVal);
2420#else
2421DECLINLINE(void) ASMSetDR2(RTCCUINTREG uDRVal)
2422{
2423# if RT_INLINE_ASM_USES_INTRIN
2424 __writedr(2, uDRVal);
2425# elif RT_INLINE_ASM_GNU_STYLE
2426# ifdef RT_ARCH_AMD64
2427 __asm__ __volatile__("movq %0, %%dr2\n\t" : : "r" (uDRVal));
2428# else
2429 __asm__ __volatile__("movl %0, %%dr2\n\t" : : "r" (uDRVal));
2430# endif
2431# else
2432 __asm
2433 {
2434# ifdef RT_ARCH_AMD64
2435 mov rax, [uDRVal]
2436 mov dr2, rax
2437# else
2438 mov eax, [uDRVal]
2439 mov dr2, eax
2440# endif
2441 }
2442# endif
2443}
2444#endif
2445
2446
2447/**
2448 * Sets dr3.
2449 *
2450 * @param uDRVal Debug register value to write.
2451 */
2452#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2453DECLASM(void) ASMSetDR3(RTCCUINTREG uDRVal);
2454#else
2455DECLINLINE(void) ASMSetDR3(RTCCUINTREG uDRVal)
2456{
2457# if RT_INLINE_ASM_USES_INTRIN
2458 __writedr(3, uDRVal);
2459# elif RT_INLINE_ASM_GNU_STYLE
2460# ifdef RT_ARCH_AMD64
2461 __asm__ __volatile__("movq %0, %%dr3\n\t" : : "r" (uDRVal));
2462# else
2463 __asm__ __volatile__("movl %0, %%dr3\n\t" : : "r" (uDRVal));
2464# endif
2465# else
2466 __asm
2467 {
2468# ifdef RT_ARCH_AMD64
2469 mov rax, [uDRVal]
2470 mov dr3, rax
2471# else
2472 mov eax, [uDRVal]
2473 mov dr3, eax
2474# endif
2475 }
2476# endif
2477}
2478#endif
2479
2480
2481/**
2482 * Sets dr6.
2483 *
2484 * @param uDRVal Debug register value to write.
2485 */
2486#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2487DECLASM(void) ASMSetDR6(RTCCUINTREG uDRVal);
2488#else
2489DECLINLINE(void) ASMSetDR6(RTCCUINTREG uDRVal)
2490{
2491# if RT_INLINE_ASM_USES_INTRIN
2492 __writedr(6, uDRVal);
2493# elif RT_INLINE_ASM_GNU_STYLE
2494# ifdef RT_ARCH_AMD64
2495 __asm__ __volatile__("movq %0, %%dr6\n\t" : : "r" (uDRVal));
2496# else
2497 __asm__ __volatile__("movl %0, %%dr6\n\t" : : "r" (uDRVal));
2498# endif
2499# else
2500 __asm
2501 {
2502# ifdef RT_ARCH_AMD64
2503 mov rax, [uDRVal]
2504 mov dr6, rax
2505# else
2506 mov eax, [uDRVal]
2507 mov dr6, eax
2508# endif
2509 }
2510# endif
2511}
2512#endif
2513
2514
2515/**
2516 * Sets dr7.
2517 *
2518 * @param uDRVal Debug register value to write
2519 */
2520#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2521DECLASM(void) ASMSetDR7(RTCCUINTREG uDRVal);
2522#else
2523DECLINLINE(void) ASMSetDR7(RTCCUINTREG uDRVal)
2524{
2525# if RT_INLINE_ASM_USES_INTRIN
2526 __writedr(7, uDRVal);
2527# elif RT_INLINE_ASM_GNU_STYLE
2528# ifdef RT_ARCH_AMD64
2529 __asm__ __volatile__("movq %0, %%dr7\n\t" : : "r" (uDRVal));
2530# else
2531 __asm__ __volatile__("movl %0, %%dr7\n\t" : : "r" (uDRVal));
2532# endif
2533# else
2534 __asm
2535 {
2536# ifdef RT_ARCH_AMD64
2537 mov rax, [uDRVal]
2538 mov dr7, rax
2539# else
2540 mov eax, [uDRVal]
2541 mov dr7, eax
2542# endif
2543 }
2544# endif
2545}
2546#endif
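

/*
 * Usage sketch (illustrative only, not part of this header): arming a
 * hardware execute breakpoint with DR0 and DR7.  The bit positions below
 * follow the architectural DR7 layout (L0 = bit 0, LE = bit 8); production
 * code would normally use symbolic X86_DR7_* constants rather than RT_BIT.
 *
 *      void ExampleArmExecBreakpoint(RTCCUINTREG uCodeAddr)
 *      {
 *          ASMSetDR0(uCodeAddr);               // linear address to trap on.
 *          RTCCUINTREG uDR7 = ASMGetDR7();
 *          uDR7 |= RT_BIT(0) | RT_BIT(8);      // L0 + LE; R/W0=00, LEN0=00
 *          ASMSetDR7(uDR7);                    //   => 1-byte execute bp.
 *      }
 */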


/**
 * Writes an 8-bit unsigned integer to an I/O port, ordered.
 *
 * @param   Port    I/O port to write to.
 * @param   u8      8-bit integer to write.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(void) ASMOutU8(RTIOPORT Port, uint8_t u8);
#else
DECLINLINE(void) ASMOutU8(RTIOPORT Port, uint8_t u8)
{
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("outb %b1, %w0\n\t"
                         :: "Nd" (Port),
                            "a" (u8));

# elif RT_INLINE_ASM_USES_INTRIN
    __outbyte(Port, u8);

# else
    __asm
    {
        mov     dx, [Port]
        mov     al, [u8]
        out     dx, al
    }
# endif
}
#endif


/**
 * Reads an 8-bit unsigned integer from an I/O port, ordered.
 *
 * @returns 8-bit integer.
 * @param   Port    I/O port to read from.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(uint8_t) ASMInU8(RTIOPORT Port);
#else
DECLINLINE(uint8_t) ASMInU8(RTIOPORT Port)
{
    uint8_t u8;
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("inb %w1, %b0\n\t"
                         : "=a" (u8)
                         : "Nd" (Port));

# elif RT_INLINE_ASM_USES_INTRIN
    u8 = __inbyte(Port);

# else
    __asm
    {
        mov     dx, [Port]
        in      al, dx
        mov     [u8], al
    }
# endif
    return u8;
}
#endif
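

/*
 * Usage sketch (illustrative only): the classic CMOS/RTC access pattern,
 * writing a register index to port 0x70 and reading the data from port 0x71
 * with the ordered port I/O above.  Register index 0x0A (RTC status A) is
 * just an example value.
 *
 *      uint8_t ExampleReadCmosStatusA(void)
 *      {
 *          ASMOutU8(0x70, 0x0a);       // select CMOS register 0x0A.
 *          return ASMInU8(0x71);       // read the selected register.
 *      }
 */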


/**
 * Writes a 16-bit unsigned integer to an I/O port, ordered.
 *
 * @param   Port    I/O port to write to.
 * @param   u16     16-bit integer to write.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(void) ASMOutU16(RTIOPORT Port, uint16_t u16);
#else
DECLINLINE(void) ASMOutU16(RTIOPORT Port, uint16_t u16)
{
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("outw %w1, %w0\n\t"
                         :: "Nd" (Port),
                            "a" (u16));

# elif RT_INLINE_ASM_USES_INTRIN
    __outword(Port, u16);

# else
    __asm
    {
        mov     dx, [Port]
        mov     ax, [u16]
        out     dx, ax
    }
# endif
}
#endif


/**
 * Reads a 16-bit unsigned integer from an I/O port, ordered.
 *
 * @returns 16-bit integer.
 * @param   Port    I/O port to read from.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(uint16_t) ASMInU16(RTIOPORT Port);
#else
DECLINLINE(uint16_t) ASMInU16(RTIOPORT Port)
{
    uint16_t u16;
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("inw %w1, %w0\n\t"
                         : "=a" (u16)
                         : "Nd" (Port));

# elif RT_INLINE_ASM_USES_INTRIN
    u16 = __inword(Port);

# else
    __asm
    {
        mov     dx, [Port]
        in      ax, dx
        mov     [u16], ax
    }
# endif
    return u16;
}
#endif


/**
 * Writes a 32-bit unsigned integer to an I/O port, ordered.
 *
 * @param   Port    I/O port to write to.
 * @param   u32     32-bit integer to write.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(void) ASMOutU32(RTIOPORT Port, uint32_t u32);
#else
DECLINLINE(void) ASMOutU32(RTIOPORT Port, uint32_t u32)
{
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("outl %1, %w0\n\t"
                         :: "Nd" (Port),
                            "a" (u32));

# elif RT_INLINE_ASM_USES_INTRIN
    __outdword(Port, u32);

# else
    __asm
    {
        mov     dx, [Port]
        mov     eax, [u32]
        out     dx, eax
    }
# endif
}
#endif


/**
 * Reads a 32-bit unsigned integer from an I/O port, ordered.
 *
 * @returns 32-bit integer.
 * @param   Port    I/O port to read from.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(uint32_t) ASMInU32(RTIOPORT Port);
#else
DECLINLINE(uint32_t) ASMInU32(RTIOPORT Port)
{
    uint32_t u32;
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("inl %w1, %0\n\t"
                         : "=a" (u32)
                         : "Nd" (Port));

# elif RT_INLINE_ASM_USES_INTRIN
    u32 = __indword(Port);

# else
    __asm
    {
        mov     dx, [Port]
        in      eax, dx
        mov     [u32], eax
    }
# endif
    return u32;
}
#endif
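

/*
 * Usage sketch (illustrative only): reading a PCI configuration dword via
 * configuration mechanism #1, i.e. an address write to port 0xCF8 followed
 * by a data read from port 0xCFC.  The address encoding is standard PCI,
 * but the helper itself is an assumption for the example, not an IPRT API.
 *
 *      uint32_t ExamplePciConfigRead32(uint8_t uBus, uint8_t uDev,
 *                                      uint8_t uFn, uint8_t offReg)
 *      {
 *          uint32_t uAddr = UINT32_C(0x80000000)
 *                         | ((uint32_t)uBus << 16)
 *                         | ((uint32_t)uDev << 11)
 *                         | ((uint32_t)uFn  <<  8)
 *                         | (offReg & 0xfc);
 *          ASMOutU32(0xcf8, uAddr);
 *          return ASMInU32(0xcfc);
 *      }
 */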


/**
 * Writes a string of 8-bit unsigned integer items to an I/O port, ordered.
 *
 * @param   Port    I/O port to write to.
 * @param   pau8    Pointer to the string buffer.
 * @param   c       The number of items to write.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(void) ASMOutStrU8(RTIOPORT Port, uint8_t const *pau8, size_t c);
#else
DECLINLINE(void) ASMOutStrU8(RTIOPORT Port, uint8_t const *pau8, size_t c)
{
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("rep; outsb\n\t"
                         : "+S" (pau8),
                           "+c" (c)
                         : "d" (Port));

# elif RT_INLINE_ASM_USES_INTRIN
    __outbytestring(Port, (unsigned char *)pau8, (unsigned long)c);

# else
    __asm
    {
        mov     dx, [Port]
        mov     ecx, [c]
        mov     eax, [pau8]
        xchg    esi, eax
        rep outsb
        xchg    esi, eax
    }
# endif
}
#endif


/**
 * Reads a string of 8-bit unsigned integer items from an I/O port, ordered.
 *
 * @param   Port    I/O port to read from.
 * @param   pau8    Pointer to the string buffer (output).
 * @param   c       The number of items to read.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(void) ASMInStrU8(RTIOPORT Port, uint8_t *pau8, size_t c);
#else
DECLINLINE(void) ASMInStrU8(RTIOPORT Port, uint8_t *pau8, size_t c)
{
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("rep; insb\n\t"
                         : "+D" (pau8),
                           "+c" (c)
                         : "d" (Port));

# elif RT_INLINE_ASM_USES_INTRIN
    __inbytestring(Port, pau8, (unsigned long)c);

# else
    __asm
    {
        mov     dx, [Port]
        mov     ecx, [c]
        mov     eax, [pau8]
        xchg    edi, eax
        rep insb
        xchg    edi, eax
    }
# endif
}
#endif


/**
 * Writes a string of 16-bit unsigned integer items to an I/O port, ordered.
 *
 * @param   Port    I/O port to write to.
 * @param   pau16   Pointer to the string buffer.
 * @param   c       The number of items to write.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(void) ASMOutStrU16(RTIOPORT Port, uint16_t const *pau16, size_t c);
#else
DECLINLINE(void) ASMOutStrU16(RTIOPORT Port, uint16_t const *pau16, size_t c)
{
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("rep; outsw\n\t"
                         : "+S" (pau16),
                           "+c" (c)
                         : "d" (Port));

# elif RT_INLINE_ASM_USES_INTRIN
    __outwordstring(Port, (unsigned short *)pau16, (unsigned long)c);

# else
    __asm
    {
        mov     dx, [Port]
        mov     ecx, [c]
        mov     eax, [pau16]
        xchg    esi, eax
        rep outsw
        xchg    esi, eax
    }
# endif
}
#endif


/**
 * Reads a string of 16-bit unsigned integer items from an I/O port, ordered.
 *
 * @param   Port    I/O port to read from.
 * @param   pau16   Pointer to the string buffer (output).
 * @param   c       The number of items to read.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(void) ASMInStrU16(RTIOPORT Port, uint16_t *pau16, size_t c);
#else
DECLINLINE(void) ASMInStrU16(RTIOPORT Port, uint16_t *pau16, size_t c)
{
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("rep; insw\n\t"
                         : "+D" (pau16),
                           "+c" (c)
                         : "d" (Port));

# elif RT_INLINE_ASM_USES_INTRIN
    __inwordstring(Port, pau16, (unsigned long)c);

# else
    __asm
    {
        mov     dx, [Port]
        mov     ecx, [c]
        mov     eax, [pau16]
        xchg    edi, eax
        rep insw
        xchg    edi, eax
    }
# endif
}
#endif
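

/*
 * Usage sketch (illustrative only): the string variants map to REP INS/OUTS
 * and are the natural fit for PIO data transfers.  Reading one 512-byte ATA
 * sector from the primary channel's data register (port 0x1F0) looks roughly
 * like this; the port number and sector size are conventional PC values, not
 * IPRT definitions.
 *
 *      void ExampleReadAtaSector(uint16_t *pau16Sector)
 *      {
 *          ASMInStrU16(0x1f0, pau16Sector, 256);   // 256 words = 512 bytes.
 *      }
 */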


/**
 * Writes a string of 32-bit unsigned integer items to an I/O port, ordered.
 *
 * @param   Port    I/O port to write to.
 * @param   pau32   Pointer to the string buffer.
 * @param   c       The number of items to write.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(void) ASMOutStrU32(RTIOPORT Port, uint32_t const *pau32, size_t c);
#else
DECLINLINE(void) ASMOutStrU32(RTIOPORT Port, uint32_t const *pau32, size_t c)
{
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("rep; outsl\n\t"
                         : "+S" (pau32),
                           "+c" (c)
                         : "d" (Port));

# elif RT_INLINE_ASM_USES_INTRIN
    __outdwordstring(Port, (unsigned long *)pau32, (unsigned long)c);

# else
    __asm
    {
        mov     dx, [Port]
        mov     ecx, [c]
        mov     eax, [pau32]
        xchg    esi, eax
        rep outsd
        xchg    esi, eax
    }
# endif
}
#endif


/**
 * Reads a string of 32-bit unsigned integer items from an I/O port, ordered.
 *
 * @param   Port    I/O port to read from.
 * @param   pau32   Pointer to the string buffer (output).
 * @param   c       The number of items to read.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(void) ASMInStrU32(RTIOPORT Port, uint32_t *pau32, size_t c);
#else
DECLINLINE(void) ASMInStrU32(RTIOPORT Port, uint32_t *pau32, size_t c)
{
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("rep; insl\n\t"
                         : "+D" (pau32),
                           "+c" (c)
                         : "d" (Port));

# elif RT_INLINE_ASM_USES_INTRIN
    __indwordstring(Port, (unsigned long *)pau32, (unsigned long)c);

# else
    __asm
    {
        mov     dx, [Port]
        mov     ecx, [c]
        mov     eax, [pau32]
        xchg    edi, eax
        rep insd
        xchg    edi, eax
    }
# endif
}
#endif


/**
 * Invalidate page.
 *
 * @param   pv      Address of the page to invalidate.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(void) ASMInvalidatePage(void *pv);
#else
DECLINLINE(void) ASMInvalidatePage(void *pv)
{
# if RT_INLINE_ASM_USES_INTRIN
    __invlpg(pv);

# elif RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("invlpg %0\n\t"
                         : : "m" (*(uint8_t *)pv));
# else
    __asm
    {
#  ifdef RT_ARCH_AMD64
        mov     rax, [pv]
        invlpg  [rax]
#  else
        mov     eax, [pv]
        invlpg  [eax]
#  endif
    }
# endif
}
#endif
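

/*
 * Usage sketch (illustrative only): after changing a page table entry, the
 * stale TLB entry for that linear address must be flushed.  ExampleWritePte
 * is a hypothetical PTE writer; only ASMInvalidatePage comes from this
 * header.
 *
 *      void ExampleRemapPage(void *pvPage, X86PGUINT NewPte)
 *      {
 *          ExampleWritePte(pvPage, NewPte);    // hypothetical PTE update.
 *          ASMInvalidatePage(pvPage);          // drop the stale TLB entry.
 *      }
 */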


/**
 * Write back the internal caches and invalidate them.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(void) ASMWriteBackAndInvalidateCaches(void);
#else
DECLINLINE(void) ASMWriteBackAndInvalidateCaches(void)
{
# if RT_INLINE_ASM_USES_INTRIN
    __wbinvd();

# elif RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("wbinvd");
# else
    __asm
    {
        wbinvd
    }
# endif
}
#endif
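

/*
 * Usage sketch (illustrative only): WBINVD is typically issued when the
 * cacheability of memory is about to change, e.g. around MTRR or PAT
 * reprogramming, so no dirty lines with the old memory type survive.
 * ExampleReprogramMtrrs stands in for such a sequence and is not an
 * IPRT function.
 *
 *      ASMWriteBackAndInvalidateCaches();
 *      ExampleReprogramMtrrs();                // hypothetical.
 *      ASMWriteBackAndInvalidateCaches();
 */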


/**
 * Invalidate internal and (perhaps) external caches without first
 * flushing dirty cache lines. Use with extreme care.
 */
#if RT_INLINE_ASM_EXTERNAL
DECLASM(void) ASMInvalidateInternalCaches(void);
#else
DECLINLINE(void) ASMInvalidateInternalCaches(void)
{
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("invd");
# else
    __asm
    {
        invd
    }
# endif
}
#endif


/**
 * Memory load/store fence, waits for any pending writes and reads to complete.
 * Requires the X86_CPUID_FEATURE_EDX_SSE2 CPUID bit set.
 */
DECLINLINE(void) ASMMemoryFenceSSE2(void)
{
#if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__ (".byte 0x0f,0xae,0xf0\n\t"); /* mfence */
#elif RT_INLINE_ASM_USES_INTRIN
    _mm_mfence();
#else
    __asm
    {
        _emit   0x0f    /* mfence */
        _emit   0xae
        _emit   0xf0
    }
#endif
}


/**
 * Memory store fence, waits for any writes to complete.
 * Requires the X86_CPUID_FEATURE_EDX_SSE CPUID bit set.
 */
DECLINLINE(void) ASMWriteFenceSSE(void)
{
#if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__ (".byte 0x0f,0xae,0xf8\n\t"); /* sfence */
#elif RT_INLINE_ASM_USES_INTRIN
    _mm_sfence();
#else
    __asm
    {
        _emit   0x0f    /* sfence */
        _emit   0xae
        _emit   0xf8
    }
#endif
}


/**
 * Memory load fence, waits for any pending reads to complete.
 * Requires the X86_CPUID_FEATURE_EDX_SSE2 CPUID bit set.
 */
DECLINLINE(void) ASMReadFenceSSE2(void)
{
#if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__ (".byte 0x0f,0xae,0xe8\n\t"); /* lfence */
#elif RT_INLINE_ASM_USES_INTRIN
    _mm_lfence();
#else
    __asm
    {
        _emit   0x0f    /* lfence */
        _emit   0xae
        _emit   0xe8
    }
#endif
}
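

/*
 * Usage sketch (illustrative only): a producer/consumer handshake where the
 * payload must be globally visible before the ready flag.  The two volatile
 * globals are assumptions for the example.
 *
 *      // Producer:
 *      g_uPayload = uValue;
 *      ASMWriteFenceSSE();     // payload reaches memory before the flag.
 *      g_fReady = true;
 *
 *      // Consumer:
 *      while (!g_fReady)
 *          ;
 *      ASMReadFenceSSE2();     // don't read the payload ahead of the flag.
 *      uValue = g_uPayload;
 */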

#if !defined(_MSC_VER) || !defined(RT_ARCH_AMD64)

/**
 * Clears the AC bit in the EFLAGS register.
 * Requires the X86_CPUID_STEXT_FEATURE_EBX_SMAP CPUID bit set.
 * Must be executed in ring-0 (R0).
 */
DECLINLINE(void) ASMClearAC(void)
{
#if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__ (".byte 0x0f,0x01,0xca\n\t"); /* clac */
#else
    __asm
    {
        _emit   0x0f    /* clac */
        _emit   0x01
        _emit   0xca
    }
#endif
}


/**
 * Sets the AC bit in the EFLAGS register.
 * Requires the X86_CPUID_STEXT_FEATURE_EBX_SMAP CPUID bit set.
 * Must be executed in ring-0 (R0).
 */
DECLINLINE(void) ASMSetAC(void)
{
#if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__ (".byte 0x0f,0x01,0xcb\n\t"); /* stac */
#else
    __asm
    {
        _emit   0x0f    /* stac */
        _emit   0x01
        _emit   0xcb
    }
#endif
}
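

/*
 * Usage sketch (illustrative only): with SMAP enabled, ring-0 code must
 * bracket legitimate accesses to user-mode memory with STAC/CLAC, which is
 * exactly what these helpers emit.  ExampleCopyFromUser is a hypothetical
 * copy routine, not an IPRT function.
 *
 *      ASMSetAC();                             // STAC: allow user-page access.
 *      ExampleCopyFromUser(pvDst, pvSrc, cb);  // hypothetical.
 *      ASMClearAC();                           // CLAC: re-arm SMAP protection.
 */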

#endif /* !defined(_MSC_VER) || !defined(RT_ARCH_AMD64) */

/** @} */
#endif
