VirtualBox

source: vbox/trunk/include/iprt/asm-amd64-x86.h@ 76417

Last change on this file since 76417 was 75132, checked in by vboxsync, 6 years ago

iprt/asm.h,cdefs.h: More watcom adjustments.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 76.9 KB
1/** @file
2 * IPRT - AMD64 and x86 Specific Assembly Functions.
3 */
4
5/*
6 * Copyright (C) 2006-2017 Oracle Corporation
7 *
8 * This file is part of VirtualBox Open Source Edition (OSE), as
9 * available from http://www.virtualbox.org. This file is free software;
10 * you can redistribute it and/or modify it under the terms of the GNU
11 * General Public License (GPL) as published by the Free Software
12 * Foundation, in version 2 as it comes in the "COPYING" file of the
13 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
14 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
15 *
16 * The contents of this file may alternatively be used under the terms
17 * of the Common Development and Distribution License Version 1.0
18 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
19 * VirtualBox OSE distribution, in which case the provisions of the
20 * CDDL are applicable instead of those of the GPL.
21 *
22 * You may elect to license modified versions of this file under the
23 * terms and conditions of either the GPL or the CDDL or both.
24 */
25
26#ifndef ___iprt_asm_amd64_x86_h
27#define ___iprt_asm_amd64_x86_h
28
29#include <iprt/types.h>
30#include <iprt/assert.h>
31#if !defined(RT_ARCH_AMD64) && !defined(RT_ARCH_X86)
32# error "Not on AMD64 or x86"
33#endif
34
35#if defined(_MSC_VER) && RT_INLINE_ASM_USES_INTRIN
36# pragma warning(push)
37# pragma warning(disable:4668) /* Several incorrect __cplusplus uses. */
38# pragma warning(disable:4255) /* Incorrect __slwpcb prototype. */
39# include <intrin.h>
40# pragma warning(pop)
41 /* Emit the intrinsics at all optimization levels. */
42# pragma intrinsic(_ReadWriteBarrier)
43# pragma intrinsic(__cpuid)
44# pragma intrinsic(_enable)
45# pragma intrinsic(_disable)
46# pragma intrinsic(__rdtsc)
47# pragma intrinsic(__readmsr)
48# pragma intrinsic(__writemsr)
49# pragma intrinsic(__outbyte)
50# pragma intrinsic(__outbytestring)
51# pragma intrinsic(__outword)
52# pragma intrinsic(__outwordstring)
53# pragma intrinsic(__outdword)
54# pragma intrinsic(__outdwordstring)
55# pragma intrinsic(__inbyte)
56# pragma intrinsic(__inbytestring)
57# pragma intrinsic(__inword)
58# pragma intrinsic(__inwordstring)
59# pragma intrinsic(__indword)
60# pragma intrinsic(__indwordstring)
61# pragma intrinsic(__invlpg)
62# pragma intrinsic(__wbinvd)
63# pragma intrinsic(__readcr0)
64# pragma intrinsic(__readcr2)
65# pragma intrinsic(__readcr3)
66# pragma intrinsic(__readcr4)
67# pragma intrinsic(__writecr0)
68# pragma intrinsic(__writecr3)
69# pragma intrinsic(__writecr4)
70# pragma intrinsic(__readdr)
71# pragma intrinsic(__writedr)
72# ifdef RT_ARCH_AMD64
73# pragma intrinsic(__readcr8)
74# pragma intrinsic(__writecr8)
75# endif
76# if RT_INLINE_ASM_USES_INTRIN >= 14
77# pragma intrinsic(__halt)
78# endif
79# if RT_INLINE_ASM_USES_INTRIN >= 15
80# pragma intrinsic(__readeflags)
81# pragma intrinsic(__writeeflags)
82# pragma intrinsic(__rdtscp)
83# endif
84#endif
85
86
87/*
88 * Undefine all symbols we have Watcom C/C++ #pragma aux'es for.
89 */
90#if defined(__WATCOMC__) && ARCH_BITS == 16
91# include "asm-amd64-x86-watcom-16.h"
92#elif defined(__WATCOMC__) && ARCH_BITS == 32
93# include "asm-amd64-x86-watcom-32.h"
94#endif
95
96
97/** @defgroup grp_rt_asm_amd64_x86 AMD64 and x86 Specific ASM Routines
98 * @ingroup grp_rt_asm
99 * @{
100 */
101
102/** @todo find a more proper place for these structures? */
103
104#pragma pack(1)
105/** IDTR */
106typedef struct RTIDTR
107{
108 /** Size of the IDT. */
109 uint16_t cbIdt;
110 /** Address of the IDT. */
111#if ARCH_BITS != 64
112 uint32_t pIdt;
113#else
114 uint64_t pIdt;
115#endif
116} RTIDTR, RT_FAR *PRTIDTR;
117#pragma pack()
118
119#pragma pack(1)
120/** @internal */
121typedef struct RTIDTRALIGNEDINT
122{
123 /** Alignment padding. */
124 uint16_t au16Padding[ARCH_BITS == 64 ? 3 : 1];
125 /** The IDTR structure. */
126 RTIDTR Idtr;
127} RTIDTRALIGNEDINT;
128#pragma pack()
129
130/** Wrapped RTIDTR for preventing misalignment exceptions. */
131typedef union RTIDTRALIGNED
132{
133 /** Try to make sure this structure has optimal alignment. */
134 uint64_t auAlignmentHack[ARCH_BITS == 64 ? 2 : 1];
135 /** Aligned structure. */
136 RTIDTRALIGNEDINT s;
137} RTIDTRALIGNED;
138AssertCompileSize(RTIDTRALIGNED, ((ARCH_BITS == 64) + 1) * 8);
139/** Pointer to an RTIDTR alignment wrapper. */
140typedef RTIDTRALIGNED RT_FAR *PRIDTRALIGNED;
141
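The padding arithmetic is easier to see with concrete offsets; a minimal compile-time sketch (illustrative only, RT_UOFFSETOF comes from iprt/cdefs.h):

    /* 64-bit: 3 padding words + the 2-byte cbIdt place the 8-byte pIdt at offset 8.
       32-bit: 1 padding word + the 2-byte cbIdt place the 4-byte pIdt at offset 4. */
    AssertCompile(RT_UOFFSETOF(RTIDTRALIGNEDINT, Idtr.pIdt) == (ARCH_BITS == 64 ? 8 : 4));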
142
143#pragma pack(1)
144/** GDTR */
145typedef struct RTGDTR
146{
147 /** Size of the GDT. */
148 uint16_t cbGdt;
149 /** Address of the GDT. */
150#if ARCH_BITS != 64
151 uint32_t pGdt;
152#else
153 uint64_t pGdt;
154#endif
155} RTGDTR, RT_FAR *PRTGDTR;
156#pragma pack()
157
158#pragma pack(1)
159/** @internal */
160typedef struct RTGDTRALIGNEDINT
161{
162 /** Alignment padding. */
163 uint16_t au16Padding[ARCH_BITS == 64 ? 3 : 1];
164 /** The GDTR structure. */
165 RTGDTR Gdtr;
166} RTGDTRALIGNEDINT;
167#pragma pack()
168
169/** Wrapped RTGDTR for preventing misalignment exceptions. */
170typedef union RTGDTRALIGNED
171{
172 /** Try to make sure this structure has optimal alignment. */
173 uint64_t auAlignmentHack[ARCH_BITS == 64 ? 2 : 1];
174 /** Aligned structure. */
175 RTGDTRALIGNEDINT s;
176} RTGDTRALIGNED;
177AssertCompileSize(RTGDTRALIGNED, ((ARCH_BITS == 64) + 1) * 8);
178/** Pointer to an RTGDTR alignment wrapper. */
179typedef RTGDTRALIGNED RT_FAR *PRGDTRALIGNED;
180
181
182/**
183 * Gets the content of the IDTR CPU register.
184 * @param pIdtr Where to store the IDTR contents.
185 */
186#if RT_INLINE_ASM_EXTERNAL
187RT_ASM_DECL_PRAGMA_WATCOM(void) ASMGetIDTR(PRTIDTR pIdtr);
188#else
189DECLINLINE(void) ASMGetIDTR(PRTIDTR pIdtr)
190{
191# if RT_INLINE_ASM_GNU_STYLE
192 __asm__ __volatile__("sidt %0" : "=m" (*pIdtr));
193# else
194 __asm
195 {
196# ifdef RT_ARCH_AMD64
197 mov rax, [pIdtr]
198 sidt [rax]
199# else
200 mov eax, [pIdtr]
201 sidt [eax]
202# endif
203 }
204# endif
205}
206#endif
207
208
209/**
210 * Gets the content of the IDTR.LIMIT CPU register.
211 * @returns IDTR limit.
212 */
213#if RT_INLINE_ASM_EXTERNAL
214RT_ASM_DECL_PRAGMA_WATCOM(uint16_t) ASMGetIdtrLimit(void);
215#else
216DECLINLINE(uint16_t) ASMGetIdtrLimit(void)
217{
218 RTIDTRALIGNED TmpIdtr;
219# if RT_INLINE_ASM_GNU_STYLE
220 __asm__ __volatile__("sidt %0" : "=m" (TmpIdtr.s.Idtr));
221# else
222 __asm
223 {
224 sidt [TmpIdtr.s.Idtr]
225 }
226# endif
227 return TmpIdtr.s.Idtr.cbIdt;
228}
229#endif
230
231
232/**
233 * Sets the content of the IDTR CPU register.
234 * @param pIdtr Where to load the IDTR contents from.
235 */
236#if RT_INLINE_ASM_EXTERNAL
237RT_ASM_DECL_PRAGMA_WATCOM(void) ASMSetIDTR(const RTIDTR RT_FAR *pIdtr);
238#else
239DECLINLINE(void) ASMSetIDTR(const RTIDTR RT_FAR *pIdtr)
240{
241# if RT_INLINE_ASM_GNU_STYLE
242 __asm__ __volatile__("lidt %0" : : "m" (*pIdtr));
243# else
244 __asm
245 {
246# ifdef RT_ARCH_AMD64
247 mov rax, [pIdtr]
248 lidt [rax]
249# else
250 mov eax, [pIdtr]
251 lidt [eax]
252# endif
253 }
254# endif
255}
256#endif
257
258
259/**
260 * Gets the content of the GDTR CPU register.
261 * @param pGdtr Where to store the GDTR contents.
262 */
263#if RT_INLINE_ASM_EXTERNAL
264RT_ASM_DECL_PRAGMA_WATCOM(void) ASMGetGDTR(PRTGDTR pGdtr);
265#else
266DECLINLINE(void) ASMGetGDTR(PRTGDTR pGdtr)
267{
268# if RT_INLINE_ASM_GNU_STYLE
269 __asm__ __volatile__("sgdt %0" : "=m" (*pGdtr));
270# else
271 __asm
272 {
273# ifdef RT_ARCH_AMD64
274 mov rax, [pGdtr]
275 sgdt [rax]
276# else
277 mov eax, [pGdtr]
278 sgdt [eax]
279# endif
280 }
281# endif
282}
283#endif
284
285
286/**
287 * Sets the content of the GDTR CPU register.
288 * @param pGdtr Where to load the GDTR contents from.
289 */
290#if RT_INLINE_ASM_EXTERNAL
291RT_ASM_DECL_PRAGMA_WATCOM(void) ASMSetGDTR(const RTGDTR RT_FAR *pGdtr);
292#else
293DECLINLINE(void) ASMSetGDTR(const RTGDTR RT_FAR *pGdtr)
294{
295# if RT_INLINE_ASM_GNU_STYLE
296 __asm__ __volatile__("lgdt %0" : : "m" (*pGdtr));
297# else
298 __asm
299 {
300# ifdef RT_ARCH_AMD64
301 mov rax, [pGdtr]
302 lgdt [rax]
303# else
304 mov eax, [pGdtr]
305 lgdt [eax]
306# endif
307 }
308# endif
309}
310#endif
311
312
313
314/**
315 * Get the cs register.
316 * @returns cs.
317 */
318#if RT_INLINE_ASM_EXTERNAL
319RT_ASM_DECL_PRAGMA_WATCOM(RTSEL) ASMGetCS(void);
320#else
321DECLINLINE(RTSEL) ASMGetCS(void)
322{
323 RTSEL SelCS;
324# if RT_INLINE_ASM_GNU_STYLE
325 __asm__ __volatile__("movw %%cs, %0\n\t" : "=r" (SelCS));
326# else
327 __asm
328 {
329 mov ax, cs
330 mov [SelCS], ax
331 }
332# endif
333 return SelCS;
334}
335#endif
336
337
338/**
339 * Get the DS register.
340 * @returns DS.
341 */
342#if RT_INLINE_ASM_EXTERNAL
343RT_ASM_DECL_PRAGMA_WATCOM(RTSEL) ASMGetDS(void);
344#else
345DECLINLINE(RTSEL) ASMGetDS(void)
346{
347 RTSEL SelDS;
348# if RT_INLINE_ASM_GNU_STYLE
349 __asm__ __volatile__("movw %%ds, %0\n\t" : "=r" (SelDS));
350# else
351 __asm
352 {
353 mov ax, ds
354 mov [SelDS], ax
355 }
356# endif
357 return SelDS;
358}
359#endif
360
361
362/**
363 * Get the ES register.
364 * @returns ES.
365 */
366#if RT_INLINE_ASM_EXTERNAL
367RT_ASM_DECL_PRAGMA_WATCOM(RTSEL) ASMGetES(void);
368#else
369DECLINLINE(RTSEL) ASMGetES(void)
370{
371 RTSEL SelES;
372# if RT_INLINE_ASM_GNU_STYLE
373 __asm__ __volatile__("movw %%es, %0\n\t" : "=r" (SelES));
374# else
375 __asm
376 {
377 mov ax, es
378 mov [SelES], ax
379 }
380# endif
381 return SelES;
382}
383#endif
384
385
386/**
387 * Get the FS register.
388 * @returns FS.
389 */
390#if RT_INLINE_ASM_EXTERNAL
391RT_ASM_DECL_PRAGMA_WATCOM(RTSEL) ASMGetFS(void);
392#else
393DECLINLINE(RTSEL) ASMGetFS(void)
394{
395 RTSEL SelFS;
396# if RT_INLINE_ASM_GNU_STYLE
397 __asm__ __volatile__("movw %%fs, %0\n\t" : "=r" (SelFS));
398# else
399 __asm
400 {
401 mov ax, fs
402 mov [SelFS], ax
403 }
404# endif
405 return SelFS;
406}
407#endif
408
409
410/**
411 * Get the GS register.
412 * @returns GS.
413 */
414#if RT_INLINE_ASM_EXTERNAL
415RT_ASM_DECL_PRAGMA_WATCOM(RTSEL) ASMGetGS(void);
416#else
417DECLINLINE(RTSEL) ASMGetGS(void)
418{
419 RTSEL SelGS;
420# if RT_INLINE_ASM_GNU_STYLE
421 __asm__ __volatile__("movw %%gs, %0\n\t" : "=r" (SelGS));
422# else
423 __asm
424 {
425 mov ax, gs
426 mov [SelGS], ax
427 }
428# endif
429 return SelGS;
430}
431#endif
432
433
434/**
435 * Get the SS register.
436 * @returns SS.
437 */
438#if RT_INLINE_ASM_EXTERNAL
439RT_ASM_DECL_PRAGMA_WATCOM(RTSEL) ASMGetSS(void);
440#else
441DECLINLINE(RTSEL) ASMGetSS(void)
442{
443 RTSEL SelSS;
444# if RT_INLINE_ASM_GNU_STYLE
445 __asm__ __volatile__("movw %%ss, %0\n\t" : "=r" (SelSS));
446# else
447 __asm
448 {
449 mov ax, ss
450 mov [SelSS], ax
451 }
452# endif
453 return SelSS;
454}
455#endif
456
457
458/**
459 * Get the TR register.
460 * @returns TR.
461 */
462#if RT_INLINE_ASM_EXTERNAL
463RT_ASM_DECL_PRAGMA_WATCOM(RTSEL) ASMGetTR(void);
464#else
465DECLINLINE(RTSEL) ASMGetTR(void)
466{
467 RTSEL SelTR;
468# if RT_INLINE_ASM_GNU_STYLE
469 __asm__ __volatile__("str %w0\n\t" : "=r" (SelTR));
470# else
471 __asm
472 {
473 str ax
474 mov [SelTR], ax
475 }
476# endif
477 return SelTR;
478}
479#endif
480
481
482/**
483 * Get the LDTR register.
484 * @returns LDTR.
485 */
486#if RT_INLINE_ASM_EXTERNAL
487RT_ASM_DECL_PRAGMA_WATCOM(RTSEL) ASMGetLDTR(void);
488#else
489DECLINLINE(RTSEL) ASMGetLDTR(void)
490{
491 RTSEL SelLDTR;
492# if RT_INLINE_ASM_GNU_STYLE
493 __asm__ __volatile__("sldt %w0\n\t" : "=r" (SelLDTR));
494# else
495 __asm
496 {
497 sldt ax
498 mov [SelLDTR], ax
499 }
500# endif
501 return SelLDTR;
502}
503#endif
504
505
506/**
507 * Get the access rights for the segment selector.
508 *
509 * @returns The access rights on success or UINT32_MAX on failure.
510 * @param uSel The selector value.
511 *
512 * @remarks Using UINT32_MAX for failure is chosen because valid access rights
513 * always have bits 0:7 as 0 (on both Intel & AMD).
514 */
515#if RT_INLINE_ASM_EXTERNAL
516RT_ASM_DECL_PRAGMA_WATCOM(uint32_t) ASMGetSegAttr(uint32_t uSel);
517#else
518DECLINLINE(uint32_t) ASMGetSegAttr(uint32_t uSel)
519{
520 uint32_t uAttr;
521 /* LAR only accesses the low 16 bits of the source operand, but a 32-bit
522 destination register (eax) is required for getting the full access rights. */
523# if RT_INLINE_ASM_GNU_STYLE
524 __asm__ __volatile__("lar %1, %%eax\n\t"
525 "jz done%=\n\t"
526 "movl $0xffffffff, %%eax\n\t"
527 "done%=:\n\t"
528 "movl %%eax, %0\n\t"
529 : "=r" (uAttr)
530 : "r" (uSel)
531 : "cc", "%eax");
532# else
533 __asm
534 {
535 lar eax, [uSel]
536 jz done
537 mov eax, 0ffffffffh
538 done:
539 mov [uAttr], eax
540 }
541# endif
542 return uAttr;
543}
544#endif
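
A hedged usage sketch (hypothetical caller; bit 15 of the LAR result is the descriptor P/present bit per the Intel and AMD manuals):

    uint32_t const fAttr = ASMGetSegAttr(ASMGetSS());
    if (fAttr != UINT32_MAX && (fAttr & RT_BIT_32(15))) /* segment present */
    {
        /* Bits 8..15 of fAttr hold the access byte (type, S, DPL, P). */
    }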
545
546
547/**
548 * Get the [RE]FLAGS register.
549 * @returns [RE]FLAGS.
550 */
551#if RT_INLINE_ASM_EXTERNAL && RT_INLINE_ASM_USES_INTRIN < 15
552RT_ASM_DECL_PRAGMA_WATCOM(RTCCUINTREG) ASMGetFlags(void);
553#else
554DECLINLINE(RTCCUINTREG) ASMGetFlags(void)
555{
556 RTCCUINTREG uFlags;
557# if RT_INLINE_ASM_GNU_STYLE
558# ifdef RT_ARCH_AMD64
559 __asm__ __volatile__("pushfq\n\t"
560 "popq %0\n\t"
561 : "=r" (uFlags));
562# else
563 __asm__ __volatile__("pushfl\n\t"
564 "popl %0\n\t"
565 : "=r" (uFlags));
566# endif
567# elif RT_INLINE_ASM_USES_INTRIN >= 15
568 uFlags = __readeflags();
569# else
570 __asm
571 {
572# ifdef RT_ARCH_AMD64
573 pushfq
574 pop [uFlags]
575# else
576 pushfd
577 pop [uFlags]
578# endif
579 }
580# endif
581 return uFlags;
582}
583#endif
584
585
586/**
587 * Set the [RE]FLAGS register.
588 * @param uFlags The new [RE]FLAGS value.
589 */
590#if RT_INLINE_ASM_EXTERNAL && RT_INLINE_ASM_USES_INTRIN < 15
591RT_ASM_DECL_PRAGMA_WATCOM(void) ASMSetFlags(RTCCUINTREG uFlags);
592#else
593DECLINLINE(void) ASMSetFlags(RTCCUINTREG uFlags)
594{
595# if RT_INLINE_ASM_GNU_STYLE
596# ifdef RT_ARCH_AMD64
597 __asm__ __volatile__("pushq %0\n\t"
598 "popfq\n\t"
599 : : "g" (uFlags));
600# else
601 __asm__ __volatile__("pushl %0\n\t"
602 "popfl\n\t"
603 : : "g" (uFlags));
604# endif
605# elif RT_INLINE_ASM_USES_INTRIN >= 15
606 __writeeflags(uFlags);
607# else
608 __asm
609 {
610# ifdef RT_ARCH_AMD64
611 push [uFlags]
612 popfq
613# else
614 push [uFlags]
615 popfd
616# endif
617 }
618# endif
619}
620#endif
621
622
623/**
624 * Modifies the [RE]FLAGS register.
625 * @returns Original value.
626 * @param fAndEfl Flags to keep (applied first).
627 * @param fOrEfl Flags to be set.
628 */
629#if RT_INLINE_ASM_EXTERNAL && RT_INLINE_ASM_USES_INTRIN < 15
630RT_ASM_DECL_PRAGMA_WATCOM(RTCCUINTREG) ASMChangeFlags(RTCCUINTREG fAndEfl, RTCCUINTREG fOrEfl);
631#else
632DECLINLINE(RTCCUINTREG) ASMChangeFlags(RTCCUINTREG fAndEfl, RTCCUINTREG fOrEfl)
633{
634 RTCCUINTREG fOldEfl;
635# if RT_INLINE_ASM_GNU_STYLE
636# ifdef RT_ARCH_AMD64
637 __asm__ __volatile__("pushfq\n\t"
638 "movq (%%rsp), %0\n\t"
639 "andq %0, %1\n\t"
640 "orq %3, %1\n\t"
641 "mov %1, (%%rsp)\n\t"
642 "popfq\n\t"
643 : "=&r" (fOldEfl),
644 "=r" (fAndEfl)
645 : "1" (fAndEfl),
646 "rn" (fOrEfl) );
647# else
648 __asm__ __volatile__("pushfl\n\t"
649 "movl (%%esp), %0\n\t"
650 "andl %1, (%%esp)\n\t"
651 "orl %2, (%%esp)\n\t"
652 "popfl\n\t"
653 : "=&r" (fOldEfl)
654 : "rn" (fAndEfl),
655 "rn" (fOrEfl) );
656# endif
657# elif RT_INLINE_ASM_USES_INTRIN >= 15
658 fOldEfl = __readeflags();
659 __writeeflags((fOldEfl & fAndEfl) | fOrEfl);
660# else
661 __asm
662 {
663# ifdef RT_ARCH_AMD64
664 mov rdx, [fAndEfl]
665 mov rcx, [fOrEfl]
666 pushfq
667 mov rax, [rsp]
668 and rdx, rax
669 or rdx, rcx
670 mov [rsp], rdx
671 popfq
672 mov [fOldEfl], rax
673# else
674 mov edx, [fAndEfl]
675 mov ecx, [fOrEfl]
676 pushfd
677 mov eax, [esp]
678 and edx, eax
679 or edx, ecx
680 mov [esp], edx
681 popfd
682 mov [fOldEfl], eax
683# endif
684 }
685# endif
686 return fOldEfl;
687}
688#endif
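
For instance, a caller can temporarily clear a single flag and restore it afterwards; this sketch assumes EFLAGS.AC (bit 18) as the example flag:

    RTCCUINTREG const fSaved = ASMChangeFlags(~(RTCCUINTREG)RT_BIT_32(18), 0);
    /* ... code that must run with alignment checking disabled ... */
    ASMSetFlags(fSaved);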
689
690
691/**
692 * Modifies the [RE]FLAGS register by ORing in one or more flags.
693 * @returns Original value.
694 * @param fOrEfl The flags to be set (ORed in).
695 */
696#if RT_INLINE_ASM_EXTERNAL && RT_INLINE_ASM_USES_INTRIN < 15
697RT_ASM_DECL_PRAGMA_WATCOM(RTCCUINTREG) ASMAddFlags(RTCCUINTREG fOrEfl);
698#else
699DECLINLINE(RTCCUINTREG) ASMAddFlags(RTCCUINTREG fOrEfl)
700{
701 RTCCUINTREG fOldEfl;
702# if RT_INLINE_ASM_GNU_STYLE
703# ifdef RT_ARCH_AMD64
704 __asm__ __volatile__("pushfq\n\t"
705 "movq (%%rsp), %0\n\t"
706 "orq %1, (%%rsp)\n\t"
707 "popfq\n\t"
708 : "=&r" (fOldEfl)
709 : "rn" (fOrEfl) );
710# else
711 __asm__ __volatile__("pushfl\n\t"
712 "movl (%%esp), %0\n\t"
713 "orl %1, (%%esp)\n\t"
714 "popfl\n\t"
715 : "=&r" (fOldEfl)
716 : "rn" (fOrEfl) );
717# endif
718# elif RT_INLINE_ASM_USES_INTRIN >= 15
719 fOldEfl = __readeflags();
720 __writeeflags(fOldEfl | fOrEfl);
721# else
722 __asm
723 {
724# ifdef RT_ARCH_AMD64
725 mov rcx, [fOrEfl]
726 pushfq
727 mov rdx, [rsp]
728 or [rsp], rcx
729 popfq
730 mov [fOldEfl], rdx
731# else
732 mov ecx, [fOrEfl]
733 pushfd
734 mov edx, [esp]
735 or [esp], ecx
736 popfd
737 mov [fOldEfl], edx
738# endif
739 }
740# endif
741 return fOldEfl;
742}
743#endif
744
745
746/**
747 * Modifies the [RE]FLAGS register by AND'ing out one or more flags.
748 * @returns Original value.
749 * @param fAndEfl The flags to keep.
750 */
751#if RT_INLINE_ASM_EXTERNAL && RT_INLINE_ASM_USES_INTRIN < 15
752RT_ASM_DECL_PRAGMA_WATCOM(RTCCUINTREG) ASMClearFlags(RTCCUINTREG fAndEfl);
753#else
754DECLINLINE(RTCCUINTREG) ASMClearFlags(RTCCUINTREG fAndEfl)
755{
756 RTCCUINTREG fOldEfl;
757# if RT_INLINE_ASM_GNU_STYLE
758# ifdef RT_ARCH_AMD64
759 __asm__ __volatile__("pushfq\n\t"
760 "movq (%%rsp), %0\n\t"
761 "andq %1, (%%rsp)\n\t"
762 "popfq\n\t"
763 : "=&r" (fOldEfl)
764 : "rn" (fAndEfl) );
765# else
766 __asm__ __volatile__("pushfl\n\t"
767 "movl (%%esp), %0\n\t"
768 "andl %1, (%%esp)\n\t"
769 "popfl\n\t"
770 : "=&r" (fOldEfl)
771 : "rn" (fAndEfl) );
772# endif
773# elif RT_INLINE_ASM_USES_INTRIN >= 15
774 fOldEfl = __readeflags();
775 __writeeflags(fOldEfl & fAndEfl);
776# else
777 __asm
778 {
779# ifdef RT_ARCH_AMD64
780 mov rdx, [fAndEfl]
781 pushfq
782 mov rax, [rsp]
783 and [rsp], rdx
784 popfq
785 mov [fOldEfl], rax
786# else
787 mov edx, [fAndEfl]
788 pushfd
789 mov eax, [esp]
790 and [esp], edx
791 popfd
792 mov [fOldEfl], eax
793# endif
794 }
795# endif
796 return fOldEfl;
797}
798#endif
799
800
801/**
802 * Gets the content of the CPU timestamp counter register.
803 *
804 * @returns TSC.
805 */
806#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
807RT_ASM_DECL_PRAGMA_WATCOM(uint64_t) ASMReadTSC(void);
808#else
809DECLINLINE(uint64_t) ASMReadTSC(void)
810{
811 RTUINT64U u;
812# if RT_INLINE_ASM_GNU_STYLE
813 __asm__ __volatile__("rdtsc\n\t" : "=a" (u.s.Lo), "=d" (u.s.Hi));
814# else
815# if RT_INLINE_ASM_USES_INTRIN
816 u.u = __rdtsc();
817# else
818 __asm
819 {
820 rdtsc
821 mov [u.s.Lo], eax
822 mov [u.s.Hi], edx
823 }
824# endif
825# endif
826 return u.u;
827}
828#endif
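
A rough measurement sketch; note that rdtsc is not a serializing instruction and the TSC is not invariant on older CPUs, so treat the result as approximate:

    uint64_t const uStart  = ASMReadTSC();
    /* ... code under measurement ... */
    uint64_t const cCycles = ASMReadTSC() - uStart;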
829
830
831/**
832 * Gets the content of the CPU timestamp counter register and the
833 * associated AUX value.
834 *
835 * @returns TSC.
836 * @param puAux Where to store the AUX value.
837 */
838#if RT_INLINE_ASM_EXTERNAL && RT_INLINE_ASM_USES_INTRIN < 15
839RT_ASM_DECL_PRAGMA_WATCOM(uint64_t) ASMReadTscWithAux(uint32_t RT_FAR *puAux);
840#else
841DECLINLINE(uint64_t) ASMReadTscWithAux(uint32_t RT_FAR *puAux)
842{
843 RTUINT64U u;
844# if RT_INLINE_ASM_GNU_STYLE
845 /* rdtscp is not supported by ancient linux build VM of course :-( */
846 /*__asm__ __volatile__("rdtscp\n\t" : "=a" (u.s.Lo), "=d" (u.s.Hi), "=c" (*puAux)); */
847 __asm__ __volatile__(".byte 0x0f,0x01,0xf9\n\t" : "=a" (u.s.Lo), "=d" (u.s.Hi), "=c" (*puAux));
848# else
849# if RT_INLINE_ASM_USES_INTRIN >= 15
850 u.u = __rdtscp(puAux);
851# else
852 __asm
853 {
854 rdtscp
855 mov [u.s.Lo], eax
856 mov [u.s.Hi], edx
857 mov eax, [puAux]
858 mov [eax], ecx
859 }
860# endif
861# endif
862 return u.u;
863}
864#endif
865
866
867/**
868 * Performs the cpuid instruction returning all registers.
869 *
870 * @param uOperator CPUID operation (eax).
871 * @param pvEAX Where to store eax.
872 * @param pvEBX Where to store ebx.
873 * @param pvECX Where to store ecx.
874 * @param pvEDX Where to store edx.
875 * @remark We're using void pointers to ease the use of special bitfield structures and such.
876 */
877#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
878DECLASM(void) ASMCpuId(uint32_t uOperator, void RT_FAR *pvEAX, void RT_FAR *pvEBX, void RT_FAR *pvECX, void RT_FAR *pvEDX);
879#else
880DECLINLINE(void) ASMCpuId(uint32_t uOperator, void RT_FAR *pvEAX, void RT_FAR *pvEBX, void RT_FAR *pvECX, void RT_FAR *pvEDX)
881{
882# if RT_INLINE_ASM_GNU_STYLE
883# ifdef RT_ARCH_AMD64
884 RTCCUINTREG uRAX, uRBX, uRCX, uRDX;
885 __asm__ __volatile__ ("cpuid\n\t"
886 : "=a" (uRAX),
887 "=b" (uRBX),
888 "=c" (uRCX),
889 "=d" (uRDX)
890 : "0" (uOperator), "2" (0));
891 *(uint32_t RT_FAR *)pvEAX = (uint32_t)uRAX;
892 *(uint32_t RT_FAR *)pvEBX = (uint32_t)uRBX;
893 *(uint32_t RT_FAR *)pvECX = (uint32_t)uRCX;
894 *(uint32_t RT_FAR *)pvEDX = (uint32_t)uRDX;
895# else
896 __asm__ __volatile__ ("xchgl %%ebx, %1\n\t"
897 "cpuid\n\t"
898 "xchgl %%ebx, %1\n\t"
899 : "=a" (*(uint32_t *)pvEAX),
900 "=r" (*(uint32_t *)pvEBX),
901 "=c" (*(uint32_t *)pvECX),
902 "=d" (*(uint32_t *)pvEDX)
903 : "0" (uOperator), "2" (0));
904# endif
905
906# elif RT_INLINE_ASM_USES_INTRIN
907 int aInfo[4];
908 __cpuid(aInfo, uOperator);
909 *(uint32_t RT_FAR *)pvEAX = aInfo[0];
910 *(uint32_t RT_FAR *)pvEBX = aInfo[1];
911 *(uint32_t RT_FAR *)pvECX = aInfo[2];
912 *(uint32_t RT_FAR *)pvEDX = aInfo[3];
913
914# else
915 uint32_t uEAX;
916 uint32_t uEBX;
917 uint32_t uECX;
918 uint32_t uEDX;
919 __asm
920 {
921 push ebx
922 mov eax, [uOperator]
923 cpuid
924 mov [uEAX], eax
925 mov [uEBX], ebx
926 mov [uECX], ecx
927 mov [uEDX], edx
928 pop ebx
929 }
930 *(uint32_t RT_FAR *)pvEAX = uEAX;
931 *(uint32_t RT_FAR *)pvEBX = uEBX;
932 *(uint32_t RT_FAR *)pvECX = uECX;
933 *(uint32_t RT_FAR *)pvEDX = uEDX;
934# endif
935}
936#endif
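
For example, leaf 0 returns the vendor string split across EBX, EDX and ECX; a sketch assuming memcpy from string.h is available:

    uint32_t uEAX, uEBX, uECX, uEDX;
    char     szVendor[13];
    ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
    memcpy(&szVendor[0], &uEBX, 4);  /* e.g. "Genu" */
    memcpy(&szVendor[4], &uEDX, 4);  /*      "ineI" */
    memcpy(&szVendor[8], &uECX, 4);  /*      "ntel" */
    szVendor[12] = '\0';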
937
938
939/**
940 * Performs the CPUID instruction with EAX and ECX input returning ALL output
941 * registers.
942 *
943 * @param uOperator CPUID operation (eax).
944 * @param uIdxECX ecx index
945 * @param pvEAX Where to store eax.
946 * @param pvEBX Where to store ebx.
947 * @param pvECX Where to store ecx.
948 * @param pvEDX Where to store edx.
949 * @remark We're using void pointers to ease the use of special bitfield structures and such.
950 */
951#if RT_INLINE_ASM_EXTERNAL || RT_INLINE_ASM_USES_INTRIN
952DECLASM(void) ASMCpuId_Idx_ECX(uint32_t uOperator, uint32_t uIdxECX, void RT_FAR *pvEAX, void RT_FAR *pvEBX, void RT_FAR *pvECX, void RT_FAR *pvEDX);
953#else
954DECLINLINE(void) ASMCpuId_Idx_ECX(uint32_t uOperator, uint32_t uIdxECX, void RT_FAR *pvEAX, void RT_FAR *pvEBX, void RT_FAR *pvECX, void RT_FAR *pvEDX)
955{
956# if RT_INLINE_ASM_GNU_STYLE
957# ifdef RT_ARCH_AMD64
958 RTCCUINTREG uRAX, uRBX, uRCX, uRDX;
959 __asm__ ("cpuid\n\t"
960 : "=a" (uRAX),
961 "=b" (uRBX),
962 "=c" (uRCX),
963 "=d" (uRDX)
964 : "0" (uOperator),
965 "2" (uIdxECX));
966 *(uint32_t RT_FAR *)pvEAX = (uint32_t)uRAX;
967 *(uint32_t RT_FAR *)pvEBX = (uint32_t)uRBX;
968 *(uint32_t RT_FAR *)pvECX = (uint32_t)uRCX;
969 *(uint32_t RT_FAR *)pvEDX = (uint32_t)uRDX;
970# else
971 __asm__ ("xchgl %%ebx, %1\n\t"
972 "cpuid\n\t"
973 "xchgl %%ebx, %1\n\t"
974 : "=a" (*(uint32_t *)pvEAX),
975 "=r" (*(uint32_t *)pvEBX),
976 "=c" (*(uint32_t *)pvECX),
977 "=d" (*(uint32_t *)pvEDX)
978 : "0" (uOperator),
979 "2" (uIdxECX));
980# endif
981
982# elif RT_INLINE_ASM_USES_INTRIN
983 int aInfo[4];
984 __cpuidex(aInfo, uOperator, uIdxECX);
985 *(uint32_t RT_FAR *)pvEAX = aInfo[0];
986 *(uint32_t RT_FAR *)pvEBX = aInfo[1];
987 *(uint32_t RT_FAR *)pvECX = aInfo[2];
988 *(uint32_t RT_FAR *)pvEDX = aInfo[3];
989
990# else
991 uint32_t uEAX;
992 uint32_t uEBX;
993 uint32_t uECX;
994 uint32_t uEDX;
995 __asm
996 {
997 push ebx
998 mov eax, [uOperator]
999 mov ecx, [uIdxECX]
1000 cpuid
1001 mov [uEAX], eax
1002 mov [uEBX], ebx
1003 mov [uECX], ecx
1004 mov [uEDX], edx
1005 pop ebx
1006 }
1007 *(uint32_t RT_FAR *)pvEAX = uEAX;
1008 *(uint32_t RT_FAR *)pvEBX = uEBX;
1009 *(uint32_t RT_FAR *)pvECX = uECX;
1010 *(uint32_t RT_FAR *)pvEDX = uEDX;
1011# endif
1012}
1013#endif
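
Sub-leaf input matters for leaves like 0x7; a hedged sketch testing the SMEP bit (EBX bit 7 of leaf 7, sub-leaf 0, per the Intel SDM), assuming the caller has already validated that leaf 7 is supported:

    uint32_t uEAX, uEBX, uECX, uEDX;
    ASMCpuId_Idx_ECX(7, 0, &uEAX, &uEBX, &uECX, &uEDX);
    bool const fSmep = RT_BOOL(uEBX & RT_BIT_32(7));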
1014
1015
1016/**
1017 * CPUID variant that initializes all 4 registers before the CPUID instruction.
1018 *
1019 * @returns The EAX result value.
1020 * @param uOperator CPUID operation (eax).
1021 * @param uInitEBX The value to assign EBX prior to the CPUID instruction.
1022 * @param uInitECX The value to assign ECX prior to the CPUID instruction.
1023 * @param uInitEDX The value to assign EDX prior to the CPUID instruction.
1024 * @param pvEAX Where to store eax. Optional.
1025 * @param pvEBX Where to store ebx. Optional.
1026 * @param pvECX Where to store ecx. Optional.
1027 * @param pvEDX Where to store edx. Optional.
1028 */
1029DECLASM(uint32_t) ASMCpuIdExSlow(uint32_t uOperator, uint32_t uInitEBX, uint32_t uInitECX, uint32_t uInitEDX,
1030 void RT_FAR *pvEAX, void RT_FAR *pvEBX, void RT_FAR *pvECX, void RT_FAR *pvEDX);
1031
1032
1033/**
1034 * Performs the cpuid instruction returning ecx and edx.
1035 *
1036 * @param uOperator CPUID operation (eax).
1037 * @param pvECX Where to store ecx.
1038 * @param pvEDX Where to store edx.
1039 * @remark We're using void pointers to ease the use of special bitfield structures and such.
1040 */
1041#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1042RT_ASM_DECL_PRAGMA_WATCOM(void) ASMCpuId_ECX_EDX(uint32_t uOperator, void RT_FAR *pvECX, void RT_FAR *pvEDX);
1043#else
1044DECLINLINE(void) ASMCpuId_ECX_EDX(uint32_t uOperator, void RT_FAR *pvECX, void RT_FAR *pvEDX)
1045{
1046 uint32_t uEBX;
1047 ASMCpuId(uOperator, &uOperator, &uEBX, pvECX, pvEDX);
1048}
1049#endif
1050
1051
1052/**
1053 * Performs the cpuid instruction returning eax.
1054 *
1055 * @param uOperator CPUID operation (eax).
1056 * @returns EAX after cpuid operation.
1057 */
1058#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1059RT_ASM_DECL_PRAGMA_WATCOM(uint32_t) ASMCpuId_EAX(uint32_t uOperator);
1060#else
1061DECLINLINE(uint32_t) ASMCpuId_EAX(uint32_t uOperator)
1062{
1063 RTCCUINTREG xAX;
1064# if RT_INLINE_ASM_GNU_STYLE
1065# ifdef RT_ARCH_AMD64
1066 __asm__ ("cpuid"
1067 : "=a" (xAX)
1068 : "0" (uOperator)
1069 : "rbx", "rcx", "rdx");
1070# elif (defined(PIC) || defined(__PIC__)) && defined(__i386__)
1071 __asm__ ("push %%ebx\n\t"
1072 "cpuid\n\t"
1073 "pop %%ebx\n\t"
1074 : "=a" (xAX)
1075 : "0" (uOperator)
1076 : "ecx", "edx");
1077# else
1078 __asm__ ("cpuid"
1079 : "=a" (xAX)
1080 : "0" (uOperator)
1081 : "edx", "ecx", "ebx");
1082# endif
1083
1084# elif RT_INLINE_ASM_USES_INTRIN
1085 int aInfo[4];
1086 __cpuid(aInfo, uOperator);
1087 xAX = aInfo[0];
1088
1089# else
1090 __asm
1091 {
1092 push ebx
1093 mov eax, [uOperator]
1094 cpuid
1095 mov [xAX], eax
1096 pop ebx
1097 }
1098# endif
1099 return (uint32_t)xAX;
1100}
1101#endif
1102
1103
1104/**
1105 * Performs the cpuid instruction returning ebx.
1106 *
1107 * @param uOperator CPUID operation (eax).
1108 * @returns EBX after cpuid operation.
1109 */
1110#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1111RT_ASM_DECL_PRAGMA_WATCOM(uint32_t) ASMCpuId_EBX(uint32_t uOperator);
1112#else
1113DECLINLINE(uint32_t) ASMCpuId_EBX(uint32_t uOperator)
1114{
1115 RTCCUINTREG xBX;
1116# if RT_INLINE_ASM_GNU_STYLE
1117# ifdef RT_ARCH_AMD64
1118 RTCCUINTREG uSpill;
1119 __asm__ ("cpuid"
1120 : "=a" (uSpill),
1121 "=b" (xBX)
1122 : "0" (uOperator)
1123 : "rdx", "rcx");
1124# elif (defined(PIC) || defined(__PIC__)) && defined(__i386__)
1125 __asm__ ("push %%ebx\n\t"
1126 "cpuid\n\t"
1127 "mov %%ebx, %%edx\n\t"
1128 "pop %%ebx\n\t"
1129 : "=a" (uOperator),
1130 "=d" (xBX)
1131 : "0" (uOperator)
1132 : "ecx");
1133# else
1134 __asm__ ("cpuid"
1135 : "=a" (uOperator),
1136 "=b" (xBX)
1137 : "0" (uOperator)
1138 : "edx", "ecx");
1139# endif
1140
1141# elif RT_INLINE_ASM_USES_INTRIN
1142 int aInfo[4];
1143 __cpuid(aInfo, uOperator);
1144 xBX = aInfo[1];
1145
1146# else
1147 __asm
1148 {
1149 push ebx
1150 mov eax, [uOperator]
1151 cpuid
1152 mov [xBX], ebx
1153 pop ebx
1154 }
1155# endif
1156 return (uint32_t)xBX;
1157}
1158#endif
1159
1160
1161/**
1162 * Performs the cpuid instruction returning ecx.
1163 *
1164 * @param uOperator CPUID operation (eax).
1165 * @returns ECX after cpuid operation.
1166 */
1167#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1168RT_ASM_DECL_PRAGMA_WATCOM(uint32_t) ASMCpuId_ECX(uint32_t uOperator);
1169#else
1170DECLINLINE(uint32_t) ASMCpuId_ECX(uint32_t uOperator)
1171{
1172 RTCCUINTREG xCX;
1173# if RT_INLINE_ASM_GNU_STYLE
1174# ifdef RT_ARCH_AMD64
1175 RTCCUINTREG uSpill;
1176 __asm__ ("cpuid"
1177 : "=a" (uSpill),
1178 "=c" (xCX)
1179 : "0" (uOperator)
1180 : "rbx", "rdx");
1181# elif (defined(PIC) || defined(__PIC__)) && defined(__i386__)
1182 __asm__ ("push %%ebx\n\t"
1183 "cpuid\n\t"
1184 "pop %%ebx\n\t"
1185 : "=a" (uOperator),
1186 "=c" (xCX)
1187 : "0" (uOperator)
1188 : "edx");
1189# else
1190 __asm__ ("cpuid"
1191 : "=a" (uOperator),
1192 "=c" (xCX)
1193 : "0" (uOperator)
1194 : "ebx", "edx");
1195
1196# endif
1197
1198# elif RT_INLINE_ASM_USES_INTRIN
1199 int aInfo[4];
1200 __cpuid(aInfo, uOperator);
1201 xCX = aInfo[2];
1202
1203# else
1204 __asm
1205 {
1206 push ebx
1207 mov eax, [uOperator]
1208 cpuid
1209 mov [xCX], ecx
1210 pop ebx
1211 }
1212# endif
1213 return (uint32_t)xCX;
1214}
1215#endif
1216
1217
1218/**
1219 * Performs the cpuid instruction returning edx.
1220 *
1221 * @param uOperator CPUID operation (eax).
1222 * @returns EDX after cpuid operation.
1223 */
1224#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1225RT_ASM_DECL_PRAGMA_WATCOM(uint32_t) ASMCpuId_EDX(uint32_t uOperator);
1226#else
1227DECLINLINE(uint32_t) ASMCpuId_EDX(uint32_t uOperator)
1228{
1229 RTCCUINTREG xDX;
1230# if RT_INLINE_ASM_GNU_STYLE
1231# ifdef RT_ARCH_AMD64
1232 RTCCUINTREG uSpill;
1233 __asm__ ("cpuid"
1234 : "=a" (uSpill),
1235 "=d" (xDX)
1236 : "0" (uOperator)
1237 : "rbx", "rcx");
1238# elif (defined(PIC) || defined(__PIC__)) && defined(__i386__)
1239 __asm__ ("push %%ebx\n\t"
1240 "cpuid\n\t"
1241 "pop %%ebx\n\t"
1242 : "=a" (uOperator),
1243 "=d" (xDX)
1244 : "0" (uOperator)
1245 : "ecx");
1246# else
1247 __asm__ ("cpuid"
1248 : "=a" (uOperator),
1249 "=d" (xDX)
1250 : "0" (uOperator)
1251 : "ebx", "ecx");
1252# endif
1253
1254# elif RT_INLINE_ASM_USES_INTRIN
1255 int aInfo[4];
1256 __cpuid(aInfo, uOperator);
1257 xDX = aInfo[3];
1258
1259# else
1260 __asm
1261 {
1262 push ebx
1263 mov eax, [uOperator]
1264 cpuid
1265 mov [xDX], edx
1266 pop ebx
1267 }
1268# endif
1269 return (uint32_t)xDX;
1270}
1271#endif
1272
1273
1274/**
1275 * Checks if the current CPU supports CPUID.
1276 *
1277 * @returns true if CPUID is supported.
1278 */
1279#ifdef __WATCOMC__
1280DECLASM(bool) ASMHasCpuId(void);
1281#else
1282DECLINLINE(bool) ASMHasCpuId(void)
1283{
1284# ifdef RT_ARCH_AMD64
1285 return true; /* ASSUME that all amd64 compatible CPUs have cpuid. */
1286# else /* !RT_ARCH_AMD64 */
1287 bool fRet = false;
1288# if RT_INLINE_ASM_GNU_STYLE
1289 uint32_t u1;
1290 uint32_t u2;
1291 __asm__ ("pushf\n\t"
1292 "pop %1\n\t"
1293 "mov %1, %2\n\t"
1294 "xorl $0x200000, %1\n\t"
1295 "push %1\n\t"
1296 "popf\n\t"
1297 "pushf\n\t"
1298 "pop %1\n\t"
1299 "cmpl %1, %2\n\t"
1300 "setne %0\n\t"
1301 "push %2\n\t"
1302 "popf\n\t"
1303 : "=m" (fRet), "=r" (u1), "=r" (u2));
1304# else
1305 __asm
1306 {
1307 pushfd
1308 pop eax
1309 mov ebx, eax
1310 xor eax, 0200000h
1311 push eax
1312 popfd
1313 pushfd
1314 pop eax
1315 cmp eax, ebx
1316 setne fRet
1317 push ebx
1318 popfd
1319 }
1320# endif
1321 return fRet;
1322# endif /* !RT_ARCH_AMD64 */
1323}
1324#endif
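
On 32-bit hosts this check should gate any CPUID use; a minimal sketch combining it with the range helper declared further down:

    if (ASMHasCpuId())
    {
        uint32_t const uMaxLeaf = ASMCpuId_EAX(0);
        if (ASMIsValidStdRange(uMaxLeaf))
        {
            /* Leaves 1..uMaxLeaf may be queried. */
        }
    }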
1325
1326
1327/**
1328 * Gets the APIC ID of the current CPU.
1329 *
1330 * @returns the APIC ID.
1331 */
1332#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1333RT_ASM_DECL_PRAGMA_WATCOM(uint8_t) ASMGetApicId(void);
1334#else
1335DECLINLINE(uint8_t) ASMGetApicId(void)
1336{
1337 RTCCUINTREG xBX;
1338# if RT_INLINE_ASM_GNU_STYLE
1339# ifdef RT_ARCH_AMD64
1340 RTCCUINTREG uSpill;
1341 __asm__ __volatile__ ("cpuid"
1342 : "=a" (uSpill),
1343 "=b" (xBX)
1344 : "0" (1)
1345 : "rcx", "rdx");
1346# elif (defined(PIC) || defined(__PIC__)) && defined(__i386__)
1347 RTCCUINTREG uSpill;
1348 __asm__ __volatile__ ("mov %%ebx,%1\n\t"
1349 "cpuid\n\t"
1350 "xchgl %%ebx,%1\n\t"
1351 : "=a" (uSpill),
1352 "=rm" (xBX)
1353 : "0" (1)
1354 : "ecx", "edx");
1355# else
1356 RTCCUINTREG uSpill;
1357 __asm__ __volatile__ ("cpuid"
1358 : "=a" (uSpill),
1359 "=b" (xBX)
1360 : "0" (1)
1361 : "ecx", "edx");
1362# endif
1363
1364# elif RT_INLINE_ASM_USES_INTRIN
1365 int aInfo[4];
1366 __cpuid(aInfo, 1);
1367 xBX = aInfo[1];
1368
1369# else
1370 __asm
1371 {
1372 push ebx
1373 mov eax, 1
1374 cpuid
1375 mov [xBX], ebx
1376 pop ebx
1377 }
1378# endif
1379 return (uint8_t)(xBX >> 24);
1380}
1381#endif
1382
1383
1384/**
1385 * Tests if it is a genuine Intel CPU based on the ASMCpuId(0) output.
1386 *
1387 * @returns true/false.
1388 * @param uEBX EBX return from ASMCpuId(0)
1389 * @param uECX ECX return from ASMCpuId(0)
1390 * @param uEDX EDX return from ASMCpuId(0)
1391 */
1392DECLINLINE(bool) ASMIsIntelCpuEx(uint32_t uEBX, uint32_t uECX, uint32_t uEDX)
1393{
1394 return uEBX == UINT32_C(0x756e6547)
1395 && uECX == UINT32_C(0x6c65746e)
1396 && uEDX == UINT32_C(0x49656e69);
1397}
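
The magic constants are just the vendor string "GenuineIntel" viewed as three little-endian dwords; for instance the EBX value:

    /* "Genu" = 'G' | 'e'<<8 | 'n'<<16 | 'u'<<24; EDX ("ineI") and ECX ("ntel") follow the same pattern. */
    AssertCompile(((uint32_t)'G' | ((uint32_t)'e' << 8) | ((uint32_t)'n' << 16) | ((uint32_t)'u' << 24)) == UINT32_C(0x756e6547));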
1398
1399
1400/**
1401 * Tests if this is a genuine Intel CPU.
1402 *
1403 * @returns true/false.
1404 * @remarks ASSUMES that cpuid is supported by the CPU.
1405 */
1406DECLINLINE(bool) ASMIsIntelCpu(void)
1407{
1408 uint32_t uEAX, uEBX, uECX, uEDX;
1409 ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
1410 return ASMIsIntelCpuEx(uEBX, uECX, uEDX);
1411}
1412
1413
1414/**
1415 * Tests if it is an authentic AMD CPU based on the ASMCpuId(0) output.
1416 *
1417 * @returns true/false.
1418 * @param uEBX EBX return from ASMCpuId(0)
1419 * @param uECX ECX return from ASMCpuId(0)
1420 * @param uEDX EDX return from ASMCpuId(0)
1421 */
1422DECLINLINE(bool) ASMIsAmdCpuEx(uint32_t uEBX, uint32_t uECX, uint32_t uEDX)
1423{
1424 return uEBX == UINT32_C(0x68747541)
1425 && uECX == UINT32_C(0x444d4163)
1426 && uEDX == UINT32_C(0x69746e65);
1427}
1428
1429
1430/**
1431 * Tests if this is an authentic AMD CPU.
1432 *
1433 * @returns true/false.
1434 * @remarks ASSUMES that cpuid is supported by the CPU.
1435 */
1436DECLINLINE(bool) ASMIsAmdCpu(void)
1437{
1438 uint32_t uEAX, uEBX, uECX, uEDX;
1439 ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
1440 return ASMIsAmdCpuEx(uEBX, uECX, uEDX);
1441}
1442
1443
1444/**
1445 * Tests if it is a centaur hauling VIA CPU based on the ASMCpuId(0) output.
1446 *
1447 * @returns true/false.
1448 * @param uEBX EBX return from ASMCpuId(0).
1449 * @param uECX ECX return from ASMCpuId(0).
1450 * @param uEDX EDX return from ASMCpuId(0).
1451 */
1452DECLINLINE(bool) ASMIsViaCentaurCpuEx(uint32_t uEBX, uint32_t uECX, uint32_t uEDX)
1453{
1454 return uEBX == UINT32_C(0x746e6543)
1455 && uECX == UINT32_C(0x736c7561)
1456 && uEDX == UINT32_C(0x48727561);
1457}
1458
1459
1460/**
1461 * Tests if this is a centaur hauling VIA CPU.
1462 *
1463 * @returns true/false.
1464 * @remarks ASSUMES that cpuid is supported by the CPU.
1465 */
1466DECLINLINE(bool) ASMIsViaCentaurCpu(void)
1467{
1468 uint32_t uEAX, uEBX, uECX, uEDX;
1469 ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
1470 return ASMIsViaCentaurCpuEx(uEBX, uECX, uEDX);
1471}
1472
1473
1474/**
1475 * Checks whether ASMCpuId_EAX(0x00000000) indicates a valid range.
1476 *
1478 * @returns true/false.
1479 * @param uEAX The EAX value of CPUID leaf 0x00000000.
1480 *
1481 * @note This only succeeds if there are at least two leaves in the range.
1482 * @remarks The upper range limit is just some half reasonable value we've
1483 * picked out of thin air.
1484 */
1485DECLINLINE(bool) ASMIsValidStdRange(uint32_t uEAX)
1486{
1487 return uEAX >= UINT32_C(0x00000001) && uEAX <= UINT32_C(0x000fffff);
1488}
1489
1490
1491/**
1492 * Checks whether ASMCpuId_EAX(0x80000000) indicates a valid range.
1493 *
1494 * This only succeeds if there are at least two leaves in the range.
1495 *
1496 * @returns true/false.
1497 * @param uEAX The EAX value of CPUID leaf 0x80000000.
1498 *
1499 * @note This only succeeds if there are at least two leaves in the range.
1500 * @remarks The upper range limit is just some half reasonable value we've
1501 * picked out of thin air.
1502 */
1503DECLINLINE(bool) ASMIsValidExtRange(uint32_t uEAX)
1504{
1505 return uEAX >= UINT32_C(0x80000001) && uEAX <= UINT32_C(0x800fffff);
1506}
1507
1508
1509/**
1510 * Checks whether ASMCpuId_EAX(0x40000000) indicates a valid range.
1511 *
1512 * This only succeeds if there are at least two leaves in the range.
1513 *
1514 * @returns true/false.
1515 * @param uEAX The EAX value of CPUID leaf 0x40000000.
1516 *
1517 * @note Unlike ASMIsValidStdRange() and ASMIsValidExtRange(), a single leaf
1518 * is okay here. So, you always need to check the range.
1519 * @remarks The upper range limit is taken from the Intel docs.
1520 */
1521DECLINLINE(bool) ASMIsValidHypervisorRange(uint32_t uEAX)
1522{
1523 return uEAX >= UINT32_C(0x40000000) && uEAX <= UINT32_C(0x4fffffff);
1524}
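
A hedged detection sketch; on bare metal leaf 0x40000000 is reserved and may echo unrelated data, which is why this range check (ideally combined with the hypervisor-present bit, ECX bit 31 of leaf 1) is needed:

    uint32_t uHvMax, uHvEbx, uHvEcx, uHvEdx;
    ASMCpuId(UINT32_C(0x40000000), &uHvMax, &uHvEbx, &uHvEcx, &uHvEdx);
    if (ASMIsValidHypervisorRange(uHvMax))
    {
        /* uHvEbx/uHvEcx/uHvEdx hold the hypervisor vendor signature. */
    }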
1525
1526
1527/**
1528 * Extracts the CPU family from ASMCpuId(1) or ASMCpuId(0x80000001)
1529 *
1530 * @returns Family.
1531 * @param uEAX EAX return from ASMCpuId(1) or ASMCpuId(0x80000001).
1532 */
1533DECLINLINE(uint32_t) ASMGetCpuFamily(uint32_t uEAX)
1534{
1535 return ((uEAX >> 8) & 0xf) == 0xf
1536 ? ((uEAX >> 20) & 0x7f) + 0xf
1537 : ((uEAX >> 8) & 0xf);
1538}
1539
1540
1541/**
1542 * Extracts the CPU model from ASMCpuId(1) or ASMCpuId(0x80000001), Intel variant.
1543 *
1544 * @returns Model.
1545 * @param uEAX EAX from ASMCpuId(1) or ASMCpuId(0x80000001).
1546 */
1547DECLINLINE(uint32_t) ASMGetCpuModelIntel(uint32_t uEAX)
1548{
1549 return ((uEAX >> 8) & 0xf) == 0xf || (((uEAX >> 8) & 0xf) == 0x6) /* family! */
1550 ? ((uEAX >> 4) & 0xf) | ((uEAX >> 12) & 0xf0)
1551 : ((uEAX >> 4) & 0xf);
1552}
1553
1554
1555/**
1556 * Extracts the CPU model from ASMCpuId(1) or ASMCpuId(0x80000001), AMD variant.
1557 *
1558 * @returns Model.
1559 * @param uEAX EAX from ASMCpuId(1) or ASMCpuId(0x80000001).
1560 */
1561DECLINLINE(uint32_t) ASMGetCpuModelAMD(uint32_t uEAX)
1562{
1563 return ((uEAX >> 8) & 0xf) == 0xf
1564 ? ((uEAX >> 4) & 0xf) | ((uEAX >> 12) & 0xf0)
1565 : ((uEAX >> 4) & 0xf);
1566}
1567
1568
1569/**
1570 * Extracts the CPU model from ASMCpuId(1) or ASMCpuId(0x80000001)
1571 *
1572 * @returns Model.
1573 * @param uEAX EAX from ASMCpuId(1) or ASMCpuId(0x80000001).
1574 * @param fIntel Whether it's an Intel CPU. Use ASMIsIntelCpuEx() or ASMIsIntelCpu().
1575 */
1576DECLINLINE(uint32_t) ASMGetCpuModel(uint32_t uEAX, bool fIntel)
1577{
1578 return ((uEAX >> 8) & 0xf) == 0xf || (((uEAX >> 8) & 0xf) == 0x6 && fIntel) /* family! */
1579 ? ((uEAX >> 4) & 0xf) | ((uEAX >> 12) & 0xf0)
1580 : ((uEAX >> 4) & 0xf);
1581}
1582
1583
1584/**
1585 * Extracts the CPU stepping from ASMCpuId(1) or ASMCpuId(0x80000001)
1586 *
1587 * @returns Stepping.
1588 * @param uEAX EAX from ASMCpuId(1) or ASMCpuId(0x80000001).
1589 */
1590DECLINLINE(uint32_t) ASMGetCpuStepping(uint32_t uEAX)
1591{
1592 return uEAX & 0xf;
1593}
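
Worked example: the (sample) signature 0x000306A9 decodes to family 6, model 0x3A, stepping 9:

    uint32_t const uEAX  = UINT32_C(0x000306A9);
    uint32_t const uFam  = ASMGetCpuFamily(uEAX);               /* (uEAX >> 8) & 0xf = 0x6 */
    uint32_t const uMod  = ASMGetCpuModel(uEAX, true /*fIntel*/); /* 0xA | 0x30 = 0x3A */
    uint32_t const uStep = ASMGetCpuStepping(uEAX);             /* uEAX & 0xf = 0x9 */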
1594
1595
1596/**
1597 * Get cr0.
1598 * @returns cr0.
1599 */
1600#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1601RT_ASM_DECL_PRAGMA_WATCOM(RTCCUINTXREG) ASMGetCR0(void);
1602#else
1603DECLINLINE(RTCCUINTXREG) ASMGetCR0(void)
1604{
1605 RTCCUINTXREG uCR0;
1606# if RT_INLINE_ASM_USES_INTRIN
1607 uCR0 = __readcr0();
1608
1609# elif RT_INLINE_ASM_GNU_STYLE
1610# ifdef RT_ARCH_AMD64
1611 __asm__ __volatile__("movq %%cr0, %0\t\n" : "=r" (uCR0));
1612# else
1613 __asm__ __volatile__("movl %%cr0, %0\t\n" : "=r" (uCR0));
1614# endif
1615# else
1616 __asm
1617 {
1618# ifdef RT_ARCH_AMD64
1619 mov rax, cr0
1620 mov [uCR0], rax
1621# else
1622 mov eax, cr0
1623 mov [uCR0], eax
1624# endif
1625 }
1626# endif
1627 return uCR0;
1628}
1629#endif
1630
1631
1632/**
1633 * Sets the CR0 register.
1634 * @param uCR0 The new CR0 value.
1635 */
1636#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1637RT_ASM_DECL_PRAGMA_WATCOM(void) ASMSetCR0(RTCCUINTXREG uCR0);
1638#else
1639DECLINLINE(void) ASMSetCR0(RTCCUINTXREG uCR0)
1640{
1641# if RT_INLINE_ASM_USES_INTRIN
1642 __writecr0(uCR0);
1643
1644# elif RT_INLINE_ASM_GNU_STYLE
1645# ifdef RT_ARCH_AMD64
1646 __asm__ __volatile__("movq %0, %%cr0\n\t" :: "r" (uCR0));
1647# else
1648 __asm__ __volatile__("movl %0, %%cr0\n\t" :: "r" (uCR0));
1649# endif
1650# else
1651 __asm
1652 {
1653# ifdef RT_ARCH_AMD64
1654 mov rax, [uCR0]
1655 mov cr0, rax
1656# else
1657 mov eax, [uCR0]
1658 mov cr0, eax
1659# endif
1660 }
1661# endif
1662}
1663#endif
1664
1665
1666/**
1667 * Get cr2.
1668 * @returns cr2.
1669 */
1670#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1671RT_ASM_DECL_PRAGMA_WATCOM(RTCCUINTXREG) ASMGetCR2(void);
1672#else
1673DECLINLINE(RTCCUINTXREG) ASMGetCR2(void)
1674{
1675 RTCCUINTXREG uCR2;
1676# if RT_INLINE_ASM_USES_INTRIN
1677 uCR2 = __readcr2();
1678
1679# elif RT_INLINE_ASM_GNU_STYLE
1680# ifdef RT_ARCH_AMD64
1681 __asm__ __volatile__("movq %%cr2, %0\t\n" : "=r" (uCR2));
1682# else
1683 __asm__ __volatile__("movl %%cr2, %0\t\n" : "=r" (uCR2));
1684# endif
1685# else
1686 __asm
1687 {
1688# ifdef RT_ARCH_AMD64
1689 mov rax, cr2
1690 mov [uCR2], rax
1691# else
1692 mov eax, cr2
1693 mov [uCR2], eax
1694# endif
1695 }
1696# endif
1697 return uCR2;
1698}
1699#endif
1700
1701
1702/**
1703 * Sets the CR2 register.
1704 * @param uCR2 The new CR2 value.
1705 */
1706#if RT_INLINE_ASM_EXTERNAL
1707RT_ASM_DECL_PRAGMA_WATCOM(void) ASMSetCR2(RTCCUINTXREG uCR2);
1708#else
1709DECLINLINE(void) ASMSetCR2(RTCCUINTXREG uCR2)
1710{
1711# if RT_INLINE_ASM_GNU_STYLE
1712# ifdef RT_ARCH_AMD64
1713 __asm__ __volatile__("movq %0, %%cr2\n\t" :: "r" (uCR2));
1714# else
1715 __asm__ __volatile__("movl %0, %%cr2\n\t" :: "r" (uCR2));
1716# endif
1717# else
1718 __asm
1719 {
1720# ifdef RT_ARCH_AMD64
1721 mov rax, [uCR2]
1722 mov cr2, rax
1723# else
1724 mov eax, [uCR2]
1725 mov cr2, eax
1726# endif
1727 }
1728# endif
1729}
1730#endif
1731
1732
1733/**
1734 * Get cr3.
1735 * @returns cr3.
1736 */
1737#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1738RT_ASM_DECL_PRAGMA_WATCOM(RTCCUINTXREG) ASMGetCR3(void);
1739#else
1740DECLINLINE(RTCCUINTXREG) ASMGetCR3(void)
1741{
1742 RTCCUINTXREG uCR3;
1743# if RT_INLINE_ASM_USES_INTRIN
1744 uCR3 = __readcr3();
1745
1746# elif RT_INLINE_ASM_GNU_STYLE
1747# ifdef RT_ARCH_AMD64
1748 __asm__ __volatile__("movq %%cr3, %0\t\n" : "=r" (uCR3));
1749# else
1750 __asm__ __volatile__("movl %%cr3, %0\t\n" : "=r" (uCR3));
1751# endif
1752# else
1753 __asm
1754 {
1755# ifdef RT_ARCH_AMD64
1756 mov rax, cr3
1757 mov [uCR3], rax
1758# else
1759 mov eax, cr3
1760 mov [uCR3], eax
1761# endif
1762 }
1763# endif
1764 return uCR3;
1765}
1766#endif
1767
1768
1769/**
1770 * Sets the CR3 register.
1771 *
1772 * @param uCR3 New CR3 value.
1773 */
1774#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1775RT_ASM_DECL_PRAGMA_WATCOM(void) ASMSetCR3(RTCCUINTXREG uCR3);
1776#else
1777DECLINLINE(void) ASMSetCR3(RTCCUINTXREG uCR3)
1778{
1779# if RT_INLINE_ASM_USES_INTRIN
1780 __writecr3(uCR3);
1781
1782# elif RT_INLINE_ASM_GNU_STYLE
1783# ifdef RT_ARCH_AMD64
1784 __asm__ __volatile__("movq %0, %%cr3\n\t" : : "r" (uCR3));
1785# else
1786 __asm__ __volatile__("movl %0, %%cr3\n\t" : : "r" (uCR3));
1787# endif
1788# else
1789 __asm
1790 {
1791# ifdef RT_ARCH_AMD64
1792 mov rax, [uCR3]
1793 mov cr3, rax
1794# else
1795 mov eax, [uCR3]
1796 mov cr3, eax
1797# endif
1798 }
1799# endif
1800}
1801#endif
1802
1803
1804/**
1805 * Reloads the CR3 register.
1806 */
1807#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1808RT_ASM_DECL_PRAGMA_WATCOM(void) ASMReloadCR3(void);
1809#else
1810DECLINLINE(void) ASMReloadCR3(void)
1811{
1812# if RT_INLINE_ASM_USES_INTRIN
1813 __writecr3(__readcr3());
1814
1815# elif RT_INLINE_ASM_GNU_STYLE
1816 RTCCUINTXREG u;
1817# ifdef RT_ARCH_AMD64
1818 __asm__ __volatile__("movq %%cr3, %0\n\t"
1819 "movq %0, %%cr3\n\t"
1820 : "=r" (u));
1821# else
1822 __asm__ __volatile__("movl %%cr3, %0\n\t"
1823 "movl %0, %%cr3\n\t"
1824 : "=r" (u));
1825# endif
1826# else
1827 __asm
1828 {
1829# ifdef RT_ARCH_AMD64
1830 mov rax, cr3
1831 mov cr3, rax
1832# else
1833 mov eax, cr3
1834 mov cr3, eax
1835# endif
1836 }
1837# endif
1838}
1839#endif
1840
1841
1842/**
1843 * Get cr4.
1844 * @returns cr4.
1845 */
1846#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1847RT_ASM_DECL_PRAGMA_WATCOM(RTCCUINTXREG) ASMGetCR4(void);
1848#else
1849DECLINLINE(RTCCUINTXREG) ASMGetCR4(void)
1850{
1851 RTCCUINTXREG uCR4;
1852# if RT_INLINE_ASM_USES_INTRIN
1853 uCR4 = __readcr4();
1854
1855# elif RT_INLINE_ASM_GNU_STYLE
1856# ifdef RT_ARCH_AMD64
1857 __asm__ __volatile__("movq %%cr4, %0\t\n" : "=r" (uCR4));
1858# else
1859 __asm__ __volatile__("movl %%cr4, %0\t\n" : "=r" (uCR4));
1860# endif
1861# else
1862 __asm
1863 {
1864# ifdef RT_ARCH_AMD64
1865 mov rax, cr4
1866 mov [uCR4], rax
1867# else
1868 push eax /* just in case */
1869 /*mov eax, cr4*/
1870 _emit 0x0f
1871 _emit 0x20
1872 _emit 0xe0
1873 mov [uCR4], eax
1874 pop eax
1875# endif
1876 }
1877# endif
1878 return uCR4;
1879}
1880#endif
1881
1882
1883/**
1884 * Sets the CR4 register.
1885 *
1886 * @param uCR4 New CR4 value.
1887 */
1888#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1889RT_ASM_DECL_PRAGMA_WATCOM(void) ASMSetCR4(RTCCUINTXREG uCR4);
1890#else
1891DECLINLINE(void) ASMSetCR4(RTCCUINTXREG uCR4)
1892{
1893# if RT_INLINE_ASM_USES_INTRIN
1894 __writecr4(uCR4);
1895
1896# elif RT_INLINE_ASM_GNU_STYLE
1897# ifdef RT_ARCH_AMD64
1898 __asm__ __volatile__("movq %0, %%cr4\n\t" : : "r" (uCR4));
1899# else
1900 __asm__ __volatile__("movl %0, %%cr4\n\t" : : "r" (uCR4));
1901# endif
1902# else
1903 __asm
1904 {
1905# ifdef RT_ARCH_AMD64
1906 mov rax, [uCR4]
1907 mov cr4, rax
1908# else
1909 mov eax, [uCR4]
1910 _emit 0x0F
1911 _emit 0x22
1912 _emit 0xE0 /* mov cr4, eax */
1913# endif
1914 }
1915# endif
1916}
1917#endif
1918
1919
1920/**
1921 * Get cr8.
1922 * @returns cr8.
1923 * @remark The lock prefix hack for access from non-64-bit modes is NOT used and 0 is returned.
1924 */
1925#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1926DECLASM(RTCCUINTXREG) ASMGetCR8(void);
1927#else
1928DECLINLINE(RTCCUINTXREG) ASMGetCR8(void)
1929{
1930# ifdef RT_ARCH_AMD64
1931 RTCCUINTXREG uCR8;
1932# if RT_INLINE_ASM_USES_INTRIN
1933 uCR8 = __readcr8();
1934
1935# elif RT_INLINE_ASM_GNU_STYLE
1936 __asm__ __volatile__("movq %%cr8, %0\t\n" : "=r" (uCR8));
1937# else
1938 __asm
1939 {
1940 mov rax, cr8
1941 mov [uCR8], rax
1942 }
1943# endif
1944 return uCR8;
1945# else /* !RT_ARCH_AMD64 */
1946 return 0;
1947# endif /* !RT_ARCH_AMD64 */
1948}
1949#endif
1950
1951
1952/**
1953 * Get XCR0 (eXtended feature Control Register 0).
1954 * @returns xcr0.
1955 */
1956DECLASM(uint64_t) ASMGetXcr0(void);
1957
1958/**
1959 * Sets the XCR0 register.
1960 * @param uXcr0 The new XCR0 value.
1961 */
1962DECLASM(void) ASMSetXcr0(uint64_t uXcr0);
1963
1964struct X86XSAVEAREA;
1965/**
1966 * Save extended CPU state.
1967 * @param pXStateArea Where to save the state.
1968 * @param fComponents Which state components to save.
1969 */
1970DECLASM(void) ASMXSave(struct X86XSAVEAREA RT_FAR *pXStateArea, uint64_t fComponents);
1971
1972/**
1973 * Loads extended CPU state.
1974 * @param pXStateArea Where to load the state from.
1975 * @param fComponents Which state components to load.
1976 */
1977DECLASM(void) ASMXRstor(struct X86XSAVEAREA const RT_FAR *pXStateArea, uint64_t fComponents);
1978
1979
1980struct X86FXSTATE;
1981/**
1982 * Save FPU and SSE CPU state.
1983 * @param pXStateArea Where to save the state.
1984 */
1985DECLASM(void) ASMFxSave(struct X86FXSTATE RT_FAR *pXStateArea);
1986
1987/**
1988 * Load FPU and SSE CPU state.
1989 * @param pXStateArea Where to load the state from.
1990 */
1991DECLASM(void) ASMFxRstor(struct X86FXSTATE const RT_FAR *pXStateArea);
1992
1993
1994/**
1995 * Enables interrupts (EFLAGS.IF).
1996 */
1997#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1998RT_ASM_DECL_PRAGMA_WATCOM(void) ASMIntEnable(void);
1999#else
2000DECLINLINE(void) ASMIntEnable(void)
2001{
2002# if RT_INLINE_ASM_GNU_STYLE
2003 __asm("sti\n");
2004# elif RT_INLINE_ASM_USES_INTRIN
2005 _enable();
2006# else
2007 __asm sti
2008# endif
2009}
2010#endif
2011
2012
2013/**
2014 * Disables interrupts (!EFLAGS.IF).
2015 */
2016#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2017RT_ASM_DECL_PRAGMA_WATCOM(void) ASMIntDisable(void);
2018#else
2019DECLINLINE(void) ASMIntDisable(void)
2020{
2021# if RT_INLINE_ASM_GNU_STYLE
2022 __asm("cli\n");
2023# elif RT_INLINE_ASM_USES_INTRIN
2024 _disable();
2025# else
2026 __asm cli
2027# endif
2028}
2029#endif
2030
2031
2032/**
2033 * Disables interrupts and returns previous xFLAGS.
2034 */
2035#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2036RT_ASM_DECL_PRAGMA_WATCOM(RTCCUINTREG) ASMIntDisableFlags(void);
2037#else
2038DECLINLINE(RTCCUINTREG) ASMIntDisableFlags(void)
2039{
2040 RTCCUINTREG xFlags;
2041# if RT_INLINE_ASM_GNU_STYLE
2042# ifdef RT_ARCH_AMD64
2043 __asm__ __volatile__("pushfq\n\t"
2044 "cli\n\t"
2045 "popq %0\n\t"
2046 : "=r" (xFlags));
2047# else
2048 __asm__ __volatile__("pushfl\n\t"
2049 "cli\n\t"
2050 "popl %0\n\t"
2051 : "=r" (xFlags));
2052# endif
2053# elif RT_INLINE_ASM_USES_INTRIN && !defined(RT_ARCH_X86)
2054 xFlags = ASMGetFlags();
2055 _disable();
2056# else
2057 __asm {
2058 pushfd
2059 cli
2060 pop [xFlags]
2061 }
2062# endif
2063 return xFlags;
2064}
2065#endif
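
Paired with ASMSetFlags() this gives a short interrupt-free section (kernel context assumed, since cli/popf need sufficient privilege):

    RTCCUINTREG const fSavedFlags = ASMIntDisableFlags();
    /* ... short section that must not be interrupted ... */
    ASMSetFlags(fSavedFlags); /* restores the previous EFLAGS.IF state */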
2066
2067
2068/**
2069 * Are interrupts enabled?
2070 *
2071 * @returns true / false.
2072 */
2073DECLINLINE(bool) ASMIntAreEnabled(void)
2074{
2075 RTCCUINTREG uFlags = ASMGetFlags();
2076 return uFlags & 0x200 /* X86_EFL_IF */ ? true : false;
2077}
2078
2079
2080/**
2081 * Halts the CPU until interrupted.
2082 */
2083#if RT_INLINE_ASM_EXTERNAL && RT_INLINE_ASM_USES_INTRIN < 14
2084RT_ASM_DECL_PRAGMA_WATCOM(void) ASMHalt(void);
2085#else
2086DECLINLINE(void) ASMHalt(void)
2087{
2088# if RT_INLINE_ASM_GNU_STYLE
2089 __asm__ __volatile__("hlt\n\t");
2090# elif RT_INLINE_ASM_USES_INTRIN
2091 __halt();
2092# else
2093 __asm {
2094 hlt
2095 }
2096# endif
2097}
2098#endif
2099
2100
2101/**
2102 * Reads a machine specific register.
2103 *
2104 * @returns Register content.
2105 * @param uRegister Register to read.
2106 */
2107#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2108RT_ASM_DECL_PRAGMA_WATCOM(uint64_t) ASMRdMsr(uint32_t uRegister);
2109#else
2110DECLINLINE(uint64_t) ASMRdMsr(uint32_t uRegister)
2111{
2112 RTUINT64U u;
2113# if RT_INLINE_ASM_GNU_STYLE
2114 __asm__ __volatile__("rdmsr\n\t"
2115 : "=a" (u.s.Lo),
2116 "=d" (u.s.Hi)
2117 : "c" (uRegister));
2118
2119# elif RT_INLINE_ASM_USES_INTRIN
2120 u.u = __readmsr(uRegister);
2121
2122# else
2123 __asm
2124 {
2125 mov ecx, [uRegister]
2126 rdmsr
2127 mov [u.s.Lo], eax
2128 mov [u.s.Hi], edx
2129 }
2130# endif
2131
2132 return u.u;
2133}
2134#endif
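
rdmsr faults (#GP) outside ring-0, so this is kernel-only; a sketch reading IA32_APIC_BASE (MSR index 0x1B and the global-enable bit 11 are taken from the Intel SDM):

    uint64_t const uApicBase    = ASMRdMsr(UINT32_C(0x1B));
    bool const     fApicEnabled = RT_BOOL(uApicBase & RT_BIT_64(11));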
2135
2136
2137/**
2138 * Writes a machine specific register.
2139 *
2141 * @param uRegister Register to write to.
2142 * @param u64Val Value to write.
2143 */
2144#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2145RT_ASM_DECL_PRAGMA_WATCOM(void) ASMWrMsr(uint32_t uRegister, uint64_t u64Val);
2146#else
2147DECLINLINE(void) ASMWrMsr(uint32_t uRegister, uint64_t u64Val)
2148{
2149 RTUINT64U u;
2150
2151 u.u = u64Val;
2152# if RT_INLINE_ASM_GNU_STYLE
2153 __asm__ __volatile__("wrmsr\n\t"
2154 ::"a" (u.s.Lo),
2155 "d" (u.s.Hi),
2156 "c" (uRegister));
2157
2158# elif RT_INLINE_ASM_USES_INTRIN
2159 __writemsr(uRegister, u.u);
2160
2161# else
2162 __asm
2163 {
2164 mov ecx, [uRegister]
2165 mov edx, [u.s.Hi]
2166 mov eax, [u.s.Lo]
2167 wrmsr
2168 }
2169# endif
2170}
2171#endif
2172
2173
2174/**
2175 * Reads a machine specific register, extended version (for AMD).
2176 *
2177 * @returns Register content.
2178 * @param uRegister Register to read.
2179 * @param uXDI RDI/EDI value.
2180 */
2181#if RT_INLINE_ASM_EXTERNAL
2182RT_ASM_DECL_PRAGMA_WATCOM(uint64_t) ASMRdMsrEx(uint32_t uRegister, RTCCUINTXREG uXDI);
2183#else
2184DECLINLINE(uint64_t) ASMRdMsrEx(uint32_t uRegister, RTCCUINTXREG uXDI)
2185{
2186 RTUINT64U u;
2187# if RT_INLINE_ASM_GNU_STYLE
2188 __asm__ __volatile__("rdmsr\n\t"
2189 : "=a" (u.s.Lo),
2190 "=d" (u.s.Hi)
2191 : "c" (uRegister),
2192 "D" (uXDI));
2193
2194# else
2195 __asm
2196 {
2197 mov ecx, [uRegister]
2198 xchg edi, [uXDI]
2199 rdmsr
2200 mov [u.s.Lo], eax
2201 mov [u.s.Hi], edx
2202 xchg edi, [uXDI]
2203 }
2204# endif
2205
2206 return u.u;
2207}
2208#endif
2209
2210
2211/**
2212 * Writes a machine specific register, extended version (for AMD).
2213 *
2215 * @param uRegister Register to write to.
2216 * @param uXDI RDI/EDI value.
2217 * @param u64Val Value to write.
2218 */
2219#if RT_INLINE_ASM_EXTERNAL
2220RT_ASM_DECL_PRAGMA_WATCOM(void) ASMWrMsrEx(uint32_t uRegister, RTCCUINTXREG uXDI, uint64_t u64Val);
2221#else
2222DECLINLINE(void) ASMWrMsrEx(uint32_t uRegister, RTCCUINTXREG uXDI, uint64_t u64Val)
2223{
2224 RTUINT64U u;
2225
2226 u.u = u64Val;
2227# if RT_INLINE_ASM_GNU_STYLE
2228 __asm__ __volatile__("wrmsr\n\t"
2229 ::"a" (u.s.Lo),
2230 "d" (u.s.Hi),
2231 "c" (uRegister),
2232 "D" (uXDI));
2233
2234# else
2235 __asm
2236 {
2237 mov ecx, [uRegister]
2238 xchg edi, [uXDI]
2239 mov edx, [u.s.Hi]
2240 mov eax, [u.s.Lo]
2241 wrmsr
2242 xchg edi, [uXDI]
2243 }
2244# endif
2245}
2246#endif
2247
2248
2249
2250/**
2251 * Reads the low part of a machine specific register.
2252 *
2253 * @returns Register content.
2254 * @param uRegister Register to read.
2255 */
2256#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2257RT_ASM_DECL_PRAGMA_WATCOM(uint32_t) ASMRdMsr_Low(uint32_t uRegister);
2258#else
2259DECLINLINE(uint32_t) ASMRdMsr_Low(uint32_t uRegister)
2260{
2261 uint32_t u32;
2262# if RT_INLINE_ASM_GNU_STYLE
2263 __asm__ __volatile__("rdmsr\n\t"
2264 : "=a" (u32)
2265 : "c" (uRegister)
2266 : "edx");
2267
2268# elif RT_INLINE_ASM_USES_INTRIN
2269 u32 = (uint32_t)__readmsr(uRegister);
2270
2271# else
2272 __asm
2273 {
2274 mov ecx, [uRegister]
2275 rdmsr
2276 mov [u32], eax
2277 }
2278# endif
2279
2280 return u32;
2281}
2282#endif
2283
2284
2285/**
2286 * Reads the high part of a machine specific register.
2287 *
2288 * @returns Register content.
2289 * @param uRegister Register to read.
2290 */
2291#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2292RT_ASM_DECL_PRAGMA_WATCOM(uint32_t) ASMRdMsr_High(uint32_t uRegister);
2293#else
2294DECLINLINE(uint32_t) ASMRdMsr_High(uint32_t uRegister)
2295{
2296 uint32_t u32;
2297# if RT_INLINE_ASM_GNU_STYLE
2298 __asm__ __volatile__("rdmsr\n\t"
2299 : "=d" (u32)
2300 : "c" (uRegister)
2301 : "eax");
2302
2303# elif RT_INLINE_ASM_USES_INTRIN
2304 u32 = (uint32_t)(__readmsr(uRegister) >> 32);
2305
2306# else
2307 __asm
2308 {
2309 mov ecx, [uRegister]
2310 rdmsr
2311 mov [u32], edx
2312 }
2313# endif
2314
2315 return u32;
2316}
2317#endif


/**
 * Gets dr0.
 *
 * @returns dr0.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
RT_ASM_DECL_PRAGMA_WATCOM(RTCCUINTXREG) ASMGetDR0(void);
#else
DECLINLINE(RTCCUINTXREG) ASMGetDR0(void)
{
    RTCCUINTXREG uDR0;
# if RT_INLINE_ASM_USES_INTRIN
    uDR0 = __readdr(0);
# elif RT_INLINE_ASM_GNU_STYLE
#  ifdef RT_ARCH_AMD64
    __asm__ __volatile__("movq %%dr0, %0\n\t" : "=r" (uDR0));
#  else
    __asm__ __volatile__("movl %%dr0, %0\n\t" : "=r" (uDR0));
#  endif
# else
    __asm
    {
#  ifdef RT_ARCH_AMD64
        mov     rax, dr0
        mov     [uDR0], rax
#  else
        mov     eax, dr0
        mov     [uDR0], eax
#  endif
    }
# endif
    return uDR0;
}
#endif


/**
 * Gets dr1.
 *
 * @returns dr1.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
RT_ASM_DECL_PRAGMA_WATCOM(RTCCUINTXREG) ASMGetDR1(void);
#else
DECLINLINE(RTCCUINTXREG) ASMGetDR1(void)
{
    RTCCUINTXREG uDR1;
# if RT_INLINE_ASM_USES_INTRIN
    uDR1 = __readdr(1);
# elif RT_INLINE_ASM_GNU_STYLE
#  ifdef RT_ARCH_AMD64
    __asm__ __volatile__("movq %%dr1, %0\n\t" : "=r" (uDR1));
#  else
    __asm__ __volatile__("movl %%dr1, %0\n\t" : "=r" (uDR1));
#  endif
# else
    __asm
    {
#  ifdef RT_ARCH_AMD64
        mov     rax, dr1
        mov     [uDR1], rax
#  else
        mov     eax, dr1
        mov     [uDR1], eax
#  endif
    }
# endif
    return uDR1;
}
#endif


/**
 * Gets dr2.
 *
 * @returns dr2.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
RT_ASM_DECL_PRAGMA_WATCOM(RTCCUINTXREG) ASMGetDR2(void);
#else
DECLINLINE(RTCCUINTXREG) ASMGetDR2(void)
{
    RTCCUINTXREG uDR2;
# if RT_INLINE_ASM_USES_INTRIN
    uDR2 = __readdr(2);
# elif RT_INLINE_ASM_GNU_STYLE
#  ifdef RT_ARCH_AMD64
    __asm__ __volatile__("movq %%dr2, %0\n\t" : "=r" (uDR2));
#  else
    __asm__ __volatile__("movl %%dr2, %0\n\t" : "=r" (uDR2));
#  endif
# else
    __asm
    {
#  ifdef RT_ARCH_AMD64
        mov     rax, dr2
        mov     [uDR2], rax
#  else
        mov     eax, dr2
        mov     [uDR2], eax
#  endif
    }
# endif
    return uDR2;
}
#endif


/**
 * Gets dr3.
 *
 * @returns dr3.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
RT_ASM_DECL_PRAGMA_WATCOM(RTCCUINTXREG) ASMGetDR3(void);
#else
DECLINLINE(RTCCUINTXREG) ASMGetDR3(void)
{
    RTCCUINTXREG uDR3;
# if RT_INLINE_ASM_USES_INTRIN
    uDR3 = __readdr(3);
# elif RT_INLINE_ASM_GNU_STYLE
#  ifdef RT_ARCH_AMD64
    __asm__ __volatile__("movq %%dr3, %0\n\t" : "=r" (uDR3));
#  else
    __asm__ __volatile__("movl %%dr3, %0\n\t" : "=r" (uDR3));
#  endif
# else
    __asm
    {
#  ifdef RT_ARCH_AMD64
        mov     rax, dr3
        mov     [uDR3], rax
#  else
        mov     eax, dr3
        mov     [uDR3], eax
#  endif
    }
# endif
    return uDR3;
}
#endif


/**
 * Gets dr6.
 *
 * @returns dr6.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
RT_ASM_DECL_PRAGMA_WATCOM(RTCCUINTXREG) ASMGetDR6(void);
#else
DECLINLINE(RTCCUINTXREG) ASMGetDR6(void)
{
    RTCCUINTXREG uDR6;
# if RT_INLINE_ASM_USES_INTRIN
    uDR6 = __readdr(6);
# elif RT_INLINE_ASM_GNU_STYLE
#  ifdef RT_ARCH_AMD64
    __asm__ __volatile__("movq %%dr6, %0\n\t" : "=r" (uDR6));
#  else
    __asm__ __volatile__("movl %%dr6, %0\n\t" : "=r" (uDR6));
#  endif
# else
    __asm
    {
#  ifdef RT_ARCH_AMD64
        mov     rax, dr6
        mov     [uDR6], rax
#  else
        mov     eax, dr6
        mov     [uDR6], eax
#  endif
    }
# endif
    return uDR6;
}
#endif


/**
 * Reads and clears DR6.
 *
 * @returns DR6.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
RT_ASM_DECL_PRAGMA_WATCOM(RTCCUINTXREG) ASMGetAndClearDR6(void);
#else
DECLINLINE(RTCCUINTXREG) ASMGetAndClearDR6(void)
{
    RTCCUINTXREG uDR6;
# if RT_INLINE_ASM_USES_INTRIN
    uDR6 = __readdr(6);
    __writedr(6, 0xffff0ff0U); /* Bits 31-16 and 11-4 are 1's, bits 15-12, 3-0 and 63-32 are zero. */
# elif RT_INLINE_ASM_GNU_STYLE
    RTCCUINTXREG uNewValue = 0xffff0ff0U; /* Bits 31-16 and 11-4 are 1's, bits 15-12, 3-0 and 63-32 are zero. */
#  ifdef RT_ARCH_AMD64
    __asm__ __volatile__("movq %%dr6, %0\n\t"
                         "movq %1, %%dr6\n\t"
                         : "=r" (uDR6)
                         : "r" (uNewValue));
#  else
    __asm__ __volatile__("movl %%dr6, %0\n\t"
                         "movl %1, %%dr6\n\t"
                         : "=r" (uDR6)
                         : "r" (uNewValue));
#  endif
# else
    __asm
    {
#  ifdef RT_ARCH_AMD64
        mov     rax, dr6
        mov     [uDR6], rax
        mov     rcx, rax
        mov     ecx, 0ffff0ff0h; /* Bits 31-16 and 11-4 are 1's, bits 15-12, 3-0 and 63-32 are zero. */
        mov     dr6, rcx
#  else
        mov     eax, dr6
        mov     [uDR6], eax
        mov     ecx, 0ffff0ff0h; /* Bits 31-16 and 11-4 are 1's, bits 15-12 and 3-0 are zero. */
        mov     dr6, ecx
#  endif
    }
# endif
    return uDR6;
}
#endif
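
/*
 * Usage sketch (illustrative only): a #DB handler would typically sample and
 * reset DR6 in one operation so that stale status bits cannot leak into the
 * next debug event; the breakpoint hit bits B0..B3 live in bits 0..3.
 *
 * @code
 *     RTCCUINTXREG const uDR6 = ASMGetAndClearDR6();
 *     if (uDR6 & UINT32_C(0x0000000f))
 *     {
 *         // dispatch whichever hardware breakpoint(s) fired
 *     }
 * @endcode
 */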


/**
 * Gets dr7.
 *
 * @returns dr7.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
RT_ASM_DECL_PRAGMA_WATCOM(RTCCUINTXREG) ASMGetDR7(void);
#else
DECLINLINE(RTCCUINTXREG) ASMGetDR7(void)
{
    RTCCUINTXREG uDR7;
# if RT_INLINE_ASM_USES_INTRIN
    uDR7 = __readdr(7);
# elif RT_INLINE_ASM_GNU_STYLE
#  ifdef RT_ARCH_AMD64
    __asm__ __volatile__("movq %%dr7, %0\n\t" : "=r" (uDR7));
#  else
    __asm__ __volatile__("movl %%dr7, %0\n\t" : "=r" (uDR7));
#  endif
# else
    __asm
    {
#  ifdef RT_ARCH_AMD64
        mov     rax, dr7
        mov     [uDR7], rax
#  else
        mov     eax, dr7
        mov     [uDR7], eax
#  endif
    }
# endif
    return uDR7;
}
#endif


/**
 * Sets dr0.
 *
 * @param   uDRVal   Debug register value to write
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
RT_ASM_DECL_PRAGMA_WATCOM(void) ASMSetDR0(RTCCUINTXREG uDRVal);
#else
DECLINLINE(void) ASMSetDR0(RTCCUINTXREG uDRVal)
{
# if RT_INLINE_ASM_USES_INTRIN
    __writedr(0, uDRVal);
# elif RT_INLINE_ASM_GNU_STYLE
#  ifdef RT_ARCH_AMD64
    __asm__ __volatile__("movq %0, %%dr0\n\t" : : "r" (uDRVal));
#  else
    __asm__ __volatile__("movl %0, %%dr0\n\t" : : "r" (uDRVal));
#  endif
# else
    __asm
    {
#  ifdef RT_ARCH_AMD64
        mov     rax, [uDRVal]
        mov     dr0, rax
#  else
        mov     eax, [uDRVal]
        mov     dr0, eax
#  endif
    }
# endif
}
#endif


/**
 * Sets dr1.
 *
 * @param   uDRVal   Debug register value to write
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
RT_ASM_DECL_PRAGMA_WATCOM(void) ASMSetDR1(RTCCUINTXREG uDRVal);
#else
DECLINLINE(void) ASMSetDR1(RTCCUINTXREG uDRVal)
{
# if RT_INLINE_ASM_USES_INTRIN
    __writedr(1, uDRVal);
# elif RT_INLINE_ASM_GNU_STYLE
#  ifdef RT_ARCH_AMD64
    __asm__ __volatile__("movq %0, %%dr1\n\t" : : "r" (uDRVal));
#  else
    __asm__ __volatile__("movl %0, %%dr1\n\t" : : "r" (uDRVal));
#  endif
# else
    __asm
    {
#  ifdef RT_ARCH_AMD64
        mov     rax, [uDRVal]
        mov     dr1, rax
#  else
        mov     eax, [uDRVal]
        mov     dr1, eax
#  endif
    }
# endif
}
#endif


/**
 * Sets dr2.
 *
 * @param   uDRVal   Debug register value to write
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
RT_ASM_DECL_PRAGMA_WATCOM(void) ASMSetDR2(RTCCUINTXREG uDRVal);
#else
DECLINLINE(void) ASMSetDR2(RTCCUINTXREG uDRVal)
{
# if RT_INLINE_ASM_USES_INTRIN
    __writedr(2, uDRVal);
# elif RT_INLINE_ASM_GNU_STYLE
#  ifdef RT_ARCH_AMD64
    __asm__ __volatile__("movq %0, %%dr2\n\t" : : "r" (uDRVal));
#  else
    __asm__ __volatile__("movl %0, %%dr2\n\t" : : "r" (uDRVal));
#  endif
# else
    __asm
    {
#  ifdef RT_ARCH_AMD64
        mov     rax, [uDRVal]
        mov     dr2, rax
#  else
        mov     eax, [uDRVal]
        mov     dr2, eax
#  endif
    }
# endif
}
#endif


/**
 * Sets dr3.
 *
 * @param   uDRVal   Debug register value to write
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
RT_ASM_DECL_PRAGMA_WATCOM(void) ASMSetDR3(RTCCUINTXREG uDRVal);
#else
DECLINLINE(void) ASMSetDR3(RTCCUINTXREG uDRVal)
{
# if RT_INLINE_ASM_USES_INTRIN
    __writedr(3, uDRVal);
# elif RT_INLINE_ASM_GNU_STYLE
#  ifdef RT_ARCH_AMD64
    __asm__ __volatile__("movq %0, %%dr3\n\t" : : "r" (uDRVal));
#  else
    __asm__ __volatile__("movl %0, %%dr3\n\t" : : "r" (uDRVal));
#  endif
# else
    __asm
    {
#  ifdef RT_ARCH_AMD64
        mov     rax, [uDRVal]
        mov     dr3, rax
#  else
        mov     eax, [uDRVal]
        mov     dr3, eax
#  endif
    }
# endif
}
#endif


/**
 * Sets dr6.
 *
 * @param   uDRVal   Debug register value to write
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
RT_ASM_DECL_PRAGMA_WATCOM(void) ASMSetDR6(RTCCUINTXREG uDRVal);
#else
DECLINLINE(void) ASMSetDR6(RTCCUINTXREG uDRVal)
{
# if RT_INLINE_ASM_USES_INTRIN
    __writedr(6, uDRVal);
# elif RT_INLINE_ASM_GNU_STYLE
#  ifdef RT_ARCH_AMD64
    __asm__ __volatile__("movq %0, %%dr6\n\t" : : "r" (uDRVal));
#  else
    __asm__ __volatile__("movl %0, %%dr6\n\t" : : "r" (uDRVal));
#  endif
# else
    __asm
    {
#  ifdef RT_ARCH_AMD64
        mov     rax, [uDRVal]
        mov     dr6, rax
#  else
        mov     eax, [uDRVal]
        mov     dr6, eax
#  endif
    }
# endif
}
#endif


/**
 * Sets dr7.
 *
 * @param   uDRVal   Debug register value to write
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
RT_ASM_DECL_PRAGMA_WATCOM(void) ASMSetDR7(RTCCUINTXREG uDRVal);
#else
DECLINLINE(void) ASMSetDR7(RTCCUINTXREG uDRVal)
{
# if RT_INLINE_ASM_USES_INTRIN
    __writedr(7, uDRVal);
# elif RT_INLINE_ASM_GNU_STYLE
#  ifdef RT_ARCH_AMD64
    __asm__ __volatile__("movq %0, %%dr7\n\t" : : "r" (uDRVal));
#  else
    __asm__ __volatile__("movl %0, %%dr7\n\t" : : "r" (uDRVal));
#  endif
# else
    __asm
    {
#  ifdef RT_ARCH_AMD64
        mov     rax, [uDRVal]
        mov     dr7, rax
#  else
        mov     eax, [uDRVal]
        mov     dr7, eax
#  endif
    }
# endif
}
#endif
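
/*
 * Usage sketch (illustrative only): arming a local instruction breakpoint in
 * slot 0.  Assumed DR7 layout: L0 is bit 0, bit 10 is reserved and set when
 * writing, and R/W0 / LEN0 (bits 16-19) stay zero for an execute breakpoint.
 * pvBreakpointAddr is a hypothetical ring-0 variable.
 *
 * @code
 *     ASMSetDR0((RTCCUINTXREG)(uintptr_t)pvBreakpointAddr);
 *     ASMSetDR7(ASMGetDR7() | UINT32_C(0x00000401)); // L0 plus reserved bit 10
 * @endcode
 */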


/**
 * Writes an 8-bit unsigned integer to an I/O port, ordered.
 *
 * @param   Port    I/O port to write to.
 * @param   u8      8-bit integer to write.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
RT_ASM_DECL_PRAGMA_WATCOM(void) ASMOutU8(RTIOPORT Port, uint8_t u8);
#else
DECLINLINE(void) ASMOutU8(RTIOPORT Port, uint8_t u8)
{
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("outb %b1, %w0\n\t"
                         :: "Nd" (Port),
                            "a" (u8));

# elif RT_INLINE_ASM_USES_INTRIN
    __outbyte(Port, u8);

# else
    __asm
    {
        mov     dx, [Port]
        mov     al, [u8]
        out     dx, al
    }
# endif
}
#endif


/**
 * Reads an 8-bit unsigned integer from an I/O port, ordered.
 *
 * @returns 8-bit integer.
 * @param   Port    I/O port to read from.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
RT_ASM_DECL_PRAGMA_WATCOM(uint8_t) ASMInU8(RTIOPORT Port);
#else
DECLINLINE(uint8_t) ASMInU8(RTIOPORT Port)
{
    uint8_t u8;
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("inb %w1, %b0\n\t"
                         : "=a" (u8)
                         : "Nd" (Port));

# elif RT_INLINE_ASM_USES_INTRIN
    u8 = __inbyte(Port);

# else
    __asm
    {
        mov     dx, [Port]
        in      al, dx
        mov     [u8], al
    }
# endif
    return u8;
}
#endif


/**
 * Writes a 16-bit unsigned integer to an I/O port, ordered.
 *
 * @param   Port    I/O port to write to.
 * @param   u16     16-bit integer to write.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
RT_ASM_DECL_PRAGMA_WATCOM(void) ASMOutU16(RTIOPORT Port, uint16_t u16);
#else
DECLINLINE(void) ASMOutU16(RTIOPORT Port, uint16_t u16)
{
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("outw %w1, %w0\n\t"
                         :: "Nd" (Port),
                            "a" (u16));

# elif RT_INLINE_ASM_USES_INTRIN
    __outword(Port, u16);

# else
    __asm
    {
        mov     dx, [Port]
        mov     ax, [u16]
        out     dx, ax
    }
# endif
}
#endif


/**
 * Reads a 16-bit unsigned integer from an I/O port, ordered.
 *
 * @returns 16-bit integer.
 * @param   Port    I/O port to read from.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
RT_ASM_DECL_PRAGMA_WATCOM(uint16_t) ASMInU16(RTIOPORT Port);
#else
DECLINLINE(uint16_t) ASMInU16(RTIOPORT Port)
{
    uint16_t u16;
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("inw %w1, %w0\n\t"
                         : "=a" (u16)
                         : "Nd" (Port));

# elif RT_INLINE_ASM_USES_INTRIN
    u16 = __inword(Port);

# else
    __asm
    {
        mov     dx, [Port]
        in      ax, dx
        mov     [u16], ax
    }
# endif
    return u16;
}
#endif


/**
 * Writes a 32-bit unsigned integer to an I/O port, ordered.
 *
 * @param   Port    I/O port to write to.
 * @param   u32     32-bit integer to write.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
RT_ASM_DECL_PRAGMA_WATCOM(void) ASMOutU32(RTIOPORT Port, uint32_t u32);
#else
DECLINLINE(void) ASMOutU32(RTIOPORT Port, uint32_t u32)
{
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("outl %1, %w0\n\t"
                         :: "Nd" (Port),
                            "a" (u32));

# elif RT_INLINE_ASM_USES_INTRIN
    __outdword(Port, u32);

# else
    __asm
    {
        mov     dx, [Port]
        mov     eax, [u32]
        out     dx, eax
    }
# endif
}
#endif


/**
 * Reads a 32-bit unsigned integer from an I/O port, ordered.
 *
 * @returns 32-bit integer.
 * @param   Port    I/O port to read from.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
RT_ASM_DECL_PRAGMA_WATCOM(uint32_t) ASMInU32(RTIOPORT Port);
#else
DECLINLINE(uint32_t) ASMInU32(RTIOPORT Port)
{
    uint32_t u32;
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("inl %w1, %0\n\t"
                         : "=a" (u32)
                         : "Nd" (Port));

# elif RT_INLINE_ASM_USES_INTRIN
    u32 = __indword(Port);

# else
    __asm
    {
        mov     dx, [Port]
        in      eax, dx
        mov     [u32], eax
    }
# endif
    return u32;
}
#endif
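
/*
 * Usage sketch (illustrative only): PCI configuration mechanism #1 pairs a
 * dword address write to port 0xcf8 with a dword data access on port 0xcfc.
 * Reading register 0 of bus 0, device 0, function 0 returns the host
 * bridge's device/vendor ID on most PCs.
 *
 * @code
 *     ASMOutU32(0xcf8, UINT32_C(0x80000000)); // enable bit, bus 0, dev 0, fn 0, reg 0
 *     uint32_t const u32Id = ASMInU32(0xcfc); // vendor ID in the low word
 * @endcode
 */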


/**
 * Writes a string of 8-bit unsigned integer items to an I/O port, ordered.
 *
 * @param   Port    I/O port to write to.
 * @param   pau8    Pointer to the string buffer.
 * @param   c       The number of items to write.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
RT_ASM_DECL_PRAGMA_WATCOM(void) ASMOutStrU8(RTIOPORT Port, uint8_t const RT_FAR *pau8, size_t c);
#else
DECLINLINE(void) ASMOutStrU8(RTIOPORT Port, uint8_t const RT_FAR *pau8, size_t c)
{
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("rep; outsb\n\t"
                         : "+S" (pau8),
                           "+c" (c)
                         : "d" (Port));

# elif RT_INLINE_ASM_USES_INTRIN
    __outbytestring(Port, (unsigned char RT_FAR *)pau8, (unsigned long)c);

# else
    __asm
    {
        mov     dx, [Port]
        mov     ecx, [c]
        mov     eax, [pau8]
        xchg    esi, eax
        rep     outsb
        xchg    esi, eax
    }
# endif
}
#endif


/**
 * Reads a string of 8-bit unsigned integer items from an I/O port, ordered.
 *
 * @param   Port    I/O port to read from.
 * @param   pau8    Pointer to the string buffer (output).
 * @param   c       The number of items to read.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
RT_ASM_DECL_PRAGMA_WATCOM(void) ASMInStrU8(RTIOPORT Port, uint8_t RT_FAR *pau8, size_t c);
#else
DECLINLINE(void) ASMInStrU8(RTIOPORT Port, uint8_t RT_FAR *pau8, size_t c)
{
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("rep; insb\n\t"
                         : "+D" (pau8),
                           "+c" (c)
                         : "d" (Port));

# elif RT_INLINE_ASM_USES_INTRIN
    __inbytestring(Port, pau8, (unsigned long)c);

# else
    __asm
    {
        mov     dx, [Port]
        mov     ecx, [c]
        mov     eax, [pau8]
        xchg    edi, eax
        rep     insb
        xchg    edi, eax
    }
# endif
}
#endif


/**
 * Writes a string of 16-bit unsigned integer items to an I/O port, ordered.
 *
 * @param   Port    I/O port to write to.
 * @param   pau16   Pointer to the string buffer.
 * @param   c       The number of items to write.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
RT_ASM_DECL_PRAGMA_WATCOM(void) ASMOutStrU16(RTIOPORT Port, uint16_t const RT_FAR *pau16, size_t c);
#else
DECLINLINE(void) ASMOutStrU16(RTIOPORT Port, uint16_t const RT_FAR *pau16, size_t c)
{
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("rep; outsw\n\t"
                         : "+S" (pau16),
                           "+c" (c)
                         : "d" (Port));

# elif RT_INLINE_ASM_USES_INTRIN
    __outwordstring(Port, (unsigned short RT_FAR *)pau16, (unsigned long)c);

# else
    __asm
    {
        mov     dx, [Port]
        mov     ecx, [c]
        mov     eax, [pau16]
        xchg    esi, eax
        rep     outsw
        xchg    esi, eax
    }
# endif
}
#endif


/**
 * Reads a string of 16-bit unsigned integer items from an I/O port, ordered.
 *
 * @param   Port    I/O port to read from.
 * @param   pau16   Pointer to the string buffer (output).
 * @param   c       The number of items to read.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
RT_ASM_DECL_PRAGMA_WATCOM(void) ASMInStrU16(RTIOPORT Port, uint16_t RT_FAR *pau16, size_t c);
#else
DECLINLINE(void) ASMInStrU16(RTIOPORT Port, uint16_t RT_FAR *pau16, size_t c)
{
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("rep; insw\n\t"
                         : "+D" (pau16),
                           "+c" (c)
                         : "d" (Port));

# elif RT_INLINE_ASM_USES_INTRIN
    __inwordstring(Port, pau16, (unsigned long)c);

# else
    __asm
    {
        mov     dx, [Port]
        mov     ecx, [c]
        mov     eax, [pau16]
        xchg    edi, eax
        rep     insw
        xchg    edi, eax
    }
# endif
}
#endif
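
/*
 * Usage sketch (illustrative only): PIO-mode ATA moves a 512-byte sector as
 * 256 words through the data register, port 0x1f0 on the primary controller,
 * which is exactly the pattern the string variants exist for.
 *
 * @code
 *     uint16_t au16Sector[256];
 *     ASMInStrU16(0x1f0, au16Sector, RT_ELEMENTS(au16Sector));
 * @endcode
 */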


/**
 * Writes a string of 32-bit unsigned integer items to an I/O port, ordered.
 *
 * @param   Port    I/O port to write to.
 * @param   pau32   Pointer to the string buffer.
 * @param   c       The number of items to write.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
RT_ASM_DECL_PRAGMA_WATCOM(void) ASMOutStrU32(RTIOPORT Port, uint32_t const RT_FAR *pau32, size_t c);
#else
DECLINLINE(void) ASMOutStrU32(RTIOPORT Port, uint32_t const RT_FAR *pau32, size_t c)
{
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("rep; outsl\n\t"
                         : "+S" (pau32),
                           "+c" (c)
                         : "d" (Port));

# elif RT_INLINE_ASM_USES_INTRIN
    __outdwordstring(Port, (unsigned long RT_FAR *)pau32, (unsigned long)c);

# else
    __asm
    {
        mov     dx, [Port]
        mov     ecx, [c]
        mov     eax, [pau32]
        xchg    esi, eax
        rep     outsd
        xchg    esi, eax
    }
# endif
}
#endif


/**
 * Reads a string of 32-bit unsigned integer items from an I/O port, ordered.
 *
 * @param   Port    I/O port to read from.
 * @param   pau32   Pointer to the string buffer (output).
 * @param   c       The number of items to read.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
RT_ASM_DECL_PRAGMA_WATCOM(void) ASMInStrU32(RTIOPORT Port, uint32_t RT_FAR *pau32, size_t c);
#else
DECLINLINE(void) ASMInStrU32(RTIOPORT Port, uint32_t RT_FAR *pau32, size_t c)
{
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("rep; insl\n\t"
                         : "+D" (pau32),
                           "+c" (c)
                         : "d" (Port));

# elif RT_INLINE_ASM_USES_INTRIN
    __indwordstring(Port, (unsigned long RT_FAR *)pau32, (unsigned long)c);

# else
    __asm
    {
        mov     dx, [Port]
        mov     ecx, [c]
        mov     eax, [pau32]
        xchg    edi, eax
        rep     insd
        xchg    edi, eax
    }
# endif
}
#endif


/**
 * Invalidate page.
 *
 * @param   uPtr    Address of the page to invalidate.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
RT_ASM_DECL_PRAGMA_WATCOM(void) ASMInvalidatePage(RTCCUINTXREG uPtr);
#else
DECLINLINE(void) ASMInvalidatePage(RTCCUINTXREG uPtr)
{
# if RT_INLINE_ASM_USES_INTRIN
    __invlpg((void RT_FAR *)uPtr);

# elif RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("invlpg %0\n\t"
                         : : "m" (*(uint8_t RT_FAR *)(uintptr_t)uPtr));
# else
    __asm
    {
#  ifdef RT_ARCH_AMD64
        mov     rax, [uPtr]
        invlpg  [rax]
#  else
        mov     eax, [uPtr]
        invlpg  [eax]
#  endif
    }
# endif
}
#endif
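
/*
 * Usage sketch (illustrative only): after changing a present page table
 * entry, the stale TLB entry for that linear address must be flushed before
 * the new mapping can be relied on.  pPte and uNewPte are hypothetical.
 *
 * @code
 *     pPte->u = uNewPte;                     // update the PTE
 *     ASMInvalidatePage((RTCCUINTXREG)uPtr); // flush the TLB entry for the page
 * @endcode
 */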


/**
 * Write back the internal caches and invalidate them.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
RT_ASM_DECL_PRAGMA_WATCOM(void) ASMWriteBackAndInvalidateCaches(void);
#else
DECLINLINE(void) ASMWriteBackAndInvalidateCaches(void)
{
# if RT_INLINE_ASM_USES_INTRIN
    __wbinvd();

# elif RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("wbinvd");
# else
    __asm
    {
        wbinvd
    }
# endif
}
#endif


/**
 * Invalidate internal and (perhaps) external caches without first
 * flushing dirty cache lines. Use with extreme care.
 */
#if RT_INLINE_ASM_EXTERNAL
RT_ASM_DECL_PRAGMA_WATCOM(void) ASMInvalidateInternalCaches(void);
#else
DECLINLINE(void) ASMInvalidateInternalCaches(void)
{
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("invd");
# else
    __asm
    {
        invd
    }
# endif
}
#endif


/**
 * Memory load/store fence, waits for any pending writes and reads to complete.
 * Requires the X86_CPUID_FEATURE_EDX_SSE2 CPUID bit set.
 */
DECLINLINE(void) ASMMemoryFenceSSE2(void)
{
#if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__ (".byte 0x0f,0xae,0xf0\n\t");
#elif RT_INLINE_ASM_USES_INTRIN
    _mm_mfence();
#else
    __asm
    {
        _emit   0x0f
        _emit   0xae
        _emit   0xf0
    }
#endif
}


/**
 * Memory store fence, waits for any writes to complete.
 * Requires the X86_CPUID_FEATURE_EDX_SSE CPUID bit set.
 */
DECLINLINE(void) ASMWriteFenceSSE(void)
{
#if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__ (".byte 0x0f,0xae,0xf8\n\t");
#elif RT_INLINE_ASM_USES_INTRIN
    _mm_sfence();
#else
    __asm
    {
        _emit   0x0f
        _emit   0xae
        _emit   0xf8
    }
#endif
}


/**
 * Memory load fence, waits for any pending reads to complete.
 * Requires the X86_CPUID_FEATURE_EDX_SSE2 CPUID bit set.
 */
DECLINLINE(void) ASMReadFenceSSE2(void)
{
#if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__ (".byte 0x0f,0xae,0xe8\n\t");
#elif RT_INLINE_ASM_USES_INTRIN
    _mm_lfence();
#else
    __asm
    {
        _emit   0x0f
        _emit   0xae
        _emit   0xe8
    }
#endif
}
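
/*
 * Usage sketch (illustrative only): a publish pattern using the store fence.
 * Ordinary x86 stores are already strongly ordered, so the fence matters
 * mainly when weakly-ordered (non-temporal) stores are in play; the two
 * globals are hypothetical.
 *
 * @code
 *     g_SharedPayload = uValue;  // possibly a non-temporal store
 *     ASMWriteFenceSSE();        // all prior stores complete before...
 *     g_fPayloadReady = true;    // ...the ready flag becomes visible
 * @endcode
 */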

#if !defined(_MSC_VER) || !defined(RT_ARCH_AMD64)

/**
 * Clear the AC bit in the EFLAGS register.
 * Requires the X86_CPUID_STEXT_FEATURE_EBX_SMAP CPUID bit set.
 * Must be executed in R0.
 */
DECLINLINE(void) ASMClearAC(void)
{
#if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__ (".byte 0x0f,0x01,0xca\n\t");
#else
    __asm
    {
        _emit   0x0f
        _emit   0x01
        _emit   0xca
    }
#endif
}


/**
 * Set the AC bit in the EFLAGS register.
 * Requires the X86_CPUID_STEXT_FEATURE_EBX_SMAP CPUID bit set.
 * Must be executed in R0.
 */
DECLINLINE(void) ASMSetAC(void)
{
#if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__ (".byte 0x0f,0x01,0xcb\n\t");
#else
    __asm
    {
        _emit   0x0f
        _emit   0x01
        _emit   0xcb
    }
#endif
}

#endif /* !_MSC_VER || !RT_ARCH_AMD64 */
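
/*
 * Usage sketch (illustrative only): with SMAP active (CR4.SMAP set), ring-0
 * code brackets each deliberate user-memory access by setting and clearing
 * EFLAGS.AC, keeping the window as narrow as possible.  pUserPtr is a
 * hypothetical user-mode pointer.
 *
 * @code
 *     ASMSetAC();                  // open the SMAP window
 *     uint8_t const b = *pUserPtr; // the deliberate user access
 *     ASMClearAC();                // and close it again immediately
 * @endcode
 */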


/*
 * Include #pragma aux definitions for Watcom C/C++.
 */
#if defined(__WATCOMC__) && ARCH_BITS == 16
# define IPRT_ASM_AMD64_X86_WATCOM_16_INSTANTIATE
# undef ___iprt_asm_amd64_x86_watcom_16_h
# include "asm-amd64-x86-watcom-16.h"
#elif defined(__WATCOMC__) && ARCH_BITS == 32
# define IPRT_ASM_AMD64_X86_WATCOM_32_INSTANTIATE
# undef ___iprt_asm_amd64_x86_watcom_32_h
# include "asm-amd64-x86-watcom-32.h"
#endif


/** @} */
#endif