source: vbox/trunk/include/iprt/asm-amd64-x86.h@83488

/** @file
 * IPRT - AMD64 and x86 Specific Assembly Functions.
 */

/*
 * Copyright (C) 2006-2020 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */

#ifndef IPRT_INCLUDED_asm_amd64_x86_h
#define IPRT_INCLUDED_asm_amd64_x86_h
#ifndef RT_WITHOUT_PRAGMA_ONCE
# pragma once
#endif

#include <iprt/types.h>
#include <iprt/assert.h>
#if !defined(RT_ARCH_AMD64) && !defined(RT_ARCH_X86)
# error "Not on AMD64 or x86"
#endif

#if defined(_MSC_VER) && RT_INLINE_ASM_USES_INTRIN
# pragma warning(push)
# pragma warning(disable:4668) /* Several incorrect __cplusplus uses. */
# pragma warning(disable:4255) /* Incorrect __slwpcb prototype. */
# include <intrin.h>
# pragma warning(pop)
/* Emit the intrinsics at all optimization levels. */
# pragma intrinsic(_ReadWriteBarrier)
# pragma intrinsic(__cpuid)
# if RT_INLINE_ASM_USES_INTRIN >= 16 /*?*/
#  pragma intrinsic(__cpuidex)
# endif
# pragma intrinsic(_enable)
# pragma intrinsic(_disable)
# pragma intrinsic(__rdtsc)
# pragma intrinsic(__readmsr)
# pragma intrinsic(__writemsr)
# pragma intrinsic(__outbyte)
# pragma intrinsic(__outbytestring)
# pragma intrinsic(__outword)
# pragma intrinsic(__outwordstring)
# pragma intrinsic(__outdword)
# pragma intrinsic(__outdwordstring)
# pragma intrinsic(__inbyte)
# pragma intrinsic(__inbytestring)
# pragma intrinsic(__inword)
# pragma intrinsic(__inwordstring)
# pragma intrinsic(__indword)
# pragma intrinsic(__indwordstring)
# pragma intrinsic(__invlpg)
# pragma intrinsic(__wbinvd)
# pragma intrinsic(__readcr0)
# pragma intrinsic(__readcr2)
# pragma intrinsic(__readcr3)
# pragma intrinsic(__readcr4)
# pragma intrinsic(__writecr0)
# pragma intrinsic(__writecr3)
# pragma intrinsic(__writecr4)
# pragma intrinsic(__readdr)
# pragma intrinsic(__writedr)
# ifdef RT_ARCH_AMD64
#  pragma intrinsic(__readcr8)
#  pragma intrinsic(__writecr8)
# endif
# if RT_INLINE_ASM_USES_INTRIN >= 14
#  pragma intrinsic(__halt)
# endif
# if RT_INLINE_ASM_USES_INTRIN >= 15
/*#  pragma intrinsic(__readeflags) - buggy intrinsics in VC++ 2010, reordering/optimizers issues
#  pragma intrinsic(__writeeflags) */
#  pragma intrinsic(__rdtscp)
# endif
#endif


/*
 * Undefine all symbols we have Watcom C/C++ #pragma aux'es for.
 */
#if defined(__WATCOMC__) && ARCH_BITS == 16
# include "asm-amd64-x86-watcom-16.h"
#elif defined(__WATCOMC__) && ARCH_BITS == 32
# include "asm-amd64-x86-watcom-32.h"
#endif


/** @defgroup grp_rt_asm_amd64_x86  AMD64 and x86 Specific ASM Routines
 * @ingroup grp_rt_asm
 * @{
 */

/** @todo find a more proper place for these structures? */

#pragma pack(1)
/** IDTR */
typedef struct RTIDTR
{
    /** Size of the IDT. */
    uint16_t    cbIdt;
    /** Address of the IDT. */
#if ARCH_BITS != 64
    uint32_t    pIdt;
#else
    uint64_t    pIdt;
#endif
} RTIDTR, RT_FAR *PRTIDTR;
#pragma pack()

#pragma pack(1)
/** @internal */
typedef struct RTIDTRALIGNEDINT
{
    /** Alignment padding. */
    uint16_t    au16Padding[ARCH_BITS == 64 ? 3 : 1];
    /** The IDTR structure. */
    RTIDTR      Idtr;
} RTIDTRALIGNEDINT;
#pragma pack()

/** Wrapped RTIDTR for preventing misalignment exceptions. */
typedef union RTIDTRALIGNED
{
    /** Try to make sure this structure has optimal alignment. */
    uint64_t    auAlignmentHack[ARCH_BITS == 64 ? 2 : 1];
    /** Aligned structure. */
    RTIDTRALIGNEDINT s;
} RTIDTRALIGNED;
AssertCompileSize(RTIDTRALIGNED, ((ARCH_BITS == 64) + 1) * 8);
/** Pointer to an RTIDTR alignment wrapper. */
typedef RTIDTRALIGNED RT_FAR *PRIDTRALIGNED;


#pragma pack(1)
/** GDTR */
typedef struct RTGDTR
{
    /** Size of the GDT. */
    uint16_t    cbGdt;
    /** Address of the GDT. */
#if ARCH_BITS != 64
    uint32_t    pGdt;
#else
    uint64_t    pGdt;
#endif
} RTGDTR, RT_FAR *PRTGDTR;
#pragma pack()

#pragma pack(1)
/** @internal */
typedef struct RTGDTRALIGNEDINT
{
    /** Alignment padding. */
    uint16_t    au16Padding[ARCH_BITS == 64 ? 3 : 1];
    /** The GDTR structure. */
    RTGDTR      Gdtr;
} RTGDTRALIGNEDINT;
#pragma pack()

/** Wrapped RTGDTR for preventing misalignment exceptions. */
typedef union RTGDTRALIGNED
{
    /** Try to make sure this structure has optimal alignment. */
    uint64_t    auAlignmentHack[ARCH_BITS == 64 ? 2 : 1];
    /** Aligned structure. */
    RTGDTRALIGNEDINT s;
} RTGDTRALIGNED;
AssertCompileSize(RTGDTRALIGNED, ((ARCH_BITS == 64) + 1) * 8);
/** Pointer to an RTGDTR alignment wrapper. */
typedef RTGDTRALIGNED RT_FAR *PRGDTRALIGNED;


/**
 * Gets the content of the IDTR CPU register.
 * @param   pIdtr   Where to store the IDTR contents.
 */
#if RT_INLINE_ASM_EXTERNAL
RT_ASM_DECL_PRAGMA_WATCOM(void) ASMGetIDTR(PRTIDTR pIdtr);
#else
DECLINLINE(void) ASMGetIDTR(PRTIDTR pIdtr)
{
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("sidt %0" : "=m" (*pIdtr));
# else
    __asm
    {
#  ifdef RT_ARCH_AMD64
        mov     rax, [pIdtr]
        sidt    [rax]
#  else
        mov     eax, [pIdtr]
        sidt    [eax]
#  endif
    }
# endif
}
#endif


/**
 * Gets the content of the IDTR.LIMIT CPU register.
 * @returns IDTR limit.
 */
#if RT_INLINE_ASM_EXTERNAL
RT_ASM_DECL_PRAGMA_WATCOM(uint16_t) ASMGetIdtrLimit(void);
#else
DECLINLINE(uint16_t) ASMGetIdtrLimit(void)
{
    RTIDTRALIGNED TmpIdtr;
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("sidt %0" : "=m" (TmpIdtr.s.Idtr));
# else
    __asm
    {
        sidt    [TmpIdtr.s.Idtr]
    }
# endif
    return TmpIdtr.s.Idtr.cbIdt;
}
#endif
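
/* Editorial usage sketch (not part of the original header): reading the whole
 * IDTR through the RTIDTRALIGNED wrapper, the same trick ASMGetIdtrLimit()
 * uses to keep SIDT's operand safely aligned, then cross-checking the limit.
 * Only declarations from this file and iprt/assert.h are assumed.
 * @code
 *     RTIDTRALIGNED TmpIdtr;
 *     ASMGetIDTR(&TmpIdtr.s.Idtr);
 *     Assert(TmpIdtr.s.Idtr.cbIdt == ASMGetIdtrLimit());
 * @endcode
 */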


/**
 * Sets the content of the IDTR CPU register.
 * @param   pIdtr   Where to load the IDTR contents from
 */
#if RT_INLINE_ASM_EXTERNAL
RT_ASM_DECL_PRAGMA_WATCOM(void) ASMSetIDTR(const RTIDTR RT_FAR *pIdtr);
#else
DECLINLINE(void) ASMSetIDTR(const RTIDTR RT_FAR *pIdtr)
{
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("lidt %0" : : "m" (*pIdtr));
# else
    __asm
    {
#  ifdef RT_ARCH_AMD64
        mov     rax, [pIdtr]
        lidt    [rax]
#  else
        mov     eax, [pIdtr]
        lidt    [eax]
#  endif
    }
# endif
}
#endif


/**
 * Gets the content of the GDTR CPU register.
 * @param   pGdtr   Where to store the GDTR contents.
 */
#if RT_INLINE_ASM_EXTERNAL
RT_ASM_DECL_PRAGMA_WATCOM(void) ASMGetGDTR(PRTGDTR pGdtr);
#else
DECLINLINE(void) ASMGetGDTR(PRTGDTR pGdtr)
{
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("sgdt %0" : "=m" (*pGdtr));
# else
    __asm
    {
#  ifdef RT_ARCH_AMD64
        mov     rax, [pGdtr]
        sgdt    [rax]
#  else
        mov     eax, [pGdtr]
        sgdt    [eax]
#  endif
    }
# endif
}
#endif


/**
 * Sets the content of the GDTR CPU register.
 * @param   pGdtr   Where to load the GDTR contents from
 */
#if RT_INLINE_ASM_EXTERNAL
RT_ASM_DECL_PRAGMA_WATCOM(void) ASMSetGDTR(const RTGDTR RT_FAR *pGdtr);
#else
DECLINLINE(void) ASMSetGDTR(const RTGDTR RT_FAR *pGdtr)
{
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("lgdt %0" : : "m" (*pGdtr));
# else
    __asm
    {
#  ifdef RT_ARCH_AMD64
        mov     rax, [pGdtr]
        lgdt    [rax]
#  else
        mov     eax, [pGdtr]
        lgdt    [eax]
#  endif
    }
# endif
}
#endif



/**
 * Get the cs register.
 * @returns cs.
 */
#if RT_INLINE_ASM_EXTERNAL
RT_ASM_DECL_PRAGMA_WATCOM(RTSEL) ASMGetCS(void);
#else
DECLINLINE(RTSEL) ASMGetCS(void)
{
    RTSEL SelCS;
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("movw %%cs, %0\n\t" : "=r" (SelCS));
# else
    __asm
    {
        mov     ax, cs
        mov     [SelCS], ax
    }
# endif
    return SelCS;
}
#endif


/**
 * Get the DS register.
 * @returns DS.
 */
#if RT_INLINE_ASM_EXTERNAL
RT_ASM_DECL_PRAGMA_WATCOM(RTSEL) ASMGetDS(void);
#else
DECLINLINE(RTSEL) ASMGetDS(void)
{
    RTSEL SelDS;
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("movw %%ds, %0\n\t" : "=r" (SelDS));
# else
    __asm
    {
        mov     ax, ds
        mov     [SelDS], ax
    }
# endif
    return SelDS;
}
#endif


/**
 * Get the ES register.
 * @returns ES.
 */
#if RT_INLINE_ASM_EXTERNAL
RT_ASM_DECL_PRAGMA_WATCOM(RTSEL) ASMGetES(void);
#else
DECLINLINE(RTSEL) ASMGetES(void)
{
    RTSEL SelES;
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("movw %%es, %0\n\t" : "=r" (SelES));
# else
    __asm
    {
        mov     ax, es
        mov     [SelES], ax
    }
# endif
    return SelES;
}
#endif


/**
 * Get the FS register.
 * @returns FS.
 */
#if RT_INLINE_ASM_EXTERNAL
RT_ASM_DECL_PRAGMA_WATCOM(RTSEL) ASMGetFS(void);
#else
DECLINLINE(RTSEL) ASMGetFS(void)
{
    RTSEL SelFS;
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("movw %%fs, %0\n\t" : "=r" (SelFS));
# else
    __asm
    {
        mov     ax, fs
        mov     [SelFS], ax
    }
# endif
    return SelFS;
}
#endif


/**
 * Get the GS register.
 * @returns GS.
 */
#if RT_INLINE_ASM_EXTERNAL
RT_ASM_DECL_PRAGMA_WATCOM(RTSEL) ASMGetGS(void);
#else
DECLINLINE(RTSEL) ASMGetGS(void)
{
    RTSEL SelGS;
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("movw %%gs, %0\n\t" : "=r" (SelGS));
# else
    __asm
    {
        mov     ax, gs
        mov     [SelGS], ax
    }
# endif
    return SelGS;
}
#endif


/**
 * Get the SS register.
 * @returns SS.
 */
#if RT_INLINE_ASM_EXTERNAL
RT_ASM_DECL_PRAGMA_WATCOM(RTSEL) ASMGetSS(void);
#else
DECLINLINE(RTSEL) ASMGetSS(void)
{
    RTSEL SelSS;
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("movw %%ss, %0\n\t" : "=r" (SelSS));
# else
    __asm
    {
        mov     ax, ss
        mov     [SelSS], ax
    }
# endif
    return SelSS;
}
#endif


/**
 * Get the TR register.
 * @returns TR.
 */
#if RT_INLINE_ASM_EXTERNAL
RT_ASM_DECL_PRAGMA_WATCOM(RTSEL) ASMGetTR(void);
#else
DECLINLINE(RTSEL) ASMGetTR(void)
{
    RTSEL SelTR;
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("str %w0\n\t" : "=r" (SelTR));
# else
    __asm
    {
        str     ax
        mov     [SelTR], ax
    }
# endif
    return SelTR;
}
#endif


/**
 * Get the LDTR register.
 * @returns LDTR.
 */
#if RT_INLINE_ASM_EXTERNAL
RT_ASM_DECL_PRAGMA_WATCOM(RTSEL) ASMGetLDTR(void);
#else
DECLINLINE(RTSEL) ASMGetLDTR(void)
{
    RTSEL SelLDTR;
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("sldt %w0\n\t" : "=r" (SelLDTR));
# else
    __asm
    {
        sldt    ax
        mov     [SelLDTR], ax
    }
# endif
    return SelLDTR;
}
#endif
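
/* Editorial usage sketch (not part of the original header): snapshotting all
 * the selector registers, e.g. for a debug dump.  RTLogPrintf is assumed to
 * come from iprt/log.h; any printf-style logger works the same way.
 * @code
 *     RTLogPrintf("cs=%#06x ss=%#06x ds=%#06x es=%#06x fs=%#06x gs=%#06x tr=%#06x ldtr=%#06x\n",
 *                 ASMGetCS(), ASMGetSS(), ASMGetDS(), ASMGetES(),
 *                 ASMGetFS(), ASMGetGS(), ASMGetTR(), ASMGetLDTR());
 * @endcode
 */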


/**
 * Get the access rights for the segment selector.
 *
 * @returns The access rights on success or UINT32_MAX on failure.
 * @param   uSel        The selector value.
 *
 * @remarks Using UINT32_MAX for failure is chosen because valid access rights
 *          always have bits 0:7 as 0 (on both Intel & AMD).
 */
#if RT_INLINE_ASM_EXTERNAL
RT_ASM_DECL_PRAGMA_WATCOM(uint32_t) ASMGetSegAttr(uint32_t uSel);
#else
DECLINLINE(uint32_t) ASMGetSegAttr(uint32_t uSel)
{
    uint32_t uAttr;
    /* LAR only accesses 16 bits of the source operand, but eax for the
       destination operand is required for getting the full 32-bit access rights. */
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("lar %1, %%eax\n\t"
                         "jz done%=\n\t"
                         "movl $0xffffffff, %%eax\n\t"
                         "done%=:\n\t"
                         "movl %%eax, %0\n\t"
                         : "=r" (uAttr)
                         : "r" (uSel)
                         : "cc", "%eax");
# else
    __asm
    {
        lar     eax, [uSel]
        jz      done
        mov     eax, 0ffffffffh
    done:
        mov     [uAttr], eax
    }
# endif
    return uAttr;
}
#endif
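
/* Editorial usage sketch (not part of the original header): pulling the DPL
 * out of the LAR result.  Per the Intel/AMD LAR definition, descriptor bytes
 * 5 thru 7 land in bits 8-23 of the result, putting the DPL in bits 13:14;
 * the shift and mask below are written out here as assumptions rather than
 * taken from this file.
 * @code
 *     uint32_t const fAttr = ASMGetSegAttr(ASMGetSS());
 *     if (fAttr != UINT32_MAX)
 *     {
 *         uint8_t const uDpl = (uint8_t)((fAttr >> 13) & 3);
 *         ...
 *     }
 * @endcode
 */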


/**
 * Get the [RE]FLAGS register.
 * @returns [RE]FLAGS.
 */
#if RT_INLINE_ASM_EXTERNAL /*&& RT_INLINE_ASM_USES_INTRIN < 15 - buggy intrinsics in VC++ 2010, reordering/optimizers issues. */
RT_ASM_DECL_PRAGMA_WATCOM(RTCCUINTREG) ASMGetFlags(void);
#else
DECLINLINE(RTCCUINTREG) ASMGetFlags(void)
{
    RTCCUINTREG uFlags;
# if RT_INLINE_ASM_GNU_STYLE
#  ifdef RT_ARCH_AMD64
    __asm__ __volatile__("pushfq\n\t"
                         "popq %0\n\t"
                         : "=r" (uFlags));
#  else
    __asm__ __volatile__("pushfl\n\t"
                         "popl %0\n\t"
                         : "=r" (uFlags));
#  endif
# elif RT_INLINE_ASM_USES_INTRIN >= 15
    uFlags = __readeflags();
# else
    __asm
    {
#  ifdef RT_ARCH_AMD64
        pushfq
        pop     [uFlags]
#  else
        pushfd
        pop     [uFlags]
#  endif
    }
# endif
    return uFlags;
}
#endif


/**
 * Set the [RE]FLAGS register.
 * @param   uFlags      The new [RE]FLAGS value.
 */
#if RT_INLINE_ASM_EXTERNAL /*&& RT_INLINE_ASM_USES_INTRIN < 15 - see __readeflags() above. */
RT_ASM_DECL_PRAGMA_WATCOM(void) ASMSetFlags(RTCCUINTREG uFlags);
#else
DECLINLINE(void) ASMSetFlags(RTCCUINTREG uFlags)
{
# if RT_INLINE_ASM_GNU_STYLE
#  ifdef RT_ARCH_AMD64
    __asm__ __volatile__("pushq %0\n\t"
                         "popfq\n\t"
                         : : "g" (uFlags));
#  else
    __asm__ __volatile__("pushl %0\n\t"
                         "popfl\n\t"
                         : : "g" (uFlags));
#  endif
# elif RT_INLINE_ASM_USES_INTRIN >= 15
    __writeeflags(uFlags);
# else
    __asm
    {
#  ifdef RT_ARCH_AMD64
        push    [uFlags]
        popfq
#  else
        push    [uFlags]
        popfd
#  endif
    }
# endif
}
#endif


/**
 * Modifies the [RE]FLAGS register.
 * @returns Original value.
 * @param   fAndEfl     Flags to keep (applied first).
 * @param   fOrEfl      Flags to be set.
 */
#if RT_INLINE_ASM_EXTERNAL /*&& RT_INLINE_ASM_USES_INTRIN < 15 - buggy intrinsics in VC++ 2010, reordering/optimizers issues. */
RT_ASM_DECL_PRAGMA_WATCOM(RTCCUINTREG) ASMChangeFlags(RTCCUINTREG fAndEfl, RTCCUINTREG fOrEfl);
#else
DECLINLINE(RTCCUINTREG) ASMChangeFlags(RTCCUINTREG fAndEfl, RTCCUINTREG fOrEfl)
{
    RTCCUINTREG fOldEfl;
# if RT_INLINE_ASM_GNU_STYLE
#  ifdef RT_ARCH_AMD64
    __asm__ __volatile__("pushfq\n\t"
                         "movq (%%rsp), %0\n\t"
                         "andq %0, %1\n\t"
                         "orq  %3, %1\n\t"
                         "mov  %1, (%%rsp)\n\t"
                         "popfq\n\t"
                         : "=&r" (fOldEfl),
                           "=r" (fAndEfl)
                         : "1" (fAndEfl),
                           "rn" (fOrEfl) );
#  else
    __asm__ __volatile__("pushfl\n\t"
                         "movl (%%esp), %0\n\t"
                         "andl %1, (%%esp)\n\t"
                         "orl  %2, (%%esp)\n\t"
                         "popfl\n\t"
                         : "=&r" (fOldEfl)
                         : "rn" (fAndEfl),
                           "rn" (fOrEfl) );
#  endif
# elif RT_INLINE_ASM_USES_INTRIN >= 15
    fOldEfl = __readeflags();
    __writeeflags((fOldEfl & fAndEfl) | fOrEfl);
# else
    __asm
    {
#  ifdef RT_ARCH_AMD64
        mov     rdx, [fAndEfl]
        mov     rcx, [fOrEfl]
        pushfq
        mov     rax, [rsp]
        and     rdx, rax
        or      rdx, rcx
        mov     [rsp], rdx
        popfq
        mov     [fOldEfl], rax
#  else
        mov     edx, [fAndEfl]
        mov     ecx, [fOrEfl]
        pushfd
        mov     eax, [esp]
        and     edx, eax
        or      edx, ecx
        mov     [esp], edx
        popfd
        mov     [fOldEfl], eax
#  endif
    }
# endif
    return fOldEfl;
}
#endif


/**
 * Modifies the [RE]FLAGS register by ORing in one or more flags.
 * @returns Original value.
 * @param   fOrEfl      The flags to be set (ORed in).
 */
#if RT_INLINE_ASM_EXTERNAL /*&& RT_INLINE_ASM_USES_INTRIN < 15 - buggy intrinsics in VC++ 2010, reordering/optimizers issues. */
RT_ASM_DECL_PRAGMA_WATCOM(RTCCUINTREG) ASMAddFlags(RTCCUINTREG fOrEfl);
#else
DECLINLINE(RTCCUINTREG) ASMAddFlags(RTCCUINTREG fOrEfl)
{
    RTCCUINTREG fOldEfl;
# if RT_INLINE_ASM_GNU_STYLE
#  ifdef RT_ARCH_AMD64
    __asm__ __volatile__("pushfq\n\t"
                         "movq (%%rsp), %0\n\t"
                         "orq  %1, (%%rsp)\n\t"
                         "popfq\n\t"
                         : "=&r" (fOldEfl)
                         : "rn" (fOrEfl) );
#  else
    __asm__ __volatile__("pushfl\n\t"
                         "movl (%%esp), %0\n\t"
                         "orl  %1, (%%esp)\n\t"
                         "popfl\n\t"
                         : "=&r" (fOldEfl)
                         : "rn" (fOrEfl) );
#  endif
# elif RT_INLINE_ASM_USES_INTRIN >= 15
    fOldEfl = __readeflags();
    __writeeflags(fOldEfl | fOrEfl);
# else
    __asm
    {
#  ifdef RT_ARCH_AMD64
        mov     rcx, [fOrEfl]
        pushfq
        mov     rdx, [rsp]
        or      [rsp], rcx
        popfq
        mov     [fOldEfl], rdx
#  else
        mov     ecx, [fOrEfl]
        pushfd
        mov     edx, [esp]
        or      [esp], ecx
        popfd
        mov     [fOldEfl], edx
#  endif
    }
# endif
    return fOldEfl;
}
#endif


/**
 * Modifies the [RE]FLAGS register by AND'ing out one or more flags.
 * @returns Original value.
 * @param   fAndEfl     The flags to keep.
 */
#if RT_INLINE_ASM_EXTERNAL /*&& RT_INLINE_ASM_USES_INTRIN < 15 - buggy intrinsics in VC++ 2010, reordering/optimizers issues. */
RT_ASM_DECL_PRAGMA_WATCOM(RTCCUINTREG) ASMClearFlags(RTCCUINTREG fAndEfl);
#else
DECLINLINE(RTCCUINTREG) ASMClearFlags(RTCCUINTREG fAndEfl)
{
    RTCCUINTREG fOldEfl;
# if RT_INLINE_ASM_GNU_STYLE
#  ifdef RT_ARCH_AMD64
    __asm__ __volatile__("pushfq\n\t"
                         "movq (%%rsp), %0\n\t"
                         "andq %1, (%%rsp)\n\t"
                         "popfq\n\t"
                         : "=&r" (fOldEfl)
                         : "rn" (fAndEfl) );
#  else
    __asm__ __volatile__("pushfl\n\t"
                         "movl (%%esp), %0\n\t"
                         "andl %1, (%%esp)\n\t"
                         "popfl\n\t"
                         : "=&r" (fOldEfl)
                         : "rn" (fAndEfl) );
#  endif
# elif RT_INLINE_ASM_USES_INTRIN >= 15
    fOldEfl = __readeflags();
    __writeeflags(fOldEfl & fAndEfl);
# else
    __asm
    {
#  ifdef RT_ARCH_AMD64
        mov     rdx, [fAndEfl]
        pushfq
        mov     rax, [rsp]
        and     [rsp], rdx
        popfq
        mov     [fOldEfl], rax
#  else
        mov     edx, [fAndEfl]
        pushfd
        mov     eax, [esp]
        and     [esp], edx
        popfd
        mov     [fOldEfl], eax
#  endif
    }
# endif
    return fOldEfl;
}
#endif
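
/* Editorial usage sketch (not part of the original header): the flag helpers
 * make interrupt-flag save/modify/restore one-liners.  X86_EFL_IF (0x200)
 * normally comes from iprt/x86.h and is written out here as an assumption.
 * @code
 *     RTCCUINTREG const fSaved = ASMClearFlags(~(RTCCUINTREG)0x200); // clear IF, keep old value
 *     ...
 *     ASMChangeFlags(~(RTCCUINTREG)0x200, fSaved & 0x200);           // restore the previous IF
 * @endcode
 */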


/**
 * Gets the content of the CPU timestamp counter register.
 *
 * @returns TSC.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
RT_ASM_DECL_PRAGMA_WATCOM(uint64_t) ASMReadTSC(void);
#else
DECLINLINE(uint64_t) ASMReadTSC(void)
{
    RTUINT64U u;
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("rdtsc\n\t" : "=a" (u.s.Lo), "=d" (u.s.Hi));
# else
#  if RT_INLINE_ASM_USES_INTRIN
    u.u = __rdtsc();
#  else
    __asm
    {
        rdtsc
        mov     [u.s.Lo], eax
        mov     [u.s.Hi], edx
    }
#  endif
# endif
    return u.u;
}
#endif
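
/* Editorial usage sketch (not part of the original header): a rough cycle
 * measurement.  Note that RDTSC is not a serializing instruction, so precise
 * measurements would normally add fencing around the region being timed.
 * @code
 *     uint64_t const uStart = ASMReadTSC();
 *     ...code being timed...
 *     uint64_t const cTicks = ASMReadTSC() - uStart;
 * @endcode
 */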


/**
 * Gets the content of the CPU timestamp counter register and the
 * associated AUX value.
 *
 * @returns TSC.
 * @param   puAux   Where to store the AUX value.
 */
#if RT_INLINE_ASM_EXTERNAL && RT_INLINE_ASM_USES_INTRIN < 15
RT_ASM_DECL_PRAGMA_WATCOM(uint64_t) ASMReadTscWithAux(uint32_t RT_FAR *puAux);
#else
DECLINLINE(uint64_t) ASMReadTscWithAux(uint32_t RT_FAR *puAux)
{
    RTUINT64U u;
# if RT_INLINE_ASM_GNU_STYLE
    /* rdtscp is not supported by the ancient linux build VM of course :-( */
    /*__asm__ __volatile__("rdtscp\n\t" : "=a" (u.s.Lo), "=d" (u.s.Hi), "=c" (*puAux)); */
    __asm__ __volatile__(".byte 0x0f,0x01,0xf9\n\t" : "=a" (u.s.Lo), "=d" (u.s.Hi), "=c" (*puAux));
# else
#  if RT_INLINE_ASM_USES_INTRIN >= 15
    u.u = __rdtscp(puAux);
#  else
    __asm
    {
        rdtscp
        mov     [u.s.Lo], eax
        mov     [u.s.Hi], edx
        mov     eax, [puAux]
        mov     [eax], ecx
    }
#  endif
# endif
    return u.u;
}
#endif


/**
 * Performs the cpuid instruction returning all registers.
 *
 * @param   uOperator   CPUID operation (eax).
 * @param   pvEAX       Where to store eax.
 * @param   pvEBX       Where to store ebx.
 * @param   pvECX       Where to store ecx.
 * @param   pvEDX       Where to store edx.
 * @remark  We're using void pointers to ease the use of special bitfield structures and such.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(void) ASMCpuId(uint32_t uOperator, void RT_FAR *pvEAX, void RT_FAR *pvEBX, void RT_FAR *pvECX, void RT_FAR *pvEDX);
#else
DECLINLINE(void) ASMCpuId(uint32_t uOperator, void RT_FAR *pvEAX, void RT_FAR *pvEBX, void RT_FAR *pvECX, void RT_FAR *pvEDX)
{
# if RT_INLINE_ASM_GNU_STYLE
#  ifdef RT_ARCH_AMD64
    RTCCUINTREG uRAX, uRBX, uRCX, uRDX;
    __asm__ __volatile__ ("cpuid\n\t"
                          : "=a" (uRAX),
                            "=b" (uRBX),
                            "=c" (uRCX),
                            "=d" (uRDX)
                          : "0" (uOperator), "2" (0));
    *(uint32_t RT_FAR *)pvEAX = (uint32_t)uRAX;
    *(uint32_t RT_FAR *)pvEBX = (uint32_t)uRBX;
    *(uint32_t RT_FAR *)pvECX = (uint32_t)uRCX;
    *(uint32_t RT_FAR *)pvEDX = (uint32_t)uRDX;
#  else
    __asm__ __volatile__ ("xchgl %%ebx, %1\n\t"
                          "cpuid\n\t"
                          "xchgl %%ebx, %1\n\t"
                          : "=a" (*(uint32_t *)pvEAX),
                            "=r" (*(uint32_t *)pvEBX),
                            "=c" (*(uint32_t *)pvECX),
                            "=d" (*(uint32_t *)pvEDX)
                          : "0" (uOperator), "2" (0));
#  endif

# elif RT_INLINE_ASM_USES_INTRIN
    int aInfo[4];
    __cpuid(aInfo, uOperator);
    *(uint32_t RT_FAR *)pvEAX = aInfo[0];
    *(uint32_t RT_FAR *)pvEBX = aInfo[1];
    *(uint32_t RT_FAR *)pvECX = aInfo[2];
    *(uint32_t RT_FAR *)pvEDX = aInfo[3];

# else
    uint32_t uEAX;
    uint32_t uEBX;
    uint32_t uECX;
    uint32_t uEDX;
    __asm
    {
        push    ebx
        mov     eax, [uOperator]
        cpuid
        mov     [uEAX], eax
        mov     [uEBX], ebx
        mov     [uECX], ecx
        mov     [uEDX], edx
        pop     ebx
    }
    *(uint32_t RT_FAR *)pvEAX = uEAX;
    *(uint32_t RT_FAR *)pvEBX = uEBX;
    *(uint32_t RT_FAR *)pvECX = uECX;
    *(uint32_t RT_FAR *)pvEDX = uEDX;
# endif
}
#endif
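
/* Editorial usage sketch (not part of the original header): fetching the
 * 12-character vendor string from leaf 0.  The EBX, EDX, ECX register order
 * is the one the ASMIs*CpuEx() tests further down rely on; memcpy is assumed
 * from string.h.
 * @code
 *     uint32_t uMaxLeaf, uEbx, uEcx, uEdx;
 *     char     szVendor[13];
 *     ASMCpuId(0, &uMaxLeaf, &uEbx, &uEcx, &uEdx);
 *     memcpy(&szVendor[0], &uEbx, 4);
 *     memcpy(&szVendor[4], &uEdx, 4);
 *     memcpy(&szVendor[8], &uEcx, 4);
 *     szVendor[12] = '\0';
 * @endcode
 */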


/**
 * Performs the CPUID instruction with EAX and ECX input returning ALL output
 * registers.
 *
 * @param   uOperator   CPUID operation (eax).
 * @param   uIdxECX     ecx index
 * @param   pvEAX       Where to store eax.
 * @param   pvEBX       Where to store ebx.
 * @param   pvECX       Where to store ecx.
 * @param   pvEDX       Where to store edx.
 * @remark  We're using void pointers to ease the use of special bitfield structures and such.
 */
#if RT_INLINE_ASM_EXTERNAL || RT_INLINE_ASM_USES_INTRIN
DECLASM(void) ASMCpuId_Idx_ECX(uint32_t uOperator, uint32_t uIdxECX, void RT_FAR *pvEAX, void RT_FAR *pvEBX, void RT_FAR *pvECX, void RT_FAR *pvEDX);
#else
DECLINLINE(void) ASMCpuId_Idx_ECX(uint32_t uOperator, uint32_t uIdxECX, void RT_FAR *pvEAX, void RT_FAR *pvEBX, void RT_FAR *pvECX, void RT_FAR *pvEDX)
{
# if RT_INLINE_ASM_GNU_STYLE
#  ifdef RT_ARCH_AMD64
    RTCCUINTREG uRAX, uRBX, uRCX, uRDX;
    __asm__ ("cpuid\n\t"
             : "=a" (uRAX),
               "=b" (uRBX),
               "=c" (uRCX),
               "=d" (uRDX)
             : "0" (uOperator),
               "2" (uIdxECX));
    *(uint32_t RT_FAR *)pvEAX = (uint32_t)uRAX;
    *(uint32_t RT_FAR *)pvEBX = (uint32_t)uRBX;
    *(uint32_t RT_FAR *)pvECX = (uint32_t)uRCX;
    *(uint32_t RT_FAR *)pvEDX = (uint32_t)uRDX;
#  else
    __asm__ ("xchgl %%ebx, %1\n\t"
             "cpuid\n\t"
             "xchgl %%ebx, %1\n\t"
             : "=a" (*(uint32_t *)pvEAX),
               "=r" (*(uint32_t *)pvEBX),
               "=c" (*(uint32_t *)pvECX),
               "=d" (*(uint32_t *)pvEDX)
             : "0" (uOperator),
               "2" (uIdxECX));
#  endif

# elif RT_INLINE_ASM_USES_INTRIN
    int aInfo[4];
    __cpuidex(aInfo, uOperator, uIdxECX);
    *(uint32_t RT_FAR *)pvEAX = aInfo[0];
    *(uint32_t RT_FAR *)pvEBX = aInfo[1];
    *(uint32_t RT_FAR *)pvECX = aInfo[2];
    *(uint32_t RT_FAR *)pvEDX = aInfo[3];

# else
    uint32_t uEAX;
    uint32_t uEBX;
    uint32_t uECX;
    uint32_t uEDX;
    __asm
    {
        push    ebx
        mov     eax, [uOperator]
        mov     ecx, [uIdxECX]
        cpuid
        mov     [uEAX], eax
        mov     [uEBX], ebx
        mov     [uECX], ecx
        mov     [uEDX], edx
        pop     ebx
    }
    *(uint32_t RT_FAR *)pvEAX = uEAX;
    *(uint32_t RT_FAR *)pvEBX = uEBX;
    *(uint32_t RT_FAR *)pvECX = uECX;
    *(uint32_t RT_FAR *)pvEDX = uEDX;
# endif
}
#endif


/**
 * CPUID variant that initializes all 4 registers before the CPUID instruction.
 *
 * @returns The EAX result value.
 * @param   uOperator   CPUID operation (eax).
 * @param   uInitEBX    The value to assign EBX prior to the CPUID instruction.
 * @param   uInitECX    The value to assign ECX prior to the CPUID instruction.
 * @param   uInitEDX    The value to assign EDX prior to the CPUID instruction.
 * @param   pvEAX       Where to store eax. Optional.
 * @param   pvEBX       Where to store ebx. Optional.
 * @param   pvECX       Where to store ecx. Optional.
 * @param   pvEDX       Where to store edx. Optional.
 */
DECLASM(uint32_t) ASMCpuIdExSlow(uint32_t uOperator, uint32_t uInitEBX, uint32_t uInitECX, uint32_t uInitEDX,
                                 void RT_FAR *pvEAX, void RT_FAR *pvEBX, void RT_FAR *pvECX, void RT_FAR *pvEDX);


/**
 * Performs the cpuid instruction returning ecx and edx.
 *
 * @param   uOperator   CPUID operation (eax).
 * @param   pvECX       Where to store ecx.
 * @param   pvEDX       Where to store edx.
 * @remark  We're using void pointers to ease the use of special bitfield structures and such.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
RT_ASM_DECL_PRAGMA_WATCOM(void) ASMCpuId_ECX_EDX(uint32_t uOperator, void RT_FAR *pvECX, void RT_FAR *pvEDX);
#else
DECLINLINE(void) ASMCpuId_ECX_EDX(uint32_t uOperator, void RT_FAR *pvECX, void RT_FAR *pvEDX)
{
    uint32_t uEBX;
    ASMCpuId(uOperator, &uOperator, &uEBX, pvECX, pvEDX);
}
#endif


/**
 * Performs the cpuid instruction returning eax.
 *
 * @param   uOperator   CPUID operation (eax).
 * @returns EAX after cpuid operation.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
RT_ASM_DECL_PRAGMA_WATCOM(uint32_t) ASMCpuId_EAX(uint32_t uOperator);
#else
DECLINLINE(uint32_t) ASMCpuId_EAX(uint32_t uOperator)
{
    RTCCUINTREG xAX;
# if RT_INLINE_ASM_GNU_STYLE
#  ifdef RT_ARCH_AMD64
    __asm__ ("cpuid"
             : "=a" (xAX)
             : "0" (uOperator)
             : "rbx", "rcx", "rdx");
#  elif (defined(PIC) || defined(__PIC__)) && defined(__i386__)
    __asm__ ("push %%ebx\n\t"
             "cpuid\n\t"
             "pop %%ebx\n\t"
             : "=a" (xAX)
             : "0" (uOperator)
             : "ecx", "edx");
#  else
    __asm__ ("cpuid"
             : "=a" (xAX)
             : "0" (uOperator)
             : "edx", "ecx", "ebx");
#  endif

# elif RT_INLINE_ASM_USES_INTRIN
    int aInfo[4];
    __cpuid(aInfo, uOperator);
    xAX = aInfo[0];

# else
    __asm
    {
        push    ebx
        mov     eax, [uOperator]
        cpuid
        mov     [xAX], eax
        pop     ebx
    }
# endif
    return (uint32_t)xAX;
}
#endif


/**
 * Performs the cpuid instruction returning ebx.
 *
 * @param   uOperator   CPUID operation (eax).
 * @returns EBX after cpuid operation.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
RT_ASM_DECL_PRAGMA_WATCOM(uint32_t) ASMCpuId_EBX(uint32_t uOperator);
#else
DECLINLINE(uint32_t) ASMCpuId_EBX(uint32_t uOperator)
{
    RTCCUINTREG xBX;
# if RT_INLINE_ASM_GNU_STYLE
#  ifdef RT_ARCH_AMD64
    RTCCUINTREG uSpill;
    __asm__ ("cpuid"
             : "=a" (uSpill),
               "=b" (xBX)
             : "0" (uOperator)
             : "rdx", "rcx");
#  elif (defined(PIC) || defined(__PIC__)) && defined(__i386__)
    __asm__ ("push %%ebx\n\t"
             "cpuid\n\t"
             "mov %%ebx, %%edx\n\t"
             "pop %%ebx\n\t"
             : "=a" (uOperator),
               "=d" (xBX)
             : "0" (uOperator)
             : "ecx");
#  else
    __asm__ ("cpuid"
             : "=a" (uOperator),
               "=b" (xBX)
             : "0" (uOperator)
             : "edx", "ecx");
#  endif

# elif RT_INLINE_ASM_USES_INTRIN
    int aInfo[4];
    __cpuid(aInfo, uOperator);
    xBX = aInfo[1];

# else
    __asm
    {
        push    ebx
        mov     eax, [uOperator]
        cpuid
        mov     [xBX], ebx
        pop     ebx
    }
# endif
    return (uint32_t)xBX;
}
#endif


/**
 * Performs the cpuid instruction returning ecx.
 *
 * @param   uOperator   CPUID operation (eax).
 * @returns ECX after cpuid operation.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
RT_ASM_DECL_PRAGMA_WATCOM(uint32_t) ASMCpuId_ECX(uint32_t uOperator);
#else
DECLINLINE(uint32_t) ASMCpuId_ECX(uint32_t uOperator)
{
    RTCCUINTREG xCX;
# if RT_INLINE_ASM_GNU_STYLE
#  ifdef RT_ARCH_AMD64
    RTCCUINTREG uSpill;
    __asm__ ("cpuid"
             : "=a" (uSpill),
               "=c" (xCX)
             : "0" (uOperator)
             : "rbx", "rdx");
#  elif (defined(PIC) || defined(__PIC__)) && defined(__i386__)
    __asm__ ("push %%ebx\n\t"
             "cpuid\n\t"
             "pop %%ebx\n\t"
             : "=a" (uOperator),
               "=c" (xCX)
             : "0" (uOperator)
             : "edx");
#  else
    __asm__ ("cpuid"
             : "=a" (uOperator),
               "=c" (xCX)
             : "0" (uOperator)
             : "ebx", "edx");
#  endif

# elif RT_INLINE_ASM_USES_INTRIN
    int aInfo[4];
    __cpuid(aInfo, uOperator);
    xCX = aInfo[2];

# else
    __asm
    {
        push    ebx
        mov     eax, [uOperator]
        cpuid
        mov     [xCX], ecx
        pop     ebx
    }
# endif
    return (uint32_t)xCX;
}
#endif


/**
 * Performs the cpuid instruction returning edx.
 *
 * @param   uOperator   CPUID operation (eax).
 * @returns EDX after cpuid operation.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
RT_ASM_DECL_PRAGMA_WATCOM(uint32_t) ASMCpuId_EDX(uint32_t uOperator);
#else
DECLINLINE(uint32_t) ASMCpuId_EDX(uint32_t uOperator)
{
    RTCCUINTREG xDX;
# if RT_INLINE_ASM_GNU_STYLE
#  ifdef RT_ARCH_AMD64
    RTCCUINTREG uSpill;
    __asm__ ("cpuid"
             : "=a" (uSpill),
               "=d" (xDX)
             : "0" (uOperator)
             : "rbx", "rcx");
#  elif (defined(PIC) || defined(__PIC__)) && defined(__i386__)
    __asm__ ("push %%ebx\n\t"
             "cpuid\n\t"
             "pop %%ebx\n\t"
             : "=a" (uOperator),
               "=d" (xDX)
             : "0" (uOperator)
             : "ecx");
#  else
    __asm__ ("cpuid"
             : "=a" (uOperator),
               "=d" (xDX)
             : "0" (uOperator)
             : "ebx", "ecx");
#  endif

# elif RT_INLINE_ASM_USES_INTRIN
    int aInfo[4];
    __cpuid(aInfo, uOperator);
    xDX = aInfo[3];

# else
    __asm
    {
        push    ebx
        mov     eax, [uOperator]
        cpuid
        mov     [xDX], edx
        pop     ebx
    }
# endif
    return (uint32_t)xDX;
}
#endif


/**
 * Checks if the current CPU supports CPUID.
 *
 * @returns true if CPUID is supported.
 */
#ifdef __WATCOMC__
DECLASM(bool) ASMHasCpuId(void);
#else
DECLINLINE(bool) ASMHasCpuId(void)
{
# ifdef RT_ARCH_AMD64
    return true; /* ASSUME that all amd64 compatible CPUs have cpuid. */
# else /* !RT_ARCH_AMD64 */
    bool fRet = false;
#  if RT_INLINE_ASM_GNU_STYLE
    uint32_t u1;
    uint32_t u2;
    __asm__ ("pushf\n\t"
             "pop  %1\n\t"
             "mov  %1, %2\n\t"
             "xorl $0x200000, %1\n\t"
             "push %1\n\t"
             "popf\n\t"
             "pushf\n\t"
             "pop  %1\n\t"
             "cmpl %1, %2\n\t"
             "setne %0\n\t"
             "push %2\n\t"
             "popf\n\t"
             : "=m" (fRet), "=r" (u1), "=r" (u2));
#  else
    __asm
    {
        pushfd
        pop     eax
        mov     ebx, eax
        xor     eax, 0200000h
        push    eax
        popfd
        pushfd
        pop     eax
        cmp     eax, ebx
        setne   fRet
        push    ebx
        popfd
    }
#  endif
    return fRet;
# endif /* !RT_ARCH_AMD64 */
}
#endif
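
/* Editorial usage sketch (not part of the original header): the guard pattern
 * for 32-bit code that may still run on pre-CPUID processors.
 * @code
 *     if (ASMHasCpuId())
 *     {
 *         uint32_t const uMaxLeaf = ASMCpuId_EAX(0);
 *         ...
 *     }
 * @endcode
 */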


/**
 * Gets the APIC ID of the current CPU.
 *
 * @returns the APIC ID.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
RT_ASM_DECL_PRAGMA_WATCOM(uint8_t) ASMGetApicId(void);
#else
DECLINLINE(uint8_t) ASMGetApicId(void)
{
    RTCCUINTREG xBX;
# if RT_INLINE_ASM_GNU_STYLE
#  ifdef RT_ARCH_AMD64
    RTCCUINTREG uSpill;
    __asm__ __volatile__ ("cpuid"
                          : "=a" (uSpill),
                            "=b" (xBX)
                          : "0" (1)
                          : "rcx", "rdx");
#  elif (defined(PIC) || defined(__PIC__)) && defined(__i386__)
    RTCCUINTREG uSpill;
    __asm__ __volatile__ ("mov %%ebx,%1\n\t"
                          "cpuid\n\t"
                          "xchgl %%ebx,%1\n\t"
                          : "=a" (uSpill),
                            "=rm" (xBX)
                          : "0" (1)
                          : "ecx", "edx");
#  else
    RTCCUINTREG uSpill;
    __asm__ __volatile__ ("cpuid"
                          : "=a" (uSpill),
                            "=b" (xBX)
                          : "0" (1)
                          : "ecx", "edx");
#  endif

# elif RT_INLINE_ASM_USES_INTRIN
    int aInfo[4];
    __cpuid(aInfo, 1);
    xBX = aInfo[1];

# else
    __asm
    {
        push    ebx
        mov     eax, 1
        cpuid
        mov     [xBX], ebx
        pop     ebx
    }
# endif
    return (uint8_t)(xBX >> 24);
}
#endif


/**
 * Gets the APIC ID of the current CPU using leaf 0xb.
 *
 * @returns the APIC ID.
 */
#if RT_INLINE_ASM_EXTERNAL && RT_INLINE_ASM_USES_INTRIN < 16 /*?*/
RT_ASM_DECL_PRAGMA_WATCOM(uint32_t) ASMGetApicIdExt0B(void);
#else
DECLINLINE(uint32_t) ASMGetApicIdExt0B(void)
{
# if RT_INLINE_ASM_GNU_STYLE
    RTCCUINTREG xDX;
#  ifdef RT_ARCH_AMD64
    RTCCUINTREG uSpillEax, uSpillEcx;
    __asm__ __volatile__ ("cpuid"
                          : "=a" (uSpillEax),
                            "=c" (uSpillEcx),
                            "=d" (xDX)
                          : "0" (0xb),
                            "1" (0)
                          : "rbx");
#  elif (defined(PIC) || defined(__PIC__)) && defined(__i386__)
    RTCCUINTREG uSpillEax, uSpillEcx, uSpillEbx;
    __asm__ __volatile__ ("mov %%ebx,%2\n\t"
                          "cpuid\n\t"
                          "xchgl %%ebx,%2\n\t"
                          : "=a" (uSpillEax),
                            "=c" (uSpillEcx),
                            "=rm" (uSpillEbx),
                            "=d" (xDX)
                          : "0" (0xb),
                            "1" (0));
#  else
    RTCCUINTREG uSpillEax, uSpillEcx;
    __asm__ __volatile__ ("cpuid"
                          : "=a" (uSpillEax),
                            "=c" (uSpillEcx),
                            "=d" (xDX)
                          : "0" (0xb),
                            "1" (0)
                          : "ebx");
#  endif
    return (uint32_t)xDX;

# elif RT_INLINE_ASM_USES_INTRIN >= 16 /*?*/

    int aInfo[4];
    __cpuidex(aInfo, 0xb, 0);
    return aInfo[3];

# else
    RTCCUINTREG xDX;
    __asm
    {
        push    ebx
        mov     eax, 0xb
        xor     ecx, ecx
        cpuid
        mov     [xDX], edx
        pop     ebx
    }
    return (uint32_t)xDX;
# endif
}
#endif


/**
 * Gets the APIC ID of the current CPU using leaf 8000001E.
 *
 * @returns the APIC ID.
 */
DECLINLINE(uint32_t) ASMGetApicIdExt8000001E(void)
{
    return ASMCpuId_EAX(0x8000001e);
}


/**
 * Tests if it is a genuine Intel CPU based on the ASMCpuId(0) output.
 *
 * @returns true/false.
 * @param   uEBX    EBX return from ASMCpuId(0)
 * @param   uECX    ECX return from ASMCpuId(0)
 * @param   uEDX    EDX return from ASMCpuId(0)
 */
DECLINLINE(bool) ASMIsIntelCpuEx(uint32_t uEBX, uint32_t uECX, uint32_t uEDX)
{
    /* 'GenuineIntel' */
    return uEBX == UINT32_C(0x756e6547)     /* 'Genu' */
        && uEDX == UINT32_C(0x49656e69)     /* 'ineI' */
        && uECX == UINT32_C(0x6c65746e);    /* 'ntel' */
}


/**
 * Tests if this is a genuine Intel CPU.
 *
 * @returns true/false.
 * @remarks ASSUMES that cpuid is supported by the CPU.
 */
DECLINLINE(bool) ASMIsIntelCpu(void)
{
    uint32_t uEAX, uEBX, uECX, uEDX;
    ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
    return ASMIsIntelCpuEx(uEBX, uECX, uEDX);
}


/**
 * Tests if it is an authentic AMD CPU based on the ASMCpuId(0) output.
 *
 * @returns true/false.
 * @param   uEBX    EBX return from ASMCpuId(0)
 * @param   uECX    ECX return from ASMCpuId(0)
 * @param   uEDX    EDX return from ASMCpuId(0)
 */
DECLINLINE(bool) ASMIsAmdCpuEx(uint32_t uEBX, uint32_t uECX, uint32_t uEDX)
{
    /* 'AuthenticAMD' */
    return uEBX == UINT32_C(0x68747541)     /* 'Auth' */
        && uEDX == UINT32_C(0x69746e65)     /* 'enti' */
        && uECX == UINT32_C(0x444d4163);    /* 'dAMD' */
}


/**
 * Tests if this is an authentic AMD CPU.
 *
 * @returns true/false.
 * @remarks ASSUMES that cpuid is supported by the CPU.
 */
DECLINLINE(bool) ASMIsAmdCpu(void)
{
    uint32_t uEAX, uEBX, uECX, uEDX;
    ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
    return ASMIsAmdCpuEx(uEBX, uECX, uEDX);
}


/**
 * Tests if it is a centaur hauling VIA CPU based on the ASMCpuId(0) output.
 *
 * @returns true/false.
 * @param   uEBX    EBX return from ASMCpuId(0).
 * @param   uECX    ECX return from ASMCpuId(0).
 * @param   uEDX    EDX return from ASMCpuId(0).
 */
DECLINLINE(bool) ASMIsViaCentaurCpuEx(uint32_t uEBX, uint32_t uECX, uint32_t uEDX)
{
    /* 'CentaurHauls' */
    return uEBX == UINT32_C(0x746e6543)     /* 'Cent' */
        && uEDX == UINT32_C(0x48727561)     /* 'aurH' */
        && uECX == UINT32_C(0x736c7561);    /* 'auls' */
}


/**
 * Tests if this is a centaur hauling VIA CPU.
 *
 * @returns true/false.
 * @remarks ASSUMES that cpuid is supported by the CPU.
 */
DECLINLINE(bool) ASMIsViaCentaurCpu(void)
{
    uint32_t uEAX, uEBX, uECX, uEDX;
    ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
    return ASMIsViaCentaurCpuEx(uEBX, uECX, uEDX);
}


/**
 * Tests if it is a Shanghai CPU based on the ASMCpuId(0) output.
 *
 * @returns true/false.
 * @param   uEBX    EBX return from ASMCpuId(0).
 * @param   uECX    ECX return from ASMCpuId(0).
 * @param   uEDX    EDX return from ASMCpuId(0).
 */
DECLINLINE(bool) ASMIsShanghaiCpuEx(uint32_t uEBX, uint32_t uECX, uint32_t uEDX)
{
    /* '  Shanghai  ' */
    return uEBX == UINT32_C(0x68532020)     /* '  Sh' */
        && uEDX == UINT32_C(0x68676e61)     /* 'angh' */
        && uECX == UINT32_C(0x20206961);    /* 'ai  ' */
}


/**
 * Tests if this is a Shanghai CPU.
 *
 * @returns true/false.
 * @remarks ASSUMES that cpuid is supported by the CPU.
 */
DECLINLINE(bool) ASMIsShanghaiCpu(void)
{
    uint32_t uEAX, uEBX, uECX, uEDX;
    ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
    return ASMIsShanghaiCpuEx(uEBX, uECX, uEDX);
}


/**
 * Tests if it is a genuine Hygon CPU based on the ASMCpuId(0) output.
 *
 * @returns true/false.
 * @param   uEBX    EBX return from ASMCpuId(0)
 * @param   uECX    ECX return from ASMCpuId(0)
 * @param   uEDX    EDX return from ASMCpuId(0)
 */
DECLINLINE(bool) ASMIsHygonCpuEx(uint32_t uEBX, uint32_t uECX, uint32_t uEDX)
{
    /* 'HygonGenuine' */
    return uEBX == UINT32_C(0x6f677948)     /* 'Hygo' */
        && uECX == UINT32_C(0x656e6975)     /* 'uine' */
        && uEDX == UINT32_C(0x6e65476e);    /* 'nGen' */
}


/**
 * Tests if this is a genuine Hygon CPU.
 *
 * @returns true/false.
 * @remarks ASSUMES that cpuid is supported by the CPU.
 */
DECLINLINE(bool) ASMIsHygonCpu(void)
{
    uint32_t uEAX, uEBX, uECX, uEDX;
    ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
    return ASMIsHygonCpuEx(uEBX, uECX, uEDX);
}
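
/* Editorial usage sketch (not part of the original header): one leaf-0 query
 * feeding several vendor tests, instead of calling each ASMIs*Cpu() wrapper
 * and redoing the CPUID every time.
 * @code
 *     uint32_t uMaxLeaf, uEbx, uEcx, uEdx;
 *     ASMCpuId(0, &uMaxLeaf, &uEbx, &uEcx, &uEdx);
 *     bool const fIntel = ASMIsIntelCpuEx(uEbx, uEcx, uEdx);
 *     bool const fAmd   = ASMIsAmdCpuEx(uEbx, uEcx, uEdx);
 * @endcode
 */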


/**
 * Checks whether ASMCpuId_EAX(0x00000000) indicates a valid range.
 *
 * @returns true/false.
 * @param   uEAX    The EAX value of CPUID leaf 0x00000000.
 *
 * @note    This only succeeds if there are at least two leaves in the range.
 * @remarks The upper range limit is just some half reasonable value we've
 *          picked out of thin air.
 */
DECLINLINE(bool) ASMIsValidStdRange(uint32_t uEAX)
{
    return uEAX >= UINT32_C(0x00000001) && uEAX <= UINT32_C(0x000fffff);
}


/**
 * Checks whether ASMCpuId_EAX(0x80000000) indicates a valid range.
 *
 * @returns true/false.
 * @param   uEAX    The EAX value of CPUID leaf 0x80000000.
 *
 * @note    This only succeeds if there are at least two leaves in the range.
 * @remarks The upper range limit is just some half reasonable value we've
 *          picked out of thin air.
 */
DECLINLINE(bool) ASMIsValidExtRange(uint32_t uEAX)
{
    return uEAX >= UINT32_C(0x80000001) && uEAX <= UINT32_C(0x800fffff);
}


/**
 * Checks whether ASMCpuId_EAX(0x40000000) indicates a valid range.
 *
 * @returns true/false.
 * @param   uEAX    The EAX value of CPUID leaf 0x40000000.
 *
 * @note    Unlike ASMIsValidStdRange() and ASMIsValidExtRange(), a single leaf
 *          is okay here. So, you always need to check the range.
 * @remarks The upper range limit is taken from the Intel docs.
 */
DECLINLINE(bool) ASMIsValidHypervisorRange(uint32_t uEAX)
{
    return uEAX >= UINT32_C(0x40000000) && uEAX <= UINT32_C(0x4fffffff);
}


/**
 * Extracts the CPU family from ASMCpuId(1) or ASMCpuId(0x80000001).
 *
 * @returns Family.
 * @param   uEAX    EAX return from ASMCpuId(1) or ASMCpuId(0x80000001).
 */
DECLINLINE(uint32_t) ASMGetCpuFamily(uint32_t uEAX)
{
    return ((uEAX >> 8) & 0xf) == 0xf
         ? ((uEAX >> 20) & 0x7f) + 0xf
         : ((uEAX >> 8) & 0xf);
}


/**
 * Extracts the CPU model from ASMCpuId(1) or ASMCpuId(0x80000001), Intel variant.
 *
 * @returns Model.
 * @param   uEAX    EAX from ASMCpuId(1) or ASMCpuId(0x80000001).
 */
DECLINLINE(uint32_t) ASMGetCpuModelIntel(uint32_t uEAX)
{
    return ((uEAX >> 8) & 0xf) == 0xf || (((uEAX >> 8) & 0xf) == 0x6) /* family! */
         ? ((uEAX >> 4) & 0xf) | ((uEAX >> 12) & 0xf0)
         : ((uEAX >> 4) & 0xf);
}


/**
 * Extracts the CPU model from ASMCpuId(1) or ASMCpuId(0x80000001), AMD variant.
 *
 * @returns Model.
 * @param   uEAX    EAX from ASMCpuId(1) or ASMCpuId(0x80000001).
 */
DECLINLINE(uint32_t) ASMGetCpuModelAMD(uint32_t uEAX)
{
    return ((uEAX >> 8) & 0xf) == 0xf
         ? ((uEAX >> 4) & 0xf) | ((uEAX >> 12) & 0xf0)
         : ((uEAX >> 4) & 0xf);
}


/**
 * Extracts the CPU model from ASMCpuId(1) or ASMCpuId(0x80000001).
 *
 * @returns Model.
 * @param   uEAX    EAX from ASMCpuId(1) or ASMCpuId(0x80000001).
 * @param   fIntel  Whether it's an Intel CPU. Use ASMIsIntelCpuEx() or ASMIsIntelCpu().
 */
DECLINLINE(uint32_t) ASMGetCpuModel(uint32_t uEAX, bool fIntel)
{
    return ((uEAX >> 8) & 0xf) == 0xf || (((uEAX >> 8) & 0xf) == 0x6 && fIntel) /* family! */
         ? ((uEAX >> 4) & 0xf) | ((uEAX >> 12) & 0xf0)
         : ((uEAX >> 4) & 0xf);
}


/**
 * Extracts the CPU stepping from ASMCpuId(1) or ASMCpuId(0x80000001).
 *
 * @returns Stepping.
 * @param   uEAX    EAX from ASMCpuId(1) or ASMCpuId(0x80000001).
 */
DECLINLINE(uint32_t) ASMGetCpuStepping(uint32_t uEAX)
{
    return uEAX & 0xf;
}
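
/* Editorial worked example (not part of the original header): for a leaf-1
 * EAX of 0x000306a9 (an Ivy Bridge Core i7 signature, used here purely as an
 * illustration), the base family nibble is 6, so on Intel the extended model
 * nibble is merged in: ((uEAX >> 4) & 0xf) = 0xa combined with
 * ((uEAX >> 12) & 0xf0) = 0x30 gives model 0x3a.
 * @code
 *     uint32_t const uEAX      = UINT32_C(0x000306a9);
 *     uint32_t const uFamily   = ASMGetCpuFamily(uEAX);       // 6
 *     uint32_t const uModel    = ASMGetCpuModel(uEAX, true);  // 0x3a
 *     uint32_t const uStepping = ASMGetCpuStepping(uEAX);     // 9
 * @endcode
 */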


/**
 * Get cr0.
 * @returns cr0.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
RT_ASM_DECL_PRAGMA_WATCOM(RTCCUINTXREG) ASMGetCR0(void);
#else
DECLINLINE(RTCCUINTXREG) ASMGetCR0(void)
{
    RTCCUINTXREG uCR0;
# if RT_INLINE_ASM_USES_INTRIN
    uCR0 = __readcr0();

# elif RT_INLINE_ASM_GNU_STYLE
#  ifdef RT_ARCH_AMD64
    __asm__ __volatile__("movq %%cr0, %0\t\n" : "=r" (uCR0));
#  else
    __asm__ __volatile__("movl %%cr0, %0\t\n" : "=r" (uCR0));
#  endif
# else
    __asm
    {
#  ifdef RT_ARCH_AMD64
        mov     rax, cr0
        mov     [uCR0], rax
#  else
        mov     eax, cr0
        mov     [uCR0], eax
#  endif
    }
# endif
    return uCR0;
}
#endif


/**
 * Sets the CR0 register.
 * @param   uCR0    The new CR0 value.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
RT_ASM_DECL_PRAGMA_WATCOM(void) ASMSetCR0(RTCCUINTXREG uCR0);
#else
DECLINLINE(void) ASMSetCR0(RTCCUINTXREG uCR0)
{
# if RT_INLINE_ASM_USES_INTRIN
    __writecr0(uCR0);

# elif RT_INLINE_ASM_GNU_STYLE
#  ifdef RT_ARCH_AMD64
    __asm__ __volatile__("movq %0, %%cr0\n\t" :: "r" (uCR0));
#  else
    __asm__ __volatile__("movl %0, %%cr0\n\t" :: "r" (uCR0));
#  endif
# else
    __asm
    {
#  ifdef RT_ARCH_AMD64
        mov     rax, [uCR0]
        mov     cr0, rax
#  else
        mov     eax, [uCR0]
        mov     cr0, eax
#  endif
    }
# endif
}
#endif


/**
 * Get cr2.
 * @returns cr2.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
RT_ASM_DECL_PRAGMA_WATCOM(RTCCUINTXREG) ASMGetCR2(void);
#else
DECLINLINE(RTCCUINTXREG) ASMGetCR2(void)
{
    RTCCUINTXREG uCR2;
# if RT_INLINE_ASM_USES_INTRIN
    uCR2 = __readcr2();

# elif RT_INLINE_ASM_GNU_STYLE
#  ifdef RT_ARCH_AMD64
    __asm__ __volatile__("movq %%cr2, %0\t\n" : "=r" (uCR2));
#  else
    __asm__ __volatile__("movl %%cr2, %0\t\n" : "=r" (uCR2));
#  endif
# else
    __asm
    {
#  ifdef RT_ARCH_AMD64
        mov     rax, cr2
        mov     [uCR2], rax
#  else
        mov     eax, cr2
        mov     [uCR2], eax
#  endif
    }
# endif
    return uCR2;
}
#endif


/**
 * Sets the CR2 register.
 * @param   uCR2    The new CR2 value.
 */
#if RT_INLINE_ASM_EXTERNAL
RT_ASM_DECL_PRAGMA_WATCOM(void) ASMSetCR2(RTCCUINTXREG uCR2);
#else
DECLINLINE(void) ASMSetCR2(RTCCUINTXREG uCR2)
{
# if RT_INLINE_ASM_GNU_STYLE
#  ifdef RT_ARCH_AMD64
    __asm__ __volatile__("movq %0, %%cr2\n\t" :: "r" (uCR2));
#  else
    __asm__ __volatile__("movl %0, %%cr2\n\t" :: "r" (uCR2));
#  endif
# else
    __asm
    {
#  ifdef RT_ARCH_AMD64
        mov     rax, [uCR2]
        mov     cr2, rax
#  else
        mov     eax, [uCR2]
        mov     cr2, eax
#  endif
    }
# endif
}
#endif


/**
 * Get cr3.
 * @returns cr3.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
RT_ASM_DECL_PRAGMA_WATCOM(RTCCUINTXREG) ASMGetCR3(void);
#else
DECLINLINE(RTCCUINTXREG) ASMGetCR3(void)
{
    RTCCUINTXREG uCR3;
# if RT_INLINE_ASM_USES_INTRIN
    uCR3 = __readcr3();

# elif RT_INLINE_ASM_GNU_STYLE
#  ifdef RT_ARCH_AMD64
    __asm__ __volatile__("movq %%cr3, %0\t\n" : "=r" (uCR3));
#  else
    __asm__ __volatile__("movl %%cr3, %0\t\n" : "=r" (uCR3));
#  endif
# else
    __asm
    {
#  ifdef RT_ARCH_AMD64
        mov     rax, cr3
        mov     [uCR3], rax
#  else
        mov     eax, cr3
        mov     [uCR3], eax
#  endif
    }
# endif
    return uCR3;
}
#endif


/**
 * Sets the CR3 register.
 *
 * @param   uCR3    New CR3 value.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
RT_ASM_DECL_PRAGMA_WATCOM(void) ASMSetCR3(RTCCUINTXREG uCR3);
#else
DECLINLINE(void) ASMSetCR3(RTCCUINTXREG uCR3)
{
# if RT_INLINE_ASM_USES_INTRIN
    __writecr3(uCR3);

# elif RT_INLINE_ASM_GNU_STYLE
#  ifdef RT_ARCH_AMD64
    __asm__ __volatile__("movq %0, %%cr3\n\t" : : "r" (uCR3));
#  else
    __asm__ __volatile__("movl %0, %%cr3\n\t" : : "r" (uCR3));
#  endif
# else
    __asm
    {
#  ifdef RT_ARCH_AMD64
        mov     rax, [uCR3]
        mov     cr3, rax
#  else
        mov     eax, [uCR3]
        mov     cr3, eax
#  endif
    }
# endif
}
#endif


/**
 * Reloads the CR3 register.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
RT_ASM_DECL_PRAGMA_WATCOM(void) ASMReloadCR3(void);
#else
DECLINLINE(void) ASMReloadCR3(void)
{
# if RT_INLINE_ASM_USES_INTRIN
    __writecr3(__readcr3());

# elif RT_INLINE_ASM_GNU_STYLE
    RTCCUINTXREG u;
#  ifdef RT_ARCH_AMD64
    __asm__ __volatile__("movq %%cr3, %0\n\t"
                         "movq %0, %%cr3\n\t"
                         : "=r" (u));
#  else
    __asm__ __volatile__("movl %%cr3, %0\n\t"
                         "movl %0, %%cr3\n\t"
                         : "=r" (u));
#  endif
# else
    __asm
    {
#  ifdef RT_ARCH_AMD64
        mov     rax, cr3
        mov     cr3, rax
#  else
        mov     eax, cr3
        mov     cr3, eax
#  endif
    }
# endif
}
#endif


/**
 * Get cr4.
 * @returns cr4.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
RT_ASM_DECL_PRAGMA_WATCOM(RTCCUINTXREG) ASMGetCR4(void);
#else
DECLINLINE(RTCCUINTXREG) ASMGetCR4(void)
{
    RTCCUINTXREG uCR4;
# if RT_INLINE_ASM_USES_INTRIN
    uCR4 = __readcr4();

# elif RT_INLINE_ASM_GNU_STYLE
#  ifdef RT_ARCH_AMD64
    __asm__ __volatile__("movq %%cr4, %0\t\n" : "=r" (uCR4));
#  else
    __asm__ __volatile__("movl %%cr4, %0\t\n" : "=r" (uCR4));
#  endif
# else
    __asm
    {
#  ifdef RT_ARCH_AMD64
        mov     rax, cr4
        mov     [uCR4], rax
#  else
        push    eax /* just in case */
        /*mov     eax, cr4*/
        _emit   0x0f
        _emit   0x20
        _emit   0xe0
        mov     [uCR4], eax
        pop     eax
#  endif
    }
# endif
    return uCR4;
}
#endif
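
/* Editorial usage sketch (not part of the original header): checking a CR4
 * feature bit.  X86_CR4_PAE (bit 5, i.e. 0x20) normally comes from iprt/x86.h
 * and is written out here as an assumption.
 * @code
 *     bool const fPae = RT_BOOL(ASMGetCR4() & UINT32_C(0x20));
 * @endcode
 */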


/**
 * Sets the CR4 register.
 *
 * @param   uCR4    New CR4 value.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
RT_ASM_DECL_PRAGMA_WATCOM(void) ASMSetCR4(RTCCUINTXREG uCR4);
#else
DECLINLINE(void) ASMSetCR4(RTCCUINTXREG uCR4)
{
# if RT_INLINE_ASM_USES_INTRIN
    __writecr4(uCR4);

# elif RT_INLINE_ASM_GNU_STYLE
#  ifdef RT_ARCH_AMD64
    __asm__ __volatile__("movq %0, %%cr4\n\t" : : "r" (uCR4));
#  else
    __asm__ __volatile__("movl %0, %%cr4\n\t" : : "r" (uCR4));
#  endif
# else
    __asm
    {
#  ifdef RT_ARCH_AMD64
        mov     rax, [uCR4]
        mov     cr4, rax
#  else
        mov     eax, [uCR4]
        _emit   0x0F
        _emit   0x22
        _emit   0xE0 /* mov cr4, eax */
#  endif
    }
# endif
}
#endif


/**
 * Get cr8.
 * @returns cr8.
 * @remark  The lock prefix hack for access from non-64-bit modes is NOT used and 0 is returned.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(RTCCUINTXREG) ASMGetCR8(void);
#else
DECLINLINE(RTCCUINTXREG) ASMGetCR8(void)
{
# ifdef RT_ARCH_AMD64
    RTCCUINTXREG uCR8;
#  if RT_INLINE_ASM_USES_INTRIN
    uCR8 = __readcr8();

#  elif RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("movq %%cr8, %0\t\n" : "=r" (uCR8));
#  else
    __asm
    {
        mov     rax, cr8
        mov     [uCR8], rax
    }
#  endif
    return uCR8;
# else /* !RT_ARCH_AMD64 */
    return 0;
# endif /* !RT_ARCH_AMD64 */
}
#endif


/**
 * Get XCR0 (eXtended feature Control Register 0).
 * @returns xcr0.
 */
DECLASM(uint64_t) ASMGetXcr0(void);

/**
 * Sets the XCR0 register.
 * @param   uXcr0   The new XCR0 value.
 */
DECLASM(void) ASMSetXcr0(uint64_t uXcr0);

struct X86XSAVEAREA;
/**
 * Save extended CPU state.
 * @param   pXStateArea     Where to save the state.
 * @param   fComponents     Which state components to save.
 */
DECLASM(void) ASMXSave(struct X86XSAVEAREA RT_FAR *pXStateArea, uint64_t fComponents);

/**
 * Loads extended CPU state.
 * @param   pXStateArea     Where to load the state from.
 * @param   fComponents     Which state components to load.
 */
DECLASM(void) ASMXRstor(struct X86XSAVEAREA const RT_FAR *pXStateArea, uint64_t fComponents);


struct X86FXSTATE;
/**
 * Save FPU and SSE CPU state.
 * @param   pXStateArea     Where to save the state.
 */
DECLASM(void) ASMFxSave(struct X86FXSTATE RT_FAR *pXStateArea);

/**
 * Load FPU and SSE CPU state.
 * @param   pXStateArea     Where to load the state from.
 */
DECLASM(void) ASMFxRstor(struct X86FXSTATE const RT_FAR *pXStateArea);


/**
 * Enables interrupts (EFLAGS.IF).
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
RT_ASM_DECL_PRAGMA_WATCOM(void) ASMIntEnable(void);
#else
DECLINLINE(void) ASMIntEnable(void)
{
# if RT_INLINE_ASM_GNU_STYLE
    __asm("sti\n");
# elif RT_INLINE_ASM_USES_INTRIN
    _enable();
# else
    __asm sti
# endif
}
#endif


/**
 * Disables interrupts (!EFLAGS.IF).
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
RT_ASM_DECL_PRAGMA_WATCOM(void) ASMIntDisable(void);
#else
DECLINLINE(void) ASMIntDisable(void)
{
# if RT_INLINE_ASM_GNU_STYLE
    __asm("cli\n");
# elif RT_INLINE_ASM_USES_INTRIN
    _disable();
# else
    __asm cli
# endif
}
#endif


/**
 * Disables interrupts and returns previous xFLAGS.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
RT_ASM_DECL_PRAGMA_WATCOM(RTCCUINTREG) ASMIntDisableFlags(void);
#else
DECLINLINE(RTCCUINTREG) ASMIntDisableFlags(void)
{
    RTCCUINTREG xFlags;
# if RT_INLINE_ASM_GNU_STYLE
#  ifdef RT_ARCH_AMD64
    __asm__ __volatile__("pushfq\n\t"
                         "cli\n\t"
                         "popq %0\n\t"
                         : "=r" (xFlags));
#  else
    __asm__ __volatile__("pushfl\n\t"
                         "cli\n\t"
                         "popl %0\n\t"
                         : "=r" (xFlags));
#  endif
# elif RT_INLINE_ASM_USES_INTRIN && !defined(RT_ARCH_X86)
    xFlags = ASMGetFlags();
    _disable();
# else
    __asm {
        pushfd
        cli
        pop     [xFlags]
    }
# endif
    return xFlags;
}
#endif
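
/* Editorial usage sketch (not part of the original header): the canonical
 * critical-section pattern built from ASMIntDisableFlags() and ASMSetFlags().
 * @code
 *     RTCCUINTREG const fSavedFlags = ASMIntDisableFlags();
 *     ...code that must not be interrupted...
 *     ASMSetFlags(fSavedFlags); // restores the previous EFLAGS.IF state
 * @endcode
 */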


/**
 * Are interrupts enabled?
 *
 * @returns true / false.
 */
DECLINLINE(bool) ASMIntAreEnabled(void)
{
    RTCCUINTREG uFlags = ASMGetFlags();
    return uFlags & 0x200 /* X86_EFL_IF */ ? true : false;
}


/**
 * Halts the CPU until interrupted.
 */
#if RT_INLINE_ASM_EXTERNAL && RT_INLINE_ASM_USES_INTRIN < 14
RT_ASM_DECL_PRAGMA_WATCOM(void) ASMHalt(void);
#else
DECLINLINE(void) ASMHalt(void)
{
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("hlt\n\t");
# elif RT_INLINE_ASM_USES_INTRIN
    __halt();
# else
    __asm {
        hlt
    }
# endif
}
#endif


/**
 * Reads a machine specific register.
 *
 * @returns Register content.
 * @param   uRegister   Register to read.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
RT_ASM_DECL_PRAGMA_WATCOM(uint64_t) ASMRdMsr(uint32_t uRegister);
#else
DECLINLINE(uint64_t) ASMRdMsr(uint32_t uRegister)
{
    RTUINT64U u;
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("rdmsr\n\t"
                         : "=a" (u.s.Lo),
                           "=d" (u.s.Hi)
                         : "c" (uRegister));

# elif RT_INLINE_ASM_USES_INTRIN
    u.u = __readmsr(uRegister);

# else
    __asm
    {
        mov     ecx, [uRegister]
        rdmsr
        mov     [u.s.Lo], eax
        mov     [u.s.Hi], edx
    }
# endif

    return u.u;
}
#endif


/**
 * Writes a machine specific register.
 *
 * @param   uRegister   Register to write to.
 * @param   u64Val      Value to write.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
RT_ASM_DECL_PRAGMA_WATCOM(void) ASMWrMsr(uint32_t uRegister, uint64_t u64Val);
#else
DECLINLINE(void) ASMWrMsr(uint32_t uRegister, uint64_t u64Val)
{
    RTUINT64U u;

    u.u = u64Val;
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("wrmsr\n\t"
                         ::"a" (u.s.Lo),
                           "d" (u.s.Hi),
                           "c" (uRegister));

# elif RT_INLINE_ASM_USES_INTRIN
    __writemsr(uRegister, u.u);

# else
    __asm
    {
        mov     ecx, [uRegister]
        mov     edx, [u.s.Hi]
        mov     eax, [u.s.Lo]
        wrmsr
    }
# endif
}
#endif
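
/* Editorial usage sketch (not part of the original header): a read-modify-write
 * of an MSR.  MSR_IA32_MISC_ENABLE (0x1a0) and the bit chosen are purely
 * illustrative assumptions; real code would take the constants from iprt/x86.h
 * and run this in ring-0.
 * @code
 *     uint64_t uValue = ASMRdMsr(UINT32_C(0x1a0));
 *     uValue |= RT_BIT_64(0); // illustrative bit twiddle
 *     ASMWrMsr(UINT32_C(0x1a0), uValue);
 * @endcode
 */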
2321
2322
2323/**
2324 * Reads a machine specific register, extended version (for AMD).
2325 *
2326 * @returns Register content.
2327 * @param uRegister Register to read.
2328 * @param uXDI RDI/EDI value.
2329 */
2330#if RT_INLINE_ASM_EXTERNAL
2331RT_ASM_DECL_PRAGMA_WATCOM(uint64_t) ASMRdMsrEx(uint32_t uRegister, RTCCUINTXREG uXDI);
2332#else
2333DECLINLINE(uint64_t) ASMRdMsrEx(uint32_t uRegister, RTCCUINTXREG uXDI)
2334{
2335 RTUINT64U u;
2336# if RT_INLINE_ASM_GNU_STYLE
2337 __asm__ __volatile__("rdmsr\n\t"
2338 : "=a" (u.s.Lo),
2339 "=d" (u.s.Hi)
2340 : "c" (uRegister),
2341 "D" (uXDI));
2342
2343# else
2344 __asm
2345 {
2346 mov ecx, [uRegister]
2347 xchg edi, [uXDI]
2348 rdmsr
2349 mov [u.s.Lo], eax
2350 mov [u.s.Hi], edx
2351 xchg edi, [uXDI]
2352 }
2353# endif
2354
2355 return u.u;
2356}
2357#endif
2358
2359
2360/**
2361 * Writes a machine specific register, extended version (for AMD).
2362 *
2364 * @param uRegister Register to write to.
2365 * @param uXDI The value to load into RDI/EDI before the WRMSR (some AMD MSRs expect a passcode here).
2366 * @param u64Val Value to write.
2367 */
2368#if RT_INLINE_ASM_EXTERNAL
2369RT_ASM_DECL_PRAGMA_WATCOM(void) ASMWrMsrEx(uint32_t uRegister, RTCCUINTXREG uXDI, uint64_t u64Val);
2370#else
2371DECLINLINE(void) ASMWrMsrEx(uint32_t uRegister, RTCCUINTXREG uXDI, uint64_t u64Val)
2372{
2373 RTUINT64U u;
2374
2375 u.u = u64Val;
2376# if RT_INLINE_ASM_GNU_STYLE
2377 __asm__ __volatile__("wrmsr\n\t"
2378 ::"a" (u.s.Lo),
2379 "d" (u.s.Hi),
2380 "c" (uRegister),
2381 "D" (uXDI));
2382
2383# else
2384 __asm
2385 {
2386 mov ecx, [uRegister]
2387 xchg edi, [uXDI]
2388 mov edx, [u.s.Hi]
2389 mov eax, [u.s.Lo]
2390 wrmsr
2391 xchg edi, [uXDI]
2392 }
2393# endif
2394}
2395#endif
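
/* Editor's example (not from the original sources): certain AMD MSRs only
   respond when EDI/RDI carries a vendor passcode; 0x9c5a203a is the value
   cited in public AMD documentation, an assumption here rather than anything
   this header defines. */
#if 0 /* illustration only */
static uint64_t rtExampleReadGuardedAmdMsr(uint32_t uMsr)
{
    return ASMRdMsrEx(uMsr, (RTCCUINTXREG)UINT32_C(0x9c5a203a) /* assumed passcode */);
}
#endif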
2396
2397
2399/**
2400 * Reads the low 32 bits of a machine specific register.
2401 *
2402 * @returns Register content.
2403 * @param uRegister Register to read.
2404 */
2405#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2406RT_ASM_DECL_PRAGMA_WATCOM(uint32_t) ASMRdMsr_Low(uint32_t uRegister);
2407#else
2408DECLINLINE(uint32_t) ASMRdMsr_Low(uint32_t uRegister)
2409{
2410 uint32_t u32;
2411# if RT_INLINE_ASM_GNU_STYLE
2412 __asm__ __volatile__("rdmsr\n\t"
2413 : "=a" (u32)
2414 : "c" (uRegister)
2415 : "edx");
2416
2417# elif RT_INLINE_ASM_USES_INTRIN
2418 u32 = (uint32_t)__readmsr(uRegister);
2419
2420# else
2421 __asm
2422 {
2423 mov ecx, [uRegister]
2424 rdmsr
2425 mov [u32], eax
2426 }
2427# endif
2428
2429 return u32;
2430}
2431#endif
2432
2433
2434/**
2435 * Reads the high 32 bits of a machine specific register.
2436 *
2437 * @returns Register content.
2438 * @param uRegister Register to read.
2439 */
2440#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2441RT_ASM_DECL_PRAGMA_WATCOM(uint32_t) ASMRdMsr_High(uint32_t uRegister);
2442#else
2443DECLINLINE(uint32_t) ASMRdMsr_High(uint32_t uRegister)
2444{
2445 uint32_t u32;
2446# if RT_INLINE_ASM_GNU_STYLE
2447 __asm__ __volatile__("rdmsr\n\t"
2448 : "=d" (u32)
2449 : "c" (uRegister)
2450 : "eax");
2451
2452# elif RT_INLINE_ASM_USES_INTRIN
2453 u32 = (uint32_t)(__readmsr(uRegister) >> 32);
2454
2455# else
2456 __asm
2457 {
2458 mov ecx, [uRegister]
2459 rdmsr
2460 mov [u32], edx
2461 }
2462# endif
2463
2464 return u32;
2465}
2466#endif
2467
2468
2469/**
2470 * Gets dr0.
2471 *
2472 * @returns dr0.
2473 */
2474#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2475RT_ASM_DECL_PRAGMA_WATCOM(RTCCUINTXREG) ASMGetDR0(void);
2476#else
2477DECLINLINE(RTCCUINTXREG) ASMGetDR0(void)
2478{
2479 RTCCUINTXREG uDR0;
2480# if RT_INLINE_ASM_USES_INTRIN
2481 uDR0 = __readdr(0);
2482# elif RT_INLINE_ASM_GNU_STYLE
2483# ifdef RT_ARCH_AMD64
2484 __asm__ __volatile__("movq %%dr0, %0\n\t" : "=r" (uDR0));
2485# else
2486 __asm__ __volatile__("movl %%dr0, %0\n\t" : "=r" (uDR0));
2487# endif
2488# else
2489 __asm
2490 {
2491# ifdef RT_ARCH_AMD64
2492 mov rax, dr0
2493 mov [uDR0], rax
2494# else
2495 mov eax, dr0
2496 mov [uDR0], eax
2497# endif
2498 }
2499# endif
2500 return uDR0;
2501}
2502#endif
2503
2504
2505/**
2506 * Gets dr1.
2507 *
2508 * @returns dr1.
2509 */
2510#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2511RT_ASM_DECL_PRAGMA_WATCOM(RTCCUINTXREG) ASMGetDR1(void);
2512#else
2513DECLINLINE(RTCCUINTXREG) ASMGetDR1(void)
2514{
2515 RTCCUINTXREG uDR1;
2516# if RT_INLINE_ASM_USES_INTRIN
2517 uDR1 = __readdr(1);
2518# elif RT_INLINE_ASM_GNU_STYLE
2519# ifdef RT_ARCH_AMD64
2520 __asm__ __volatile__("movq %%dr1, %0\n\t" : "=r" (uDR1));
2521# else
2522 __asm__ __volatile__("movl %%dr1, %0\n\t" : "=r" (uDR1));
2523# endif
2524# else
2525 __asm
2526 {
2527# ifdef RT_ARCH_AMD64
2528 mov rax, dr1
2529 mov [uDR1], rax
2530# else
2531 mov eax, dr1
2532 mov [uDR1], eax
2533# endif
2534 }
2535# endif
2536 return uDR1;
2537}
2538#endif
2539
2540
2541/**
2542 * Gets dr2.
2543 *
2544 * @returns dr2.
2545 */
2546#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2547RT_ASM_DECL_PRAGMA_WATCOM(RTCCUINTXREG) ASMGetDR2(void);
2548#else
2549DECLINLINE(RTCCUINTXREG) ASMGetDR2(void)
2550{
2551 RTCCUINTXREG uDR2;
2552# if RT_INLINE_ASM_USES_INTRIN
2553 uDR2 = __readdr(2);
2554# elif RT_INLINE_ASM_GNU_STYLE
2555# ifdef RT_ARCH_AMD64
2556 __asm__ __volatile__("movq %%dr2, %0\n\t" : "=r" (uDR2));
2557# else
2558 __asm__ __volatile__("movl %%dr2, %0\n\t" : "=r" (uDR2));
2559# endif
2560# else
2561 __asm
2562 {
2563# ifdef RT_ARCH_AMD64
2564 mov rax, dr2
2565 mov [uDR2], rax
2566# else
2567 mov eax, dr2
2568 mov [uDR2], eax
2569# endif
2570 }
2571# endif
2572 return uDR2;
2573}
2574#endif
2575
2576
2577/**
2578 * Gets dr3.
2579 *
2580 * @returns dr3.
2581 */
2582#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2583RT_ASM_DECL_PRAGMA_WATCOM(RTCCUINTXREG) ASMGetDR3(void);
2584#else
2585DECLINLINE(RTCCUINTXREG) ASMGetDR3(void)
2586{
2587 RTCCUINTXREG uDR3;
2588# if RT_INLINE_ASM_USES_INTRIN
2589 uDR3 = __readdr(3);
2590# elif RT_INLINE_ASM_GNU_STYLE
2591# ifdef RT_ARCH_AMD64
2592 __asm__ __volatile__("movq %%dr3, %0\n\t" : "=r" (uDR3));
2593# else
2594 __asm__ __volatile__("movl %%dr3, %0\n\t" : "=r" (uDR3));
2595# endif
2596# else
2597 __asm
2598 {
2599# ifdef RT_ARCH_AMD64
2600 mov rax, dr3
2601 mov [uDR3], rax
2602# else
2603 mov eax, dr3
2604 mov [uDR3], eax
2605# endif
2606 }
2607# endif
2608 return uDR3;
2609}
2610#endif
2611
2612
2613/**
2614 * Gets dr6.
2615 *
2616 * @returns dr6.
2617 */
2618#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2619RT_ASM_DECL_PRAGMA_WATCOM(RTCCUINTXREG) ASMGetDR6(void);
2620#else
2621DECLINLINE(RTCCUINTXREG) ASMGetDR6(void)
2622{
2623 RTCCUINTXREG uDR6;
2624# if RT_INLINE_ASM_USES_INTRIN
2625 uDR6 = __readdr(6);
2626# elif RT_INLINE_ASM_GNU_STYLE
2627# ifdef RT_ARCH_AMD64
2628 __asm__ __volatile__("movq %%dr6, %0\n\t" : "=r" (uDR6));
2629# else
2630 __asm__ __volatile__("movl %%dr6, %0\n\t" : "=r" (uDR6));
2631# endif
2632# else
2633 __asm
2634 {
2635# ifdef RT_ARCH_AMD64
2636 mov rax, dr6
2637 mov [uDR6], rax
2638# else
2639 mov eax, dr6
2640 mov [uDR6], eax
2641# endif
2642 }
2643# endif
2644 return uDR6;
2645}
2646#endif
2647
2648
2649/**
2650 * Reads and clears DR6.
2651 *
2652 * @returns DR6.
2653 */
2654#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2655RT_ASM_DECL_PRAGMA_WATCOM(RTCCUINTXREG) ASMGetAndClearDR6(void);
2656#else
2657DECLINLINE(RTCCUINTXREG) ASMGetAndClearDR6(void)
2658{
2659 RTCCUINTXREG uDR6;
2660# if RT_INLINE_ASM_USES_INTRIN
2661 uDR6 = __readdr(6);
2662 __writedr(6, 0xffff0ff0U); /* 31-16 and 4-11 are 1's, 12 and 63-32 are zero. */
2663# elif RT_INLINE_ASM_GNU_STYLE
2664 RTCCUINTXREG uNewValue = 0xffff0ff0U; /* 31-16 and 4-11 are 1's, 12 and 63-32 are zero. */
2665# ifdef RT_ARCH_AMD64
2666 __asm__ __volatile__("movq %%dr6, %0\n\t"
2667 "movq %1, %%dr6\n\t"
2668 : "=r" (uDR6)
2669 : "r" (uNewValue));
2670# else
2671 __asm__ __volatile__("movl %%dr6, %0\n\t"
2672 "movl %1, %%dr6\n\t"
2673 : "=r" (uDR6)
2674 : "r" (uNewValue));
2675# endif
2676# else
2677 __asm
2678 {
2679# ifdef RT_ARCH_AMD64
2680 mov rax, dr6
2681 mov [uDR6], rax
2683 mov ecx, 0ffff0ff0h; /* 31-16 and 4-11 are 1's, 12 and 63-32 are zero; the 32-bit write zero-extends into RCX. */
2684 mov dr6, rcx
2685# else
2686 mov eax, dr6
2687 mov [uDR6], eax
2688 mov ecx, 0ffff0ff0h; /* 31-16 and 4-11 are 1's, 12 is zero. */
2689 mov dr6, ecx
2690# endif
2691 }
2692# endif
2693 return uDR6;
2694}
2695#endif
2696
2697
2698/**
2699 * Gets dr7.
2700 *
2701 * @returns dr7.
2702 */
2703#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2704RT_ASM_DECL_PRAGMA_WATCOM(RTCCUINTXREG) ASMGetDR7(void);
2705#else
2706DECLINLINE(RTCCUINTXREG) ASMGetDR7(void)
2707{
2708 RTCCUINTXREG uDR7;
2709# if RT_INLINE_ASM_USES_INTRIN
2710 uDR7 = __readdr(7);
2711# elif RT_INLINE_ASM_GNU_STYLE
2712# ifdef RT_ARCH_AMD64
2713 __asm__ __volatile__("movq %%dr7, %0\n\t" : "=r" (uDR7));
2714# else
2715 __asm__ __volatile__("movl %%dr7, %0\n\t" : "=r" (uDR7));
2716# endif
2717# else
2718 __asm
2719 {
2720# ifdef RT_ARCH_AMD64
2721 mov rax, dr7
2722 mov [uDR7], rax
2723# else
2724 mov eax, dr7
2725 mov [uDR7], eax
2726# endif
2727 }
2728# endif
2729 return uDR7;
2730}
2731#endif
2732
2733
2734/**
2735 * Sets dr0.
2736 *
2737 * @param uDRVal Debug register value to write.
2738 */
2739#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2740RT_ASM_DECL_PRAGMA_WATCOM(void) ASMSetDR0(RTCCUINTXREG uDRVal);
2741#else
2742DECLINLINE(void) ASMSetDR0(RTCCUINTXREG uDRVal)
2743{
2744# if RT_INLINE_ASM_USES_INTRIN
2745 __writedr(0, uDRVal);
2746# elif RT_INLINE_ASM_GNU_STYLE
2747# ifdef RT_ARCH_AMD64
2748 __asm__ __volatile__("movq %0, %%dr0\n\t" : : "r" (uDRVal));
2749# else
2750 __asm__ __volatile__("movl %0, %%dr0\n\t" : : "r" (uDRVal));
2751# endif
2752# else
2753 __asm
2754 {
2755# ifdef RT_ARCH_AMD64
2756 mov rax, [uDRVal]
2757 mov dr0, rax
2758# else
2759 mov eax, [uDRVal]
2760 mov dr0, eax
2761# endif
2762 }
2763# endif
2764}
2765#endif
2766
2767
2768/**
2769 * Sets dr1.
2770 *
2771 * @param uDRVal Debug register value to write.
2772 */
2773#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2774RT_ASM_DECL_PRAGMA_WATCOM(void) ASMSetDR1(RTCCUINTXREG uDRVal);
2775#else
2776DECLINLINE(void) ASMSetDR1(RTCCUINTXREG uDRVal)
2777{
2778# if RT_INLINE_ASM_USES_INTRIN
2779 __writedr(1, uDRVal);
2780# elif RT_INLINE_ASM_GNU_STYLE
2781# ifdef RT_ARCH_AMD64
2782 __asm__ __volatile__("movq %0, %%dr1\n\t" : : "r" (uDRVal));
2783# else
2784 __asm__ __volatile__("movl %0, %%dr1\n\t" : : "r" (uDRVal));
2785# endif
2786# else
2787 __asm
2788 {
2789# ifdef RT_ARCH_AMD64
2790 mov rax, [uDRVal]
2791 mov dr1, rax
2792# else
2793 mov eax, [uDRVal]
2794 mov dr1, eax
2795# endif
2796 }
2797# endif
2798}
2799#endif
2800
2801
2802/**
2803 * Sets dr2.
2804 *
2805 * @param uDRVal Debug register value to write.
2806 */
2807#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2808RT_ASM_DECL_PRAGMA_WATCOM(void) ASMSetDR2(RTCCUINTXREG uDRVal);
2809#else
2810DECLINLINE(void) ASMSetDR2(RTCCUINTXREG uDRVal)
2811{
2812# if RT_INLINE_ASM_USES_INTRIN
2813 __writedr(2, uDRVal);
2814# elif RT_INLINE_ASM_GNU_STYLE
2815# ifdef RT_ARCH_AMD64
2816 __asm__ __volatile__("movq %0, %%dr2\n\t" : : "r" (uDRVal));
2817# else
2818 __asm__ __volatile__("movl %0, %%dr2\n\t" : : "r" (uDRVal));
2819# endif
2820# else
2821 __asm
2822 {
2823# ifdef RT_ARCH_AMD64
2824 mov rax, [uDRVal]
2825 mov dr2, rax
2826# else
2827 mov eax, [uDRVal]
2828 mov dr2, eax
2829# endif
2830 }
2831# endif
2832}
2833#endif
2834
2835
2836/**
2837 * Sets dr3.
2838 *
2839 * @param uDRVal Debug register value to write.
2840 */
2841#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2842RT_ASM_DECL_PRAGMA_WATCOM(void) ASMSetDR3(RTCCUINTXREG uDRVal);
2843#else
2844DECLINLINE(void) ASMSetDR3(RTCCUINTXREG uDRVal)
2845{
2846# if RT_INLINE_ASM_USES_INTRIN
2847 __writedr(3, uDRVal);
2848# elif RT_INLINE_ASM_GNU_STYLE
2849# ifdef RT_ARCH_AMD64
2850 __asm__ __volatile__("movq %0, %%dr3\n\t" : : "r" (uDRVal));
2851# else
2852 __asm__ __volatile__("movl %0, %%dr3\n\t" : : "r" (uDRVal));
2853# endif
2854# else
2855 __asm
2856 {
2857# ifdef RT_ARCH_AMD64
2858 mov rax, [uDRVal]
2859 mov dr3, rax
2860# else
2861 mov eax, [uDRVal]
2862 mov dr3, eax
2863# endif
2864 }
2865# endif
2866}
2867#endif
2868
2869
2870/**
2871 * Sets dr6.
2872 *
2873 * @param uDRVal Debug register value to write.
2874 */
2875#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2876RT_ASM_DECL_PRAGMA_WATCOM(void) ASMSetDR6(RTCCUINTXREG uDRVal);
2877#else
2878DECLINLINE(void) ASMSetDR6(RTCCUINTXREG uDRVal)
2879{
2880# if RT_INLINE_ASM_USES_INTRIN
2881 __writedr(6, uDRVal);
2882# elif RT_INLINE_ASM_GNU_STYLE
2883# ifdef RT_ARCH_AMD64
2884 __asm__ __volatile__("movq %0, %%dr6\n\t" : : "r" (uDRVal));
2885# else
2886 __asm__ __volatile__("movl %0, %%dr6\n\t" : : "r" (uDRVal));
2887# endif
2888# else
2889 __asm
2890 {
2891# ifdef RT_ARCH_AMD64
2892 mov rax, [uDRVal]
2893 mov dr6, rax
2894# else
2895 mov eax, [uDRVal]
2896 mov dr6, eax
2897# endif
2898 }
2899# endif
2900}
2901#endif
2902
2903
2904/**
2905 * Sets dr7.
2906 *
2907 * @param uDRVal Debug register value to write.
2908 */
2909#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2910RT_ASM_DECL_PRAGMA_WATCOM(void) ASMSetDR7(RTCCUINTXREG uDRVal);
2911#else
2912DECLINLINE(void) ASMSetDR7(RTCCUINTXREG uDRVal)
2913{
2914# if RT_INLINE_ASM_USES_INTRIN
2915 __writedr(7, uDRVal);
2916# elif RT_INLINE_ASM_GNU_STYLE
2917# ifdef RT_ARCH_AMD64
2918 __asm__ __volatile__("movq %0, %%dr7\n\t" : : "r" (uDRVal));
2919# else
2920 __asm__ __volatile__("movl %0, %%dr7\n\t" : : "r" (uDRVal));
2921# endif
2922# else
2923 __asm
2924 {
2925# ifdef RT_ARCH_AMD64
2926 mov rax, [uDRVal]
2927 mov dr7, rax
2928# else
2929 mov eax, [uDRVal]
2930 mov dr7, eax
2931# endif
2932 }
2933# endif
2934}
2935#endif
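
/* Editor's example (not from the original sources): arming a local one-byte
   execution breakpoint via DR0/DR7 and testing for a hit with DR6.  The bit
   layout (L0 in DR7 bit 0, R/W0=00 + LEN0=00 for execute, B0 in DR6 bit 0)
   is from the architecture manuals; RT_BIT_32/RT_BOOL come from iprt/cdefs.h. */
#if 0 /* illustration only */
static void rtExampleArmExecBreakpoint(RTCCUINTXREG uAddr)
{
    ASMSetDR0(uAddr);        /* breakpoint linear address */
    ASMSetDR7(RT_BIT_32(0)); /* L0=1; R/W0=00 (exec), LEN0=00 (1 byte) */
}

static bool rtExampleBreakpointHit(void)
{
    return RT_BOOL(ASMGetAndClearDR6() & RT_BIT_32(0) /* B0 */);
}
#endif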
2936
2937
2938/**
2939 * Writes an 8-bit unsigned integer to an I/O port, ordered.
2940 *
2941 * @param Port I/O port to write to.
2942 * @param u8 8-bit integer to write.
2943 */
2944#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2945RT_ASM_DECL_PRAGMA_WATCOM(void) ASMOutU8(RTIOPORT Port, uint8_t u8);
2946#else
2947DECLINLINE(void) ASMOutU8(RTIOPORT Port, uint8_t u8)
2948{
2949# if RT_INLINE_ASM_GNU_STYLE
2950 __asm__ __volatile__("outb %b1, %w0\n\t"
2951 :: "Nd" (Port),
2952 "a" (u8));
2953
2954# elif RT_INLINE_ASM_USES_INTRIN
2955 __outbyte(Port, u8);
2956
2957# else
2958 __asm
2959 {
2960 mov dx, [Port]
2961 mov al, [u8]
2962 out dx, al
2963 }
2964# endif
2965}
2966#endif
2967
2968
2969/**
2970 * Reads an 8-bit unsigned integer from an I/O port, ordered.
2971 *
2972 * @returns 8-bit integer.
2973 * @param Port I/O port to read from.
2974 */
2975#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2976RT_ASM_DECL_PRAGMA_WATCOM(uint8_t) ASMInU8(RTIOPORT Port);
2977#else
2978DECLINLINE(uint8_t) ASMInU8(RTIOPORT Port)
2979{
2980 uint8_t u8;
2981# if RT_INLINE_ASM_GNU_STYLE
2982 __asm__ __volatile__("inb %w1, %b0\n\t"
2983 : "=a" (u8)
2984 : "Nd" (Port));
2985
2986# elif RT_INLINE_ASM_USES_INTRIN
2987 u8 = __inbyte(Port);
2988
2989# else
2990 __asm
2991 {
2992 mov dx, [Port]
2993 in al, dx
2994 mov [u8], al
2995 }
2996# endif
2997 return u8;
2998}
2999#endif
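
/* Editor's example (not from the original sources): the classic PC/AT CMOS
   access pattern built from these primitives - select a register via port
   0x70, then read it from port 0x71.  The port numbers are standard PC
   convention, not defined by this header. */
#if 0 /* illustration only */
static uint8_t rtExampleReadCmos(uint8_t bReg)
{
    ASMOutU8(0x70, bReg); /* select the CMOS register (NMI enable bit left clear) */
    return ASMInU8(0x71); /* fetch its contents */
}
#endif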
3000
3001
3002/**
3003 * Writes a 16-bit unsigned integer to an I/O port, ordered.
3004 *
3005 * @param Port I/O port to write to.
3006 * @param u16 16-bit integer to write.
3007 */
3008#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
3009RT_ASM_DECL_PRAGMA_WATCOM(void) ASMOutU16(RTIOPORT Port, uint16_t u16);
3010#else
3011DECLINLINE(void) ASMOutU16(RTIOPORT Port, uint16_t u16)
3012{
3013# if RT_INLINE_ASM_GNU_STYLE
3014 __asm__ __volatile__("outw %w1, %w0\n\t"
3015 :: "Nd" (Port),
3016 "a" (u16));
3017
3018# elif RT_INLINE_ASM_USES_INTRIN
3019 __outword(Port, u16);
3020
3021# else
3022 __asm
3023 {
3024 mov dx, [Port]
3025 mov ax, [u16]
3026 out dx, ax
3027 }
3028# endif
3029}
3030#endif
3031
3032
3033/**
3034 * Reads a 16-bit unsigned integer from an I/O port, ordered.
3035 *
3036 * @returns 16-bit integer.
3037 * @param Port I/O port to read from.
3038 */
3039#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
3040RT_ASM_DECL_PRAGMA_WATCOM(uint16_t) ASMInU16(RTIOPORT Port);
3041#else
3042DECLINLINE(uint16_t) ASMInU16(RTIOPORT Port)
3043{
3044 uint16_t u16;
3045# if RT_INLINE_ASM_GNU_STYLE
3046 __asm__ __volatile__("inw %w1, %w0\n\t"
3047 : "=a" (u16)
3048 : "Nd" (Port));
3049
3050# elif RT_INLINE_ASM_USES_INTRIN
3051 u16 = __inword(Port);
3052
3053# else
3054 __asm
3055 {
3056 mov dx, [Port]
3057 in ax, dx
3058 mov [u16], ax
3059 }
3060# endif
3061 return u16;
3062}
3063#endif
3064
3065
3066/**
3067 * Writes a 32-bit unsigned integer to an I/O port, ordered.
3068 *
3069 * @param Port I/O port to write to.
3070 * @param u32 32-bit integer to write.
3071 */
3072#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
3073RT_ASM_DECL_PRAGMA_WATCOM(void) ASMOutU32(RTIOPORT Port, uint32_t u32);
3074#else
3075DECLINLINE(void) ASMOutU32(RTIOPORT Port, uint32_t u32)
3076{
3077# if RT_INLINE_ASM_GNU_STYLE
3078 __asm__ __volatile__("outl %1, %w0\n\t"
3079 :: "Nd" (Port),
3080 "a" (u32));
3081
3082# elif RT_INLINE_ASM_USES_INTRIN
3083 __outdword(Port, u32);
3084
3085# else
3086 __asm
3087 {
3088 mov dx, [Port]
3089 mov eax, [u32]
3090 out dx, eax
3091 }
3092# endif
3093}
3094#endif
3095
3096
3097/**
3098 * Reads a 32-bit unsigned integer from an I/O port, ordered.
3099 *
3100 * @returns 32-bit integer.
3101 * @param Port I/O port to read from.
3102 */
3103#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
3104RT_ASM_DECL_PRAGMA_WATCOM(uint32_t) ASMInU32(RTIOPORT Port);
3105#else
3106DECLINLINE(uint32_t) ASMInU32(RTIOPORT Port)
3107{
3108 uint32_t u32;
3109# if RT_INLINE_ASM_GNU_STYLE
3110 __asm__ __volatile__("inl %w1, %0\n\t"
3111 : "=a" (u32)
3112 : "Nd" (Port));
3113
3114# elif RT_INLINE_ASM_USES_INTRIN
3115 u32 = __indword(Port);
3116
3117# else
3118 __asm
3119 {
3120 mov dx, [Port]
3121 in eax, dx
3122 mov [u32], eax
3123 }
3124# endif
3125 return u32;
3126}
3127#endif
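
/* Editor's example (not from the original sources): a PCI configuration dword
   read using legacy mechanism #1 through ports 0xcf8/0xcfc.  The address
   encoding is standard PCI, assumed here rather than defined by this header. */
#if 0 /* illustration only */
static uint32_t rtExamplePciCfgRead32(uint8_t uBus, uint8_t uDev, uint8_t uFn, uint8_t offReg)
{
    uint32_t const uAddr = UINT32_C(0x80000000)           /* enable bit */
                         | ((uint32_t)uBus         << 16)
                         | ((uint32_t)(uDev & 0x1f) << 11)
                         | ((uint32_t)(uFn  & 0x07) <<  8)
                         | (offReg & 0xfc);               /* dword aligned */
    ASMOutU32(0xcf8, uAddr);
    return ASMInU32(0xcfc);
}
#endif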
3128
3129
3130/**
3131 * Writes a string of 8-bit unsigned integer items to an I/O port, ordered.
3132 *
3133 * @param Port I/O port to write to.
3134 * @param pau8 Pointer to the string buffer.
3135 * @param c The number of items to write.
3136 */
3137#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
3138RT_ASM_DECL_PRAGMA_WATCOM(void) ASMOutStrU8(RTIOPORT Port, uint8_t const RT_FAR *pau8, size_t c);
3139#else
3140DECLINLINE(void) ASMOutStrU8(RTIOPORT Port, uint8_t const RT_FAR *pau8, size_t c)
3141{
3142# if RT_INLINE_ASM_GNU_STYLE
3143 __asm__ __volatile__("rep; outsb\n\t"
3144 : "+S" (pau8),
3145 "+c" (c)
3146 : "d" (Port));
3147
3148# elif RT_INLINE_ASM_USES_INTRIN
3149 __outbytestring(Port, (unsigned char RT_FAR *)pau8, (unsigned long)c);
3150
3151# else
3152 __asm
3153 {
3154 mov dx, [Port]
3155 mov ecx, [c]
3156 mov eax, [pau8]
3157 xchg esi, eax
3158 rep outsb
3159 xchg esi, eax
3160 }
3161# endif
3162}
3163#endif
3164
3165
3166/**
3167 * Reads a string of 8-bit unsigned integer items from an I/O port, ordered.
3168 *
3169 * @param Port I/O port to read from.
3170 * @param pau8 Pointer to the string buffer (output).
3171 * @param c The number of items to read.
3172 */
3173#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
3174RT_ASM_DECL_PRAGMA_WATCOM(void) ASMInStrU8(RTIOPORT Port, uint8_t RT_FAR *pau8, size_t c);
3175#else
3176DECLINLINE(void) ASMInStrU8(RTIOPORT Port, uint8_t RT_FAR *pau8, size_t c)
3177{
3178# if RT_INLINE_ASM_GNU_STYLE
3179 __asm__ __volatile__("rep; insb\n\t"
3180 : "+D" (pau8),
3181 "+c" (c)
3182 : "d" (Port));
3183
3184# elif RT_INLINE_ASM_USES_INTRIN
3185 __inbytestring(Port, pau8, (unsigned long)c);
3186
3187# else
3188 __asm
3189 {
3190 mov dx, [Port]
3191 mov ecx, [c]
3192 mov eax, [pau8]
3193 xchg edi, eax
3194 rep insb
3195 xchg edi, eax
3196 }
3197# endif
3198}
3199#endif
3200
3201
3202/**
3203 * Writes a string of 16-bit unsigned integer items to an I/O port, ordered.
3204 *
3205 * @param Port I/O port to write to.
3206 * @param pau16 Pointer to the string buffer.
3207 * @param c The number of items to write.
3208 */
3209#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
3210RT_ASM_DECL_PRAGMA_WATCOM(void) ASMOutStrU16(RTIOPORT Port, uint16_t const RT_FAR *pau16, size_t c);
3211#else
3212DECLINLINE(void) ASMOutStrU16(RTIOPORT Port, uint16_t const RT_FAR *pau16, size_t c)
3213{
3214# if RT_INLINE_ASM_GNU_STYLE
3215 __asm__ __volatile__("rep; outsw\n\t"
3216 : "+S" (pau16),
3217 "+c" (c)
3218 : "d" (Port));
3219
3220# elif RT_INLINE_ASM_USES_INTRIN
3221 __outwordstring(Port, (unsigned short RT_FAR *)pau16, (unsigned long)c);
3222
3223# else
3224 __asm
3225 {
3226 mov dx, [Port]
3227 mov ecx, [c]
3228 mov eax, [pau16]
3229 xchg esi, eax
3230 rep outsw
3231 xchg esi, eax
3232 }
3233# endif
3234}
3235#endif
3236
3237
3238/**
3239 * Reads a string of 16-bit unsigned integer items from an I/O port, ordered.
3240 *
3241 * @param Port I/O port to read from.
3242 * @param pau16 Pointer to the string buffer (output).
3243 * @param c The number of items to read.
3244 */
3245#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
3246RT_ASM_DECL_PRAGMA_WATCOM(void) ASMInStrU16(RTIOPORT Port, uint16_t RT_FAR *pau16, size_t c);
3247#else
3248DECLINLINE(void) ASMInStrU16(RTIOPORT Port, uint16_t RT_FAR *pau16, size_t c)
3249{
3250# if RT_INLINE_ASM_GNU_STYLE
3251 __asm__ __volatile__("rep; insw\n\t"
3252 : "+D" (pau16),
3253 "+c" (c)
3254 : "d" (Port));
3255
3256# elif RT_INLINE_ASM_USES_INTRIN
3257 __inwordstring(Port, pau16, (unsigned long)c);
3258
3259# else
3260 __asm
3261 {
3262 mov dx, [Port]
3263 mov ecx, [c]
3264 mov eax, [pau16]
3265 xchg edi, eax
3266 rep insw
3267 xchg edi, eax
3268 }
3269# endif
3270}
3271#endif
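
/* Editor's example (not from the original sources): string I/O is how ATA PIO
   moves a 512-byte sector - 256 words from the primary channel's data port
   0x1f0.  A real driver issues the READ SECTORS command and polls for DRQ
   first; only the transfer itself is shown here. */
#if 0 /* illustration only */
static void rtExampleAtaPioReadSector(uint16_t RT_FAR *pau16Sector)
{
    ASMInStrU16(0x1f0 /* primary ATA data port */, pau16Sector, 256);
}
#endif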
3272
3273
3274/**
3275 * Writes a string of 32-bit unsigned integer items to an I/O port, ordered.
3276 *
3277 * @param Port I/O port to write to.
3278 * @param pau32 Pointer to the string buffer.
3279 * @param c The number of items to write.
3280 */
3281#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
3282RT_ASM_DECL_PRAGMA_WATCOM(void) ASMOutStrU32(RTIOPORT Port, uint32_t const RT_FAR *pau32, size_t c);
3283#else
3284DECLINLINE(void) ASMOutStrU32(RTIOPORT Port, uint32_t const RT_FAR *pau32, size_t c)
3285{
3286# if RT_INLINE_ASM_GNU_STYLE
3287 __asm__ __volatile__("rep; outsl\n\t"
3288 : "+S" (pau32),
3289 "+c" (c)
3290 : "d" (Port));
3291
3292# elif RT_INLINE_ASM_USES_INTRIN
3293 __outdwordstring(Port, (unsigned long RT_FAR *)pau32, (unsigned long)c);
3294
3295# else
3296 __asm
3297 {
3298 mov dx, [Port]
3299 mov ecx, [c]
3300 mov eax, [pau32]
3301 xchg esi, eax
3302 rep outsd
3303 xchg esi, eax
3304 }
3305# endif
3306}
3307#endif
3308
3309
3310/**
3311 * Reads a string of 32-bit unsigned integer items from an I/O port, ordered.
3312 *
3313 * @param Port I/O port to read from.
3314 * @param pau32 Pointer to the string buffer (output).
3315 * @param c The number of items to read.
3316 */
3317#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
3318RT_ASM_DECL_PRAGMA_WATCOM(void) ASMInStrU32(RTIOPORT Port, uint32_t RT_FAR *pau32, size_t c);
3319#else
3320DECLINLINE(void) ASMInStrU32(RTIOPORT Port, uint32_t RT_FAR *pau32, size_t c)
3321{
3322# if RT_INLINE_ASM_GNU_STYLE
3323 __asm__ __volatile__("rep; insl\n\t"
3324 : "+D" (pau32),
3325 "+c" (c)
3326 : "d" (Port));
3327
3328# elif RT_INLINE_ASM_USES_INTRIN
3329 __indwordstring(Port, (unsigned long RT_FAR *)pau32, (unsigned long)c);
3330
3331# else
3332 __asm
3333 {
3334 mov dx, [Port]
3335 mov ecx, [c]
3336 mov eax, [pau32]
3337 xchg edi, eax
3338 rep insd
3339 xchg edi, eax
3340 }
3341# endif
3342}
3343#endif
3344
3345
3346/**
3347 * Invalidates a page (INVLPG).
3348 *
3349 * @param uPtr Address of the page to invalidate.
3350 */
3351#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
3352RT_ASM_DECL_PRAGMA_WATCOM(void) ASMInvalidatePage(RTCCUINTXREG uPtr);
3353#else
3354DECLINLINE(void) ASMInvalidatePage(RTCCUINTXREG uPtr)
3355{
3356# if RT_INLINE_ASM_USES_INTRIN
3357 __invlpg((void RT_FAR *)uPtr);
3358
3359# elif RT_INLINE_ASM_GNU_STYLE
3360 __asm__ __volatile__("invlpg %0\n\t"
3361 : : "m" (*(uint8_t RT_FAR *)(uintptr_t)uPtr));
3362# else
3363 __asm
3364 {
3365# ifdef RT_ARCH_AMD64
3366 mov rax, [uPtr]
3367 invlpg [rax]
3368# else
3369 mov eax, [uPtr]
3370 invlpg [eax]
3371# endif
3372 }
3373# endif
3374}
3375#endif
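
/* Editor's example (not from the original sources): after modifying a live
   PTE the stale TLB entry for that linear address must be flushed.  The bare
   uint64_t PTE here is a simplification of the real paging structures. */
#if 0 /* illustration only */
static void rtExampleUpdatePte(uint64_t volatile *pPte, uint64_t uNewPte, RTCCUINTXREG uPtr)
{
    *pPte = uNewPte;         /* publish the new page table entry */
    ASMInvalidatePage(uPtr); /* drop the cached translation for that page */
}
#endif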
3376
3377
3378/**
3379 * Writes back the internal caches and invalidates them (WBINVD).
3380 */
3381#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
3382RT_ASM_DECL_PRAGMA_WATCOM(void) ASMWriteBackAndInvalidateCaches(void);
3383#else
3384DECLINLINE(void) ASMWriteBackAndInvalidateCaches(void)
3385{
3386# if RT_INLINE_ASM_USES_INTRIN
3387 __wbinvd();
3388
3389# elif RT_INLINE_ASM_GNU_STYLE
3390 __asm__ __volatile__("wbinvd");
3391# else
3392 __asm
3393 {
3394 wbinvd
3395 }
3396# endif
3397}
3398#endif
3399
3400
3401/**
3402 * Invalidates internal and (perhaps) external caches without first
3403 * flushing dirty cache lines (INVD). Use with extreme care.
3404 */
3405#if RT_INLINE_ASM_EXTERNAL
3406RT_ASM_DECL_PRAGMA_WATCOM(void) ASMInvalidateInternalCaches(void);
3407#else
3408DECLINLINE(void) ASMInvalidateInternalCaches(void)
3409{
3410# if RT_INLINE_ASM_GNU_STYLE
3411 __asm__ __volatile__("invd");
3412# else
3413 __asm
3414 {
3415 invd
3416 }
3417# endif
3418}
3419#endif
3420
3421
3422/**
3423 * Memory load/store fence, waits for any pending writes and reads to complete.
3424 * Requires the X86_CPUID_FEATURE_EDX_SSE2 CPUID bit set.
3425 */
3426DECLINLINE(void) ASMMemoryFenceSSE2(void)
3427{
3428#if RT_INLINE_ASM_GNU_STYLE
3429 __asm__ __volatile__ (".byte 0x0f,0xae,0xf0\n\t");
3430#elif RT_INLINE_ASM_USES_INTRIN
3431 _mm_mfence();
3432#else
3433 __asm
3434 {
3435 _emit 0x0f
3436 _emit 0xae
3437 _emit 0xf0
3438 }
3439#endif
3440}
3441
3442
3443/**
3444 * Memory store fence, waits for any writes to complete.
3445 * Requires the X86_CPUID_FEATURE_EDX_SSE CPUID bit set.
3446 */
3447DECLINLINE(void) ASMWriteFenceSSE(void)
3448{
3449#if RT_INLINE_ASM_GNU_STYLE
3450 __asm__ __volatile__ (".byte 0x0f,0xae,0xf8\n\t");
3451#elif RT_INLINE_ASM_USES_INTRIN
3452 _mm_sfence();
3453#else
3454 __asm
3455 {
3456 _emit 0x0f
3457 _emit 0xae
3458 _emit 0xf8
3459 }
3460#endif
3461}
3462
3463
3464/**
3465 * Memory load fence, waits for any pending reads to complete.
3466 * Requires the X86_CPUID_FEATURE_EDX_SSE2 CPUID bit set.
3467 */
3468DECLINLINE(void) ASMReadFenceSSE2(void)
3469{
3470#if RT_INLINE_ASM_GNU_STYLE
3471 __asm__ __volatile__ (".byte 0x0f,0xae,0xe8\n\t");
3472#elif RT_INLINE_ASM_USES_INTRIN
3473 _mm_lfence();
3474#else
3475 __asm
3476 {
3477 _emit 0x0f
3478 _emit 0xae
3479 _emit 0xe8
3480 }
3481#endif
3482}
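
/* Editor's example (not from the original sources): the classic producer-side
   use of a store fence - publish a payload (possibly through write-combined
   memory), drain the store buffer, then ring the doorbell so the device can
   never observe the doorbell before the data.  Both registers are invented
   for illustration. */
#if 0 /* illustration only */
static void rtExampleRingDoorbell(uint32_t volatile *pu32Payload, uint32_t volatile *pu32Doorbell, uint32_t u32)
{
    *pu32Payload = u32; /* 1. data the device will consume */
    ASMWriteFenceSSE(); /* 2. sfence: order it before the doorbell */
    *pu32Doorbell = 1;  /* 3. only now notify the device */
}
#endif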
3483
3484#if !defined(_MSC_VER) || !defined(RT_ARCH_AMD64)
3485
3486/**
3487 * Clears the AC bit in the EFLAGS register.
3488 * Requires the X86_CPUID_STEXT_FEATURE_EBX_SMAP CPUID bit set.
3489 * Must be executed in ring-0 (R0).
3490 */
3491DECLINLINE(void) ASMClearAC(void)
3492{
3493#if RT_INLINE_ASM_GNU_STYLE
3494 __asm__ __volatile__ (".byte 0x0f,0x01,0xca\n\t");
3495#else
3496 __asm
3497 {
3498 _emit 0x0f
3499 _emit 0x01
3500 _emit 0xca
3501 }
3502#endif
3503}
3504
3505
3506/**
3507 * Sets the AC bit in the EFLAGS register.
3508 * Requires the X86_CPUID_STEXT_FEATURE_EBX_SMAP CPUID bit set.
3509 * Must be executed in ring-0 (R0).
3510 */
3511DECLINLINE(void) ASMSetAC(void)
3512{
3513#if RT_INLINE_ASM_GNU_STYLE
3514 __asm__ __volatile__ (".byte 0x0f,0x01,0xcb\n\t");
3515#else
3516 __asm
3517 {
3518 _emit 0x0f
3519 _emit 0x01
3520 _emit 0xcb
3521 }
3522#endif
3523}
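
/* Editor's example (not from the original sources): the STAC/CLAC bracket a
   SMAP-aware ring-0 path uses around a deliberate user-memory touch; real
   code validates the pointer and handles faults, neither of which is shown. */
#if 0 /* illustration only */
static uint8_t rtExamplePeekUserByte(uint8_t const RT_FAR *pbUser)
{
    uint8_t b;
    ASMSetAC();   /* stac: temporarily allow supervisor access to user pages */
    b = *pbUser;
    ASMClearAC(); /* clac: re-arm SMAP */
    return b;
}
#endif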
3524
3525#endif /* !_MSC_VER || !RT_ARCH_AMD64 */
3526
3527
3528/*
3529 * Include #pragma aux definitions for Watcom C/C++.
3530 */
3531#if defined(__WATCOMC__) && ARCH_BITS == 16
3532# define IPRT_ASM_AMD64_X86_WATCOM_16_INSTANTIATE
3533# undef IPRT_INCLUDED_asm_amd64_x86_watcom_16_h
3534# include "asm-amd64-x86-watcom-16.h"
3535#elif defined(__WATCOMC__) && ARCH_BITS == 32
3536# define IPRT_ASM_AMD64_X86_WATCOM_32_INSTANTIATE
3537# undef IPRT_INCLUDED_asm_amd64_x86_watcom_32_h
3538# include "asm-amd64-x86-watcom-32.h"
3539#endif
3540
3541
3542/** @} */
3543#endif /* !IPRT_INCLUDED_asm_amd64_x86_h */
3544