/* $Id: CPUMR3Msr-x86.h 109008 2025-04-16 20:59:36Z vboxsync $ */
/** @file
 * CPUM - x86 MSR macros.
 */

/*
 * Copyright (C) 2013-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


#ifndef VMM_INCLUDED_SRC_VMMR3_CPUMR3Msr_x86_h
#define VMM_INCLUDED_SRC_VMMR3_CPUMR3Msr_x86_h
#ifndef RT_WITHOUT_PRAGMA_ONCE
# pragma once
#endif

/** @name Short macros for the MSR range entries.
 *
 * These are rather cryptic, but this is to reduce the attack on the right
 * margin.
 *
 * @{ */
/** Alias one MSR onto another (a_uTarget). */
#define MAL(a_uMsr, a_szName, a_uTarget) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_MsrAlias, kCpumMsrWrFn_MsrAlias, 0, a_uTarget, 0, 0, a_szName)
/** Functions handle everything. */
#define MFN(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, 0, 0, 0, a_szName)
/** Functions handle everything, with GP mask. */
#define MFG(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_fWrGpMask) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, 0, 0, a_fWrGpMask, a_szName)
/** Function handlers, read-only. */
#define MFO(a_uMsr, a_szName, a_enmRdFnSuff) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_ReadOnly, 0, 0, 0, UINT64_MAX, a_szName)
/** Function handlers, ignore all writes. */
#define MFI(a_uMsr, a_szName, a_enmRdFnSuff) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_IgnoreWrite, 0, 0, UINT64_MAX, 0, a_szName)
/** Function handlers, with value. */
#define MFV(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_uValue) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, a_uValue, 0, 0, a_szName)
/** Function handlers, with write ignore mask. */
#define MFW(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_fWrIgnMask) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, 0, a_fWrIgnMask, 0, a_szName)
/** Function handlers, extended version. */
#define MFX(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_uValue, a_fWrIgnMask, a_fWrGpMask) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, a_uValue, a_fWrIgnMask, a_fWrGpMask, a_szName)
/** Function handlers, with CPUMCPU storage variable. */
#define MFS(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_CpumCpuMember) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, \
         RT_OFFSETOF(CPUMCPU, a_CpumCpuMember), 0, 0, 0, a_szName)
/** Function handlers, with CPUMCPU storage variable, ignore mask and GP mask. */
#define MFZ(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_CpumCpuMember, a_fWrIgnMask, a_fWrGpMask) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, \
         RT_OFFSETOF(CPUMCPU, a_CpumCpuMember), 0, a_fWrIgnMask, a_fWrGpMask, a_szName)
/** Read-only fixed value. */
#define MVO(a_uMsr, a_szName, a_uValue) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_ReadOnly, 0, a_uValue, 0, UINT64_MAX, a_szName)
/** Read-only fixed value, ignores all writes. */
#define MVI(a_uMsr, a_szName, a_uValue) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_IgnoreWrite, 0, a_uValue, UINT64_MAX, 0, a_szName)
/** Read fixed value, ignore writes outside GP mask. */
#define MVG(a_uMsr, a_szName, a_uValue, a_fWrGpMask) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_IgnoreWrite, 0, a_uValue, 0, a_fWrGpMask, a_szName)
/** Read fixed value, extended version with both GP and ignore masks. */
#define MVX(a_uMsr, a_szName, a_uValue, a_fWrIgnMask, a_fWrGpMask) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_IgnoreWrite, 0, a_uValue, a_fWrIgnMask, a_fWrGpMask, a_szName)
/** The short form, no CPUM backing. */
#define MSN(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, \
         a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName)

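/*
 * Illustration only (added commentary, not part of the original header):
 * entries built with the single-MSR macros above are intended to be placed
 * in an MSR range table and expand, via RINT, into struct initializers.
 * The table name, MSR numbers, fixed values and handler suffixes below are
 * hypothetical examples chosen for this sketch, not definitions taken from
 * VirtualBox:
 *
 *     static CPUMMSRRANGE const s_aMsrRanges_Example[] =
 *     {
 *         MFN(0x00000010, "IA32_TIME_STAMP_COUNTER", Ia32TimestampCounter, Ia32TimestampCounter),
 *         MVO(0x0000008b, "BIOS_SIGN",       UINT64_C(0x100000000)),  - fixed value, writes cause #GP
 *         MVI(0x000001a0, "IA32_MISC_ENABLE", UINT64_C(0x800)),       - fixed value, writes ignored
 *     };
 */
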
/** Range: Functions handle everything. */
#define RFN(a_uFirst, a_uLast, a_szName, a_enmRdFnSuff, a_enmWrFnSuff) \
    RINT(a_uFirst, a_uLast, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, 0, 0, 0, a_szName)
/** Range: Read fixed value, read-only. */
#define RVO(a_uFirst, a_uLast, a_szName, a_uValue) \
    RINT(a_uFirst, a_uLast, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_ReadOnly, 0, a_uValue, 0, UINT64_MAX, a_szName)
/** Range: Read fixed value, ignore writes. */
#define RVI(a_uFirst, a_uLast, a_szName, a_uValue) \
    RINT(a_uFirst, a_uLast, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_IgnoreWrite, 0, a_uValue, UINT64_MAX, 0, a_szName)
/** Range: The short form, no CPUM backing. */
#define RSN(a_uFirst, a_uLast, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask) \
    RINT(a_uFirst, a_uLast, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, \
         a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName)
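
/*
 * Illustration only (added commentary, not part of the original header):
 * the range variants cover a span of consecutive MSRs with a single table
 * entry.  The MSR numbers, names and handler suffixes below are hypothetical
 * examples for this sketch, not definitions taken from VirtualBox:
 *
 *     RFN(0x00000680, 0x0000068f, "MSR_LASTBRANCH_n_FROM_IP", IntelLastBranchFromN, IntelLastBranchFromN),
 *     RVI(0x00001000, 0x000013ff, "UNKNOWN_1000_13ff", 0),
 */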

/** Internal form used by the macros. */
#ifdef VBOX_WITH_STATISTICS
# define RINT(a_uFirst, a_uLast, a_enmRdFn, a_enmWrFn, a_offCpumCpu, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName) \
    { a_uFirst, a_uLast, a_enmRdFn, a_enmWrFn, a_offCpumCpu, 0, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName, \
      { 0 }, { 0 }, { 0 }, { 0 } }
#else
# define RINT(a_uFirst, a_uLast, a_enmRdFn, a_enmWrFn, a_offCpumCpu, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName) \
    { a_uFirst, a_uLast, a_enmRdFn, a_enmWrFn, a_offCpumCpu, 0, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName }
#endif
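/*
 * Note on the RINT layout (added commentary, not part of the original
 * header): judging by the parameter names, the initializer fields are, in
 * order, the first/last MSR of the range, the read and write handler enums,
 * the offset of an optional CPUMCPU backing member, a reserved zero, the
 * initial or fixed read value, the write-ignore mask, the write-#GP mask and
 * the name.  The trailing { 0 } aggregates in the VBOX_WITH_STATISTICS
 * variant presumably zero-initialize the per-range statistics counters of
 * the target range structure.
 */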
/** @} */

#endif /* !VMM_INCLUDED_SRC_VMMR3_CPUMR3Msr_x86_h */