source: vbox/trunk/src/VBox/VMM/VMMAll/target-x86/IEMAllTlbInline-x86.h@108791

Last change on this file since 108791 was 108791, checked in by vboxsync, 3 weeks ago

VMM/IEM: More ARM target work. jiraref:VBP-1598

/* $Id: IEMAllTlbInline-x86.h 108791 2025-03-28 21:58:31Z vboxsync $ */
/** @file
 * IEM - Interpreted Execution Manager - x86 target, Inline TLB routines.
 *
 * Mainly related to large pages.
 */

/*
 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


#ifndef VMM_INCLUDED_SRC_VMMAll_target_x86_IEMAllTlbInline_x86_h
#define VMM_INCLUDED_SRC_VMMAll_target_x86_IEMAllTlbInline_x86_h
#ifndef RT_WITHOUT_PRAGMA_ONCE
# pragma once
#endif

#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)

/**
 * Helper for doing large page accounting at TLB load time.
 */
template<bool const a_fGlobal>
DECL_FORCE_INLINE(void) iemTlbLoadedLargePage(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR uTagNoRev, bool f2MbLargePages)
{
    if (a_fGlobal)
        pTlb->cTlbGlobalLargePageCurLoads++;
    else
        pTlb->cTlbNonGlobalLargePageCurLoads++;

# ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
    RTGCPTR const idxBit = IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev) + a_fGlobal;
    ASMBitSet(pTlb->bmLargePage, idxBit);
# endif

    AssertCompile(IEMTLB_CALC_TAG_NO_REV(pVCpu, (RTGCPTR)0x8731U << GUEST_PAGE_SHIFT) == 0x8731U);
    uint32_t const                 fMask  = (f2MbLargePages ? _2M - 1U : _4M - 1U) >> GUEST_PAGE_SHIFT;
    IEMTLB::LARGEPAGERANGE * const pRange = a_fGlobal
                                          ? &pTlb->GlobalLargePageRange
                                          : &pTlb->NonGlobalLargePageRange;
    uTagNoRev &= ~(RTGCPTR)fMask;
    if (uTagNoRev < pRange->uFirstTag)
        pRange->uFirstTag = uTagNoRev;

    uTagNoRev |= fMask;
    if (uTagNoRev > pRange->uLastTag)
        pRange->uLastTag = uTagNoRev;

    RT_NOREF_PV(pVCpu);
}
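
/* Illustration: assuming 4K guest pages (GUEST_PAGE_SHIFT == 12), a non-PAE
   4MB large page gives fMask = (_4M - 1) >> 12 = 0x3ff, so a load with
   uTagNoRev = 0x12345 widens the range to uFirstTag <= 0x12000 and
   uLastTag >= 0x123ff, i.e. the full 1024 page span of that large page.
   (Example numbers are made up for illustration.) */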


/** @todo graduate this to cdefs.h or asm-mem.h. */
# ifdef RT_ARCH_ARM64          /** @todo RT_CACHELINE_SIZE is wrong for M1 */
#  undef RT_CACHELINE_SIZE
#  define RT_CACHELINE_SIZE 128
# endif

# if defined(_MM_HINT_T0) && (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86))
#  define MY_PREFETCH(a_pvAddr)     _mm_prefetch((const char *)(a_pvAddr), _MM_HINT_T0)
# elif defined(_MSC_VER) && (defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32))
#  define MY_PREFETCH(a_pvAddr)     __prefetch((a_pvAddr))
# elif defined(__GNUC__) || RT_CLANG_HAS_FEATURE(__builtin_prefetch)
#  define MY_PREFETCH(a_pvAddr)     __builtin_prefetch((a_pvAddr), 0 /*rw*/, 3 /*locality*/)
# else
#  define MY_PREFETCH(a_pvAddr)     ((void)0)
# endif
# if 0
#  undef  MY_PREFETCH
#  define MY_PREFETCH(a_pvAddr)     ((void)0)
# endif

/** @def MY_PREFETCH_64
 * 64 byte prefetch hint, could be more depending on cache line size. */
/** @def MY_PREFETCH_128
 * 128 byte prefetch hint. */
/** @def MY_PREFETCH_256
 * 256 byte prefetch hint. */
# if RT_CACHELINE_SIZE >= 128
    /* 128 byte cache lines */
#  define MY_PREFETCH_64(a_pvAddr)  MY_PREFETCH(a_pvAddr)
#  define MY_PREFETCH_128(a_pvAddr) MY_PREFETCH(a_pvAddr)
#  define MY_PREFETCH_256(a_pvAddr) do { \
        MY_PREFETCH(a_pvAddr); \
        MY_PREFETCH((uint8_t const *)a_pvAddr + 128); \
    } while (0)
# else
    /* 64 byte cache lines */
#  define MY_PREFETCH_64(a_pvAddr)  MY_PREFETCH(a_pvAddr)
#  define MY_PREFETCH_128(a_pvAddr) do { \
        MY_PREFETCH(a_pvAddr); \
        MY_PREFETCH((uint8_t const *)a_pvAddr + 64); \
    } while (0)
#  define MY_PREFETCH_256(a_pvAddr) do { \
        MY_PREFETCH(a_pvAddr); \
        MY_PREFETCH((uint8_t const *)a_pvAddr + 64); \
        MY_PREFETCH((uint8_t const *)a_pvAddr + 128); \
        MY_PREFETCH((uint8_t const *)a_pvAddr + 192); \
    } while (0)
# endif
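
/* Note: with 64 byte cache lines MY_PREFETCH_256(p) thus issues four prefetch
   hints at p, p+64, p+128 and p+192, while with 128 byte lines it issues two,
   at p and p+128.  The invalidation loop below uses it to pull in the next
   batch of TLB entries slightly ahead of the scan. */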

template<bool const a_fDataTlb, bool const a_f2MbLargePage, bool const a_fGlobal, bool const a_fNonGlobal>
DECLINLINE(void) iemTlbInvalidateLargePageWorkerInner(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR GCPtrTag,
                                                      RTGCPTR GCPtrInstrBufPcTag) RT_NOEXCEPT
{
    IEMTLBTRACE_LARGE_SCAN(pVCpu, a_fGlobal, a_fNonGlobal, a_fDataTlb);
    AssertCompile(IEMTLB_ENTRY_COUNT >= 16); /* prefetching + unroll assumption */

    if (a_fGlobal)
        pTlb->cTlbInvlPgLargeGlobal += 1;
    if (a_fNonGlobal)
        pTlb->cTlbInvlPgLargeNonGlobal += 1;

    /*
     * Set up the scan.
     *
     * GCPtrTagMask: A 2MB page consists of 512 4K pages, so a 256-entry TLB
     * will map offset zero and offset 1MB to the same slot pair.  Our
     * GCPtrTag[Glob] values are for the range 0-1MB, i.e. slots 0-255.  So,
     * we construct a mask that folds large page offsets 1MB-2MB into the
     * 0-1MB range.
     *
     * For our example with 2MB pages and a 256-entry TLB: 0xfffffffffffffeff
     *
     * MY_PREFETCH: Hope that prefetching 256 bytes at a time is okay for
     * relevant host architectures.
     */
    /** @todo benchmark this code from the guest side. */
    bool const      fPartialScan = IEMTLB_ENTRY_COUNT > (a_f2MbLargePage ? 512 : 1024);
#ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
    uintptr_t       idxBitmap    = fPartialScan ? IEMTLB_TAG_TO_EVEN_INDEX(GCPtrTag) / 64 : 0;
    uintptr_t const idxBitmapEnd = fPartialScan ? idxBitmap + ((a_f2MbLargePage ? 512 : 1024) * 2) / 64
                                                : IEMTLB_ENTRY_COUNT * 2 / 64;
#else
    uintptr_t       idxEven      = fPartialScan ? IEMTLB_TAG_TO_EVEN_INDEX(GCPtrTag) : 0;
    MY_PREFETCH_256(&pTlb->aEntries[idxEven + !a_fNonGlobal]);
    uintptr_t const idxEvenEnd   = fPartialScan ? idxEven + ((a_f2MbLargePage ? 512 : 1024) * 2) : IEMTLB_ENTRY_COUNT * 2;
#endif
    RTGCPTR const   GCPtrTagMask = fPartialScan ? ~(RTGCPTR)0
                                 : ~(RTGCPTR)(  (RT_BIT_32(a_f2MbLargePage ? 9 : 10) - 1U)
                                              & ~(uint32_t)(RT_BIT_32(IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO) - 1U));
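    /* Illustration: for the 2MB page / 256-entry TLB example in the comment
       above (IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO == 8), RT_BIT_32(9) - 1 = 0x1ff
       covers the 512 pages of the large page, clearing the low 8 index bits
       leaves 0x100, and inverting yields 0xfffffffffffffeff, which folds tag
       offsets 256-511 onto 0-255 when comparing against the stored tags. */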

    /*
     * Set cbInstrBufTotal to zero if GCPtrInstrBufPcTag is within any of the tag ranges.
     * We make ASSUMPTIONS about IEMTLB_CALC_TAG_NO_REV here.
     */
    AssertCompile(IEMTLB_CALC_TAG_NO_REV(pVCpu, (RTGCPTR)0x8731U << GUEST_PAGE_SHIFT) == 0x8731U);
    if (   !a_fDataTlb
        && GCPtrInstrBufPcTag - GCPtrTag < (a_f2MbLargePage ? 512U : 1024U))
        pVCpu->iem.s.cbInstrBufTotal = 0;

    /*
     * Combine TAG values with the TLB revisions.
     */
    RTGCPTR GCPtrTagGlob = a_fGlobal ? GCPtrTag | pTlb->uTlbRevisionGlobal : 0;
    if (a_fNonGlobal)
        GCPtrTag |= pTlb->uTlbRevision;

    /*
     * Do the scanning.
     */
#ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
    uint64_t const bmMask = a_fGlobal && a_fNonGlobal ? UINT64_MAX
                          : a_fGlobal ? UINT64_C(0xaaaaaaaaaaaaaaaa) : UINT64_C(0x5555555555555555);
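    /* Note: the bitmap mirrors the entry layout established by
       iemTlbLoadedLargePage() above: even bits mark non-global entries and odd
       bits their global siblings, so 0x5555... selects only non-global bits,
       0xaaaa... only global ones, and UINT64_MAX both. */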
    /* Scan bitmap entries (64 bits at a time): */
    for (;;)
    {
# if 1
        uint64_t bmEntry = pTlb->bmLargePage[idxBitmap] & bmMask;
        if (bmEntry)
        {
            /* Scan the non-zero 64-bit value in groups of 8 bits: */
            uint64_t  bmToClear = 0;
            uintptr_t idxEven   = idxBitmap * 64;
            uint32_t  idxTag    = 0;
            for (;;)
            {
                if (bmEntry & 0xff)
                {
# define ONE_PAIR(a_idxTagIter, a_idxEvenIter, a_bmNonGlobal, a_bmGlobal) \
                    if (a_fNonGlobal) \
                    { \
                        if (bmEntry & a_bmNonGlobal) \
                        { \
                            Assert(pTlb->aEntries[a_idxEvenIter].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE); \
                            if ((pTlb->aEntries[a_idxEvenIter].uTag & GCPtrTagMask) == (GCPtrTag + a_idxTagIter)) \
                            { \
                                IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTag + a_idxTagIter, \
                                                             pTlb->aEntries[a_idxEvenIter].GCPhys, \
                                                             a_idxEvenIter, a_fDataTlb); \
                                pTlb->aEntries[a_idxEvenIter].uTag = 0; \
                                bmToClearSub8 |= a_bmNonGlobal; \
                            } \
                        } \
                        else \
                            Assert(   !(pTlb->aEntries[a_idxEvenIter].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE) \
                                   ||    (pTlb->aEntries[a_idxEvenIter].uTag & IEMTLB_REVISION_MASK) \
                                      != (GCPtrTag & IEMTLB_REVISION_MASK)); \
                    } \
                    if (a_fGlobal) \
                    { \
                        if (bmEntry & a_bmGlobal) \
                        { \
                            Assert(pTlb->aEntries[a_idxEvenIter + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE); \
                            if ((pTlb->aEntries[a_idxEvenIter + 1].uTag & GCPtrTagMask) == (GCPtrTagGlob + a_idxTagIter)) \
                            { \
                                IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTagGlob + a_idxTagIter, \
                                                             pTlb->aEntries[a_idxEvenIter + 1].GCPhys, \
                                                             a_idxEvenIter + 1, a_fDataTlb); \
                                pTlb->aEntries[a_idxEvenIter + 1].uTag = 0; \
                                bmToClearSub8 |= a_bmGlobal; \
                            } \
                        } \
                        else \
                            Assert(   !(pTlb->aEntries[a_idxEvenIter + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE) \
                                   ||    (pTlb->aEntries[a_idxEvenIter + 1].uTag & IEMTLB_REVISION_MASK) \
                                      != (GCPtrTagGlob & IEMTLB_REVISION_MASK)); \
                    }
                    uint64_t bmToClearSub8 = 0;
                    ONE_PAIR(idxTag + 0, idxEven + 0, 0x01, 0x02)
                    ONE_PAIR(idxTag + 1, idxEven + 2, 0x04, 0x08)
                    ONE_PAIR(idxTag + 2, idxEven + 4, 0x10, 0x20)
                    ONE_PAIR(idxTag + 3, idxEven + 6, 0x40, 0x80)
                    bmToClear |= bmToClearSub8 << (idxTag * 2);
# undef ONE_PAIR
                }

                /* advance to the next 8 bits. */
                bmEntry >>= 8;
                if (!bmEntry)
                    break;
                idxEven += 8;
                idxTag  += 4;
            }

            /* Clear the large page flags we covered. */
            pTlb->bmLargePage[idxBitmap] &= ~bmToClear;
        }
# else
        uint64_t const bmEntry = pTlb->bmLargePage[idxBitmap] & bmMask;
        if (bmEntry)
        {
            /* Scan the non-zero 64-bit value completely unrolled: */
            uintptr_t const idxEven   = idxBitmap * 64;
            uint64_t        bmToClear = 0;
# define ONE_PAIR(a_idxTagIter, a_idxEvenIter, a_bmNonGlobal, a_bmGlobal) \
            if (a_fNonGlobal) \
            { \
                if (bmEntry & a_bmNonGlobal) \
                { \
                    Assert(pTlb->aEntries[a_idxEvenIter].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE); \
                    if ((pTlb->aEntries[a_idxEvenIter].uTag & GCPtrTagMask) == (GCPtrTag + a_idxTagIter)) \
                    { \
                        IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTag + a_idxTagIter, \
                                                     pTlb->aEntries[a_idxEvenIter].GCPhys, \
                                                     a_idxEvenIter, a_fDataTlb); \
                        pTlb->aEntries[a_idxEvenIter].uTag = 0; \
                        bmToClear |= a_bmNonGlobal; \
                    } \
                } \
                else \
                    Assert(   !(pTlb->aEntries[a_idxEvenIter].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE) \
                           ||    (pTlb->aEntries[a_idxEvenIter].uTag & IEMTLB_REVISION_MASK) \
                              != (GCPtrTag & IEMTLB_REVISION_MASK)); \
            } \
            if (a_fGlobal) \
            { \
                if (bmEntry & a_bmGlobal) \
                { \
                    Assert(pTlb->aEntries[a_idxEvenIter + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE); \
                    if ((pTlb->aEntries[a_idxEvenIter + 1].uTag & GCPtrTagMask) == (GCPtrTagGlob + a_idxTagIter)) \
                    { \
                        IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTagGlob + a_idxTagIter, \
                                                     pTlb->aEntries[a_idxEvenIter + 1].GCPhys, \
                                                     a_idxEvenIter + 1, a_fDataTlb); \
                        pTlb->aEntries[a_idxEvenIter + 1].uTag = 0; \
                        bmToClear |= a_bmGlobal; \
                    } \
                } \
                else \
                    Assert(   !(pTlb->aEntries[a_idxEvenIter + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE) \
                           ||    (pTlb->aEntries[a_idxEvenIter + 1].uTag & IEMTLB_REVISION_MASK) \
                              != (GCPtrTagGlob & IEMTLB_REVISION_MASK)); \
            } ((void)0)
# define FOUR_PAIRS(a_iByte, a_cShift) \
            ONE_PAIR(0 + a_iByte * 4, idxEven + 0 + a_iByte * 8, UINT64_C(0x01) << a_cShift, UINT64_C(0x02) << a_cShift); \
            ONE_PAIR(1 + a_iByte * 4, idxEven + 2 + a_iByte * 8, UINT64_C(0x04) << a_cShift, UINT64_C(0x08) << a_cShift); \
            ONE_PAIR(2 + a_iByte * 4, idxEven + 4 + a_iByte * 8, UINT64_C(0x10) << a_cShift, UINT64_C(0x20) << a_cShift); \
            ONE_PAIR(3 + a_iByte * 4, idxEven + 6 + a_iByte * 8, UINT64_C(0x40) << a_cShift, UINT64_C(0x80) << a_cShift)
            if (bmEntry & (uint32_t)UINT16_MAX)
            {
                FOUR_PAIRS(0, 0);
                FOUR_PAIRS(1, 8);
            }
            if (bmEntry & ((uint32_t)UINT16_MAX << 16))
            {
                FOUR_PAIRS(2, 16);
                FOUR_PAIRS(3, 24);
            }
            if (bmEntry & ((uint64_t)UINT16_MAX << 32))
            {
                FOUR_PAIRS(4, 32);
                FOUR_PAIRS(5, 40);
            }
            if (bmEntry & ((uint64_t)UINT16_MAX << 48))
            {
                FOUR_PAIRS(6, 48);
                FOUR_PAIRS(7, 56);
            }
# undef FOUR_PAIRS

            /* Clear the large page flags we covered. */
            pTlb->bmLargePage[idxBitmap] &= ~bmToClear;
        }
# endif

        /* advance */
        idxBitmap++;
        if (idxBitmap >= idxBitmapEnd)
            break;
        if (a_fNonGlobal)
            GCPtrTag     += 32;
        if (a_fGlobal)
            GCPtrTagGlob += 32;
    }
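    /* Note: each 64-bit bitmap word covers 32 entry pairs (two bits per tag),
       which is why the tag values advance by 32 per word in the loop above. */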

#else /* !IEMTLB_WITH_LARGE_PAGE_BITMAP */

    for (; idxEven < idxEvenEnd; idxEven += 8)
    {
# define ONE_ITERATION(a_idxEvenIter) \
        if (a_fNonGlobal) \
        { \
            if ((pTlb->aEntries[a_idxEvenIter].uTag & GCPtrTagMask) == GCPtrTag) \
            { \
                if (pTlb->aEntries[a_idxEvenIter].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE) \
                { \
                    IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTag, pTlb->aEntries[a_idxEvenIter].GCPhys, \
                                                 a_idxEvenIter, a_fDataTlb); \
                    pTlb->aEntries[a_idxEvenIter].uTag = 0; \
                } \
            } \
            GCPtrTag++; \
        } \
        \
        if (a_fGlobal) \
        { \
            if ((pTlb->aEntries[a_idxEvenIter + 1].uTag & GCPtrTagMask) == GCPtrTagGlob) \
            { \
                if (pTlb->aEntries[a_idxEvenIter + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE) \
                { \
                    IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTag, pTlb->aEntries[a_idxEvenIter + 1].GCPhys, \
                                                 a_idxEvenIter + 1, a_fDataTlb); \
                    pTlb->aEntries[a_idxEvenIter + 1].uTag = 0; \
                } \
            } \
            GCPtrTagGlob++; \
        }
        if (idxEven < idxEvenEnd - 4)
            MY_PREFETCH_256(&pTlb->aEntries[idxEven + 8 + !a_fNonGlobal]);
        ONE_ITERATION(idxEven)
        ONE_ITERATION(idxEven + 2)
        ONE_ITERATION(idxEven + 4)
        ONE_ITERATION(idxEven + 6)
# undef ONE_ITERATION
    }
#endif /* !IEMTLB_WITH_LARGE_PAGE_BITMAP */
}

template<bool const a_fDataTlb, bool const a_f2MbLargePage>
DECLINLINE(void) iemTlbInvalidateLargePageWorker(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR GCPtrTag,
                                                 RTGCPTR GCPtrInstrBufPcTag) RT_NOEXCEPT
{
    AssertCompile(IEMTLB_CALC_TAG_NO_REV(pVCpu, (RTGCPTR)0x8731U << GUEST_PAGE_SHIFT) == 0x8731U);

    GCPtrTag &= ~(RTGCPTR)(RT_BIT_64((a_f2MbLargePage ? 21 : 22) - GUEST_PAGE_SHIFT) - 1U);
    if (   GCPtrTag >= pTlb->GlobalLargePageRange.uFirstTag
        && GCPtrTag <= pTlb->GlobalLargePageRange.uLastTag)
    {
        if (   GCPtrTag < pTlb->NonGlobalLargePageRange.uFirstTag
            || GCPtrTag > pTlb->NonGlobalLargePageRange.uLastTag)
            iemTlbInvalidateLargePageWorkerInner<a_fDataTlb, a_f2MbLargePage, true, false>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
        else
            iemTlbInvalidateLargePageWorkerInner<a_fDataTlb, a_f2MbLargePage, true, true>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
    }
    else if (   GCPtrTag < pTlb->NonGlobalLargePageRange.uFirstTag
             || GCPtrTag > pTlb->NonGlobalLargePageRange.uLastTag)
    {
        /* Large pages aren't as likely in the non-global TLB half. */
        IEMTLBTRACE_LARGE_SCAN(pVCpu, false, false, a_fDataTlb);
    }
    else
        iemTlbInvalidateLargePageWorkerInner<a_fDataTlb, a_f2MbLargePage, false, true>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
}
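
/* Illustration: the dispatch above only scans the TLB halves whose recorded
   large page tag ranges can contain the (large page aligned) tag.  E.g. with
   GlobalLargePageRange = [0x10000, 0x107ff] and NonGlobalLargePageRange =
   [0x80000, 0x803ff], a tag masking down to 0x10000 takes the global-only
   path, one masking down to 0x80000 takes the non-global-only path, and
   anything outside both ranges merely emits the trace event.  (Example
   ranges are made up for illustration.) */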

template<bool const a_fDataTlb>
DECLINLINE(void) iemTlbInvalidatePageWorker(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR GCPtrTag, uintptr_t idxEven) RT_NOEXCEPT
{
    pTlb->cTlbInvlPg += 1;

    /*
     * Flush the entry pair.
     */
    if (pTlb->aEntries[idxEven].uTag == (GCPtrTag | pTlb->uTlbRevision))
    {
        IEMTLBTRACE_EVICT_SLOT(pVCpu, GCPtrTag, pTlb->aEntries[idxEven].GCPhys, idxEven, a_fDataTlb);
        pTlb->aEntries[idxEven].uTag = 0;
        if (!a_fDataTlb && GCPtrTag == IEMTLB_CALC_TAG_NO_REV(pVCpu, pVCpu->iem.s.uInstrBufPc))
            pVCpu->iem.s.cbInstrBufTotal = 0;
    }
    if (pTlb->aEntries[idxEven + 1].uTag == (GCPtrTag | pTlb->uTlbRevisionGlobal))
    {
        IEMTLBTRACE_EVICT_SLOT(pVCpu, GCPtrTag, pTlb->aEntries[idxEven + 1].GCPhys, idxEven + 1, a_fDataTlb);
        pTlb->aEntries[idxEven + 1].uTag = 0;
        if (!a_fDataTlb && GCPtrTag == IEMTLB_CALC_TAG_NO_REV(pVCpu, pVCpu->iem.s.uInstrBufPc))
            pVCpu->iem.s.cbInstrBufTotal = 0;
    }

    /*
     * If there are (or have been) large pages in the TLB, we must check if the
     * address being flushed may involve one of those, as then we'd have to
     * scan for entries relating to the same page and flush those as well.
     */
# if 0 /** @todo do accurate counts of currently loaded large stuff and we can use those */
    if (pTlb->cTlbGlobalLargePageCurLoads || pTlb->cTlbNonGlobalLargePageCurLoads)
# else
    if (pTlb->GlobalLargePageRange.uLastTag || pTlb->NonGlobalLargePageRange.uLastTag)
# endif
    {
        RTGCPTR const GCPtrInstrBufPcTag = a_fDataTlb ? 0 : IEMTLB_CALC_TAG_NO_REV(pVCpu, pVCpu->iem.s.uInstrBufPc);
        if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
            iemTlbInvalidateLargePageWorker<a_fDataTlb, true>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
        else
            iemTlbInvalidateLargePageWorker<a_fDataTlb, false>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
    }
}
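
# if 0 /* Illustrative sketch only, not compiled: how a caller might drive
          iemTlbInvalidatePageWorker for a data TLB invalidation.  The DataTlb
          member name is an assumption for illustration; only macros already
          used elsewhere in this file are relied upon. */
DECLINLINE(void) iemTlbInvalidatePageExample(PVMCPUCC pVCpu, RTGCPTR GCPtrPage)
{
    /* Derive the revision-less tag from the linear address and locate the
       even slot of the entry pair it maps to. */
    RTGCPTR const   GCPtrTag = IEMTLB_CALC_TAG_NO_REV(pVCpu, GCPtrPage);
    uintptr_t const idxEven  = IEMTLB_TAG_TO_EVEN_INDEX(GCPtrTag);
    iemTlbInvalidatePageWorker<true /*a_fDataTlb*/>(pVCpu, &pVCpu->iem.s.DataTlb, GCPtrTag, idxEven);
}
# endif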

#endif /* defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB) */

#endif /* !VMM_INCLUDED_SRC_VMMAll_target_x86_IEMAllTlbInline_x86_h */