VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/target-armv8/IEMAllTlbInline-armv8.h@108791

Last change on this file since 108791 was 108791, checked in by vboxsync, 3 weeks ago

VMM/IEM: More ARM target work. jiraref:VBP-1598

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 20.5 KB
/* $Id: IEMAllTlbInline-armv8.h 108791 2025-03-28 21:58:31Z vboxsync $ */
/** @file
 * IEM - Interpreted Execution Manager - ARMv8 target, Inline TLB routines.
 *
 * Mainly related to large pages.
 */

/*
 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


#ifndef VMM_INCLUDED_SRC_VMMAll_target_armv8_IEMAllTlbInline_armv8_h
#define VMM_INCLUDED_SRC_VMMAll_target_armv8_IEMAllTlbInline_armv8_h
#ifndef RT_WITHOUT_PRAGMA_ONCE
# pragma once
#endif

#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)

#if 0 /** @todo ARMv8: TLB flushing is a lot more complex on arm! */

/**
 * Helper for doing large page accounting at TLB load time.
 */
template<bool const a_fGlobal>
DECL_FORCE_INLINE(void) iemTlbLoadedLargePage(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR uTagNoRev, bool f2MbLargePages)
{
    if (a_fGlobal)
        pTlb->cTlbGlobalLargePageCurLoads++;
    else
        pTlb->cTlbNonGlobalLargePageCurLoads++;

# ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
    RTGCPTR const idxBit = IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev) + a_fGlobal;
    ASMBitSet(pTlb->bmLargePage, idxBit);
# endif

    AssertCompile(IEMTLB_CALC_TAG_NO_REV(pVCpu, (RTGCPTR)0x8731U << GUEST_PAGE_SHIFT) == 0x8731U);
    uint32_t const fMask = (f2MbLargePages ? _2M - 1U : _4M - 1U) >> GUEST_PAGE_SHIFT;
    IEMTLB::LARGEPAGERANGE * const pRange = a_fGlobal
                                          ? &pTlb->GlobalLargePageRange
                                          : &pTlb->NonGlobalLargePageRange;
    uTagNoRev &= ~(RTGCPTR)fMask;
    if (uTagNoRev < pRange->uFirstTag)
        pRange->uFirstTag = uTagNoRev;

    uTagNoRev |= fMask;
    if (uTagNoRev > pRange->uLastTag)
        pRange->uLastTag = uTagNoRev;

    RT_NOREF_PV(pVCpu);
}
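
/* Illustration (assumes GUEST_PAGE_SHIFT == 12): for a 2MB large page,
 * fMask = (_2M - 1) >> 12 = 0x1ff, so loading a page at guest address
 * 0x40123000 gives uTagNoRev 0x40123 and widens the range to cover tags
 * 0x40000..0x401ff, i.e. the whole naturally aligned 2MB block.  The
 * uFirstTag/uLastTag pair is thus a conservative superset of all large
 * pages currently in the TLB, which the invalidation workers below consult
 * before deciding whether a scan is needed at all. */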


/** @todo graduate this to cdefs.h or asm-mem.h. */
# ifdef RT_ARCH_ARM64 /** @todo RT_CACHELINE_SIZE is wrong for M1 */
#  undef RT_CACHELINE_SIZE
#  define RT_CACHELINE_SIZE 128
# endif

# if defined(_MM_HINT_T0) && (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86))
#  define MY_PREFETCH(a_pvAddr)     _mm_prefetch((const char *)(a_pvAddr), _MM_HINT_T0)
# elif defined(_MSC_VER) && (defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32))
#  define MY_PREFETCH(a_pvAddr)     __prefetch((a_pvAddr))
# elif defined(__GNUC__) || RT_CLANG_HAS_FEATURE(__builtin_prefetch)
#  define MY_PREFETCH(a_pvAddr)     __builtin_prefetch((a_pvAddr), 0 /*rw*/, 3 /*locality*/)
# else
#  define MY_PREFETCH(a_pvAddr)     ((void)0)
# endif
# if 0
#  undef  MY_PREFETCH
#  define MY_PREFETCH(a_pvAddr)     ((void)0)
# endif

/** @def MY_PREFETCH_64
 * 64 byte prefetch hint, could be more depending on cache line size. */
/** @def MY_PREFETCH_128
 * 128 byte prefetch hint. */
/** @def MY_PREFETCH_256
 * 256 byte prefetch hint. */
# if RT_CACHELINE_SIZE >= 128
    /* 128 byte cache lines */
#  define MY_PREFETCH_64(a_pvAddr)  MY_PREFETCH(a_pvAddr)
#  define MY_PREFETCH_128(a_pvAddr) MY_PREFETCH(a_pvAddr)
#  define MY_PREFETCH_256(a_pvAddr) do { \
        MY_PREFETCH(a_pvAddr); \
        MY_PREFETCH((uint8_t const *)a_pvAddr + 128); \
    } while (0)
# else
    /* 64 byte cache lines */
#  define MY_PREFETCH_64(a_pvAddr)  MY_PREFETCH(a_pvAddr)
#  define MY_PREFETCH_128(a_pvAddr) do { \
        MY_PREFETCH(a_pvAddr); \
        MY_PREFETCH((uint8_t const *)a_pvAddr + 64); \
    } while (0)
#  define MY_PREFETCH_256(a_pvAddr) do { \
        MY_PREFETCH(a_pvAddr); \
        MY_PREFETCH((uint8_t const *)a_pvAddr + 64); \
        MY_PREFETCH((uint8_t const *)a_pvAddr + 128); \
        MY_PREFETCH((uint8_t const *)a_pvAddr + 192); \
    } while (0)
# endif
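
/* Illustrative expansion (assuming 64 byte cache lines, i.e. RT_CACHELINE_SIZE < 128):
 * MY_PREFETCH_256(p) issues four MY_PREFETCH hints at p+0, p+64, p+128 and p+192,
 * one per cache line; with 128 byte cache lines it issues only two, at p+0 and
 * p+128.  The hints are advisory only and degrade to ((void)0) when no prefetch
 * intrinsic is available. */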

template<bool const a_fDataTlb, bool const a_f2MbLargePage, bool const a_fGlobal, bool const a_fNonGlobal>
DECLINLINE(void) iemTlbInvalidateLargePageWorkerInner(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR GCPtrTag,
                                                      RTGCPTR GCPtrInstrBufPcTag) RT_NOEXCEPT
{
    IEMTLBTRACE_LARGE_SCAN(pVCpu, a_fGlobal, a_fNonGlobal, a_fDataTlb);
    AssertCompile(IEMTLB_ENTRY_COUNT >= 16); /* prefetching + unroll assumption */

    if (a_fGlobal)
        pTlb->cTlbInvlPgLargeGlobal += 1;
    if (a_fNonGlobal)
        pTlb->cTlbInvlPgLargeNonGlobal += 1;

    /*
     * Set up the scan.
     *
     * GCPtrTagMask: A 2MB page consists of 512 4K pages, so a 256-entry TLB
     * will map offset zero and offset 1MB to the same slot pair.  Our
     * GCPtrTag[Glob] values are for the range 0-1MB, or slots 0-255.  So, we
     * construct a mask that folds large page offsets 1MB-2MB into the 0-1MB
     * range.
     *
     * For our example with 2MB pages and a 256-entry TLB: 0xfffffffffffffeff
     *
     * MY_PREFETCH: Hope that prefetching 256 bytes at a time is okay for
     * relevant host architectures.
     */
    /** @todo benchmark this code from the guest side. */
    bool const      fPartialScan = IEMTLB_ENTRY_COUNT > (a_f2MbLargePage ? 512 : 1024);
#ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
    uintptr_t       idxBitmap    = fPartialScan ? IEMTLB_TAG_TO_EVEN_INDEX(GCPtrTag) / 64 : 0;
    uintptr_t const idxBitmapEnd = fPartialScan ? idxBitmap + ((a_f2MbLargePage ? 512 : 1024) * 2) / 64
                                                : IEMTLB_ENTRY_COUNT * 2 / 64;
#else
    uintptr_t       idxEven      = fPartialScan ? IEMTLB_TAG_TO_EVEN_INDEX(GCPtrTag) : 0;
    MY_PREFETCH_256(&pTlb->aEntries[idxEven + !a_fNonGlobal]);
    uintptr_t const idxEvenEnd   = fPartialScan ? idxEven + ((a_f2MbLargePage ? 512 : 1024) * 2) : IEMTLB_ENTRY_COUNT * 2;
#endif
    RTGCPTR const   GCPtrTagMask = fPartialScan ? ~(RTGCPTR)0
                                 : ~(RTGCPTR)(  (RT_BIT_32(a_f2MbLargePage ? 9 : 10) - 1U)
                                              & ~(uint32_t)(RT_BIT_32(IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO) - 1U));
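    /* Worked example (illustrative, assuming IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO == 8,
     * i.e. 256 entry pairs, and a_f2MbLargePage): RT_BIT_32(9) - 1 = 0x1ff spans the
     * 512 page tags of the 2MB page, ~(RT_BIT_32(8) - 1) clears the bits that select
     * a slot, leaving 0x100; inverting yields 0xfffffffffffffeff, so comparing stored
     * tags through GCPtrTagMask folds the 1MB-2MB half onto the 0-1MB half as
     * described in the comment above. */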

    /*
     * Set cbInstrBufTotal to zero if GCPtrInstrBufPcTag is within any of the tag ranges.
     * We make ASSUMPTIONS about IEMTLB_CALC_TAG_NO_REV here.
     */
    AssertCompile(IEMTLB_CALC_TAG_NO_REV(pVCpu, (RTGCPTR)0x8731U << GUEST_PAGE_SHIFT) == 0x8731U);
    if (   !a_fDataTlb
        && GCPtrInstrBufPcTag - GCPtrTag < (a_f2MbLargePage ? 512U : 1024U))
        pVCpu->iem.s.cbInstrBufTotal = 0;

    /*
     * Combine TAG values with the TLB revisions.
     */
    RTGCPTR GCPtrTagGlob = a_fGlobal ? GCPtrTag | pTlb->uTlbRevisionGlobal : 0;
    if (a_fNonGlobal)
        GCPtrTag |= pTlb->uTlbRevision;

    /*
     * Do the scanning.
     */
#ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
    uint64_t const bmMask = a_fGlobal && a_fNonGlobal ? UINT64_MAX
                          : a_fGlobal ? UINT64_C(0xaaaaaaaaaaaaaaaa) : UINT64_C(0x5555555555555555);
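    /* Note: each entry pair occupies two consecutive bits in bmLargePage, the even
     * bit for the non-global entry and the odd bit for the global one (see
     * iemTlbLoadedLargePage above), so 0x5555... selects only non-global large page
     * bits, 0xaaaa... only global ones, and UINT64_MAX both halves of every pair. */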
    /* Scan bitmap entries (64 bits at a time): */
    for (;;)
    {
# if 1
        uint64_t bmEntry = pTlb->bmLargePage[idxBitmap] & bmMask;
        if (bmEntry)
        {
            /* Scan the non-zero 64-bit value in groups of 8 bits: */
            uint64_t  bmToClear = 0;
            uintptr_t idxEven   = idxBitmap * 64;
            uint32_t  idxTag    = 0;
            for (;;)
            {
                if (bmEntry & 0xff)
                {
# define ONE_PAIR(a_idxTagIter, a_idxEvenIter, a_bmNonGlobal, a_bmGlobal) \
                    if (a_fNonGlobal) \
                    { \
                        if (bmEntry & a_bmNonGlobal) \
                        { \
                            Assert(pTlb->aEntries[a_idxEvenIter].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE); \
                            if ((pTlb->aEntries[a_idxEvenIter].uTag & GCPtrTagMask) == (GCPtrTag + a_idxTagIter)) \
                            { \
                                IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTag + a_idxTagIter, \
                                                             pTlb->aEntries[a_idxEvenIter].GCPhys, \
                                                             a_idxEvenIter, a_fDataTlb); \
                                pTlb->aEntries[a_idxEvenIter].uTag = 0; \
                                bmToClearSub8 |= a_bmNonGlobal; \
                            } \
                        } \
                        else \
                            Assert(   !(pTlb->aEntries[a_idxEvenIter].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE)\
                                   ||    (pTlb->aEntries[a_idxEvenIter].uTag & IEMTLB_REVISION_MASK) \
                                      != (GCPtrTag & IEMTLB_REVISION_MASK)); \
                    } \
                    if (a_fGlobal) \
                    { \
                        if (bmEntry & a_bmGlobal) \
                        { \
                            Assert(pTlb->aEntries[a_idxEvenIter + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE); \
                            if ((pTlb->aEntries[a_idxEvenIter + 1].uTag & GCPtrTagMask) == (GCPtrTagGlob + a_idxTagIter)) \
                            { \
                                IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTagGlob + a_idxTagIter, \
                                                             pTlb->aEntries[a_idxEvenIter + 1].GCPhys, \
                                                             a_idxEvenIter + 1, a_fDataTlb); \
                                pTlb->aEntries[a_idxEvenIter + 1].uTag = 0; \
                                bmToClearSub8 |= a_bmGlobal; \
                            } \
                        } \
                        else \
                            Assert(   !(pTlb->aEntries[a_idxEvenIter + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE)\
                                   ||    (pTlb->aEntries[a_idxEvenIter + 1].uTag & IEMTLB_REVISION_MASK) \
                                      != (GCPtrTagGlob & IEMTLB_REVISION_MASK)); \
                    }
                    uint64_t bmToClearSub8 = 0;
                    ONE_PAIR(idxTag + 0, idxEven + 0, 0x01, 0x02)
                    ONE_PAIR(idxTag + 1, idxEven + 2, 0x04, 0x08)
                    ONE_PAIR(idxTag + 2, idxEven + 4, 0x10, 0x20)
                    ONE_PAIR(idxTag + 3, idxEven + 6, 0x40, 0x80)
                    bmToClear |= bmToClearSub8 << (idxTag * 2);
# undef ONE_PAIR
                }

                /* advance to the next 8 bits. */
                bmEntry >>= 8;
                if (!bmEntry)
                    break;
                idxEven  += 8;
                idxTag   += 4;
            }

            /* Clear the large page flags we covered. */
            pTlb->bmLargePage[idxBitmap] &= ~bmToClear;
        }
# else
        uint64_t const bmEntry = pTlb->bmLargePage[idxBitmap] & bmMask;
        if (bmEntry)
        {
            /* Scan the non-zero 64-bit value completely unrolled: */
            uintptr_t const idxEven   = idxBitmap * 64;
            uint64_t        bmToClear = 0;
# define ONE_PAIR(a_idxTagIter, a_idxEvenIter, a_bmNonGlobal, a_bmGlobal) \
            if (a_fNonGlobal) \
            { \
                if (bmEntry & a_bmNonGlobal) \
                { \
                    Assert(pTlb->aEntries[a_idxEvenIter].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE); \
                    if ((pTlb->aEntries[a_idxEvenIter].uTag & GCPtrTagMask) == (GCPtrTag + a_idxTagIter)) \
                    { \
                        IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTag + a_idxTagIter, \
                                                     pTlb->aEntries[a_idxEvenIter].GCPhys, \
                                                     a_idxEvenIter, a_fDataTlb); \
                        pTlb->aEntries[a_idxEvenIter].uTag = 0; \
                        bmToClear |= a_bmNonGlobal; \
                    } \
                } \
                else \
                    Assert(   !(pTlb->aEntries[a_idxEvenIter].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE)\
                           ||    (pTlb->aEntries[a_idxEvenIter].uTag & IEMTLB_REVISION_MASK) \
                              != (GCPtrTag & IEMTLB_REVISION_MASK)); \
            } \
            if (a_fGlobal) \
            { \
                if (bmEntry & a_bmGlobal) \
                { \
                    Assert(pTlb->aEntries[a_idxEvenIter + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE); \
                    if ((pTlb->aEntries[a_idxEvenIter + 1].uTag & GCPtrTagMask) == (GCPtrTagGlob + a_idxTagIter)) \
                    { \
                        IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTagGlob + a_idxTagIter, \
                                                     pTlb->aEntries[a_idxEvenIter + 1].GCPhys, \
                                                     a_idxEvenIter + 1, a_fDataTlb); \
                        pTlb->aEntries[a_idxEvenIter + 1].uTag = 0; \
                        bmToClear |= a_bmGlobal; \
                    } \
                } \
                else \
                    Assert(   !(pTlb->aEntries[a_idxEvenIter + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE)\
                           ||    (pTlb->aEntries[a_idxEvenIter + 1].uTag & IEMTLB_REVISION_MASK) \
                              != (GCPtrTagGlob & IEMTLB_REVISION_MASK)); \
            } ((void)0)
# define FOUR_PAIRS(a_iByte, a_cShift) \
            ONE_PAIR(0 + a_iByte * 4, idxEven + 0 + a_iByte * 8, UINT64_C(0x01) << a_cShift, UINT64_C(0x02) << a_cShift); \
            ONE_PAIR(1 + a_iByte * 4, idxEven + 2 + a_iByte * 8, UINT64_C(0x04) << a_cShift, UINT64_C(0x08) << a_cShift); \
            ONE_PAIR(2 + a_iByte * 4, idxEven + 4 + a_iByte * 8, UINT64_C(0x10) << a_cShift, UINT64_C(0x20) << a_cShift); \
            ONE_PAIR(3 + a_iByte * 4, idxEven + 6 + a_iByte * 8, UINT64_C(0x40) << a_cShift, UINT64_C(0x80) << a_cShift)
            if (bmEntry & (uint32_t)UINT16_MAX)
            {
                FOUR_PAIRS(0, 0);
                FOUR_PAIRS(1, 8);
            }
            if (bmEntry & ((uint32_t)UINT16_MAX << 16))
            {
                FOUR_PAIRS(2, 16);
                FOUR_PAIRS(3, 24);
            }
            if (bmEntry & ((uint64_t)UINT16_MAX << 32))
            {
                FOUR_PAIRS(4, 32);
                FOUR_PAIRS(5, 40);
            }
            if (bmEntry & ((uint64_t)UINT16_MAX << 48))
            {
                FOUR_PAIRS(6, 48);
                FOUR_PAIRS(7, 56);
            }
# undef FOUR_PAIRS

            /* Clear the large page flags we covered. */
            pTlb->bmLargePage[idxBitmap] &= ~bmToClear;
        }
# endif

        /* advance */
        idxBitmap++;
        if (idxBitmap >= idxBitmapEnd)
            break;
        if (a_fNonGlobal)
            GCPtrTag     += 32;
        if (a_fGlobal)
            GCPtrTagGlob += 32;
    }

#else /* !IEMTLB_WITH_LARGE_PAGE_BITMAP */

    for (; idxEven < idxEvenEnd; idxEven += 8)
    {
# define ONE_ITERATION(a_idxEvenIter) \
        if (a_fNonGlobal) \
        { \
            if ((pTlb->aEntries[a_idxEvenIter].uTag & GCPtrTagMask) == GCPtrTag) \
            { \
                if (pTlb->aEntries[a_idxEvenIter].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE) \
                { \
                    IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTag, pTlb->aEntries[a_idxEvenIter].GCPhys, \
                                                 a_idxEvenIter, a_fDataTlb); \
                    pTlb->aEntries[a_idxEvenIter].uTag = 0; \
                } \
            } \
            GCPtrTag++; \
        } \
        \
        if (a_fGlobal) \
        { \
            if ((pTlb->aEntries[a_idxEvenIter + 1].uTag & GCPtrTagMask) == GCPtrTagGlob) \
            { \
                if (pTlb->aEntries[a_idxEvenIter + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE) \
                { \
                    IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTag, pTlb->aEntries[a_idxEvenIter + 1].GCPhys, \
                                                 a_idxEvenIter + 1, a_fDataTlb); \
                    pTlb->aEntries[a_idxEvenIter + 1].uTag = 0; \
                } \
            } \
            GCPtrTagGlob++; \
        }
        if (idxEven < idxEvenEnd - 4)
            MY_PREFETCH_256(&pTlb->aEntries[idxEven + 8 + !a_fNonGlobal]);
        ONE_ITERATION(idxEven)
        ONE_ITERATION(idxEven + 2)
        ONE_ITERATION(idxEven + 4)
        ONE_ITERATION(idxEven + 6)
# undef ONE_ITERATION
    }
#endif /* !IEMTLB_WITH_LARGE_PAGE_BITMAP */
}
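
/* Layout note (stated here for clarity, as implied by the indexing above): aEntries
 * is organized in pairs, the even slot holding the non-global entry (tagged with
 * uTlbRevision) and the odd slot the global entry (tagged with uTlbRevisionGlobal),
 * which is why the workers step in units of two and address the global variant as
 * a_idxEvenIter + 1. */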

template<bool const a_fDataTlb, bool const a_f2MbLargePage>
DECLINLINE(void) iemTlbInvalidateLargePageWorker(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR GCPtrTag,
                                                 RTGCPTR GCPtrInstrBufPcTag) RT_NOEXCEPT
{
    AssertCompile(IEMTLB_CALC_TAG_NO_REV(pVCpu, (RTGCPTR)0x8731U << GUEST_PAGE_SHIFT) == 0x8731U);

    GCPtrTag &= ~(RTGCPTR)(RT_BIT_64((a_f2MbLargePage ? 21 : 22) - GUEST_PAGE_SHIFT) - 1U);
    if (   GCPtrTag >= pTlb->GlobalLargePageRange.uFirstTag
        && GCPtrTag <= pTlb->GlobalLargePageRange.uLastTag)
    {
        if (   GCPtrTag < pTlb->NonGlobalLargePageRange.uFirstTag
            || GCPtrTag > pTlb->NonGlobalLargePageRange.uLastTag)
            iemTlbInvalidateLargePageWorkerInner<a_fDataTlb, a_f2MbLargePage, true, false>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
        else
            iemTlbInvalidateLargePageWorkerInner<a_fDataTlb, a_f2MbLargePage, true, true>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
    }
    else if (   GCPtrTag < pTlb->NonGlobalLargePageRange.uFirstTag
             || GCPtrTag > pTlb->NonGlobalLargePageRange.uLastTag)
    {
        /* Large pages aren't as likely in the non-global TLB half. */
        IEMTLBTRACE_LARGE_SCAN(pVCpu, false, false, a_fDataTlb);
    }
    else
        iemTlbInvalidateLargePageWorkerInner<a_fDataTlb, a_f2MbLargePage, false, true>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
}
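
/* Dispatch summary (mirrors the conditions above): the tag is rounded down to the
 * large page boundary and checked against the global and non-global uFirstTag/uLastTag
 * ranges recorded at load time, so the inner worker is only instantiated with
 * a_fGlobal/a_fNonGlobal set for the TLB halves that can actually hold a matching
 * large page; if neither range covers the tag, only the scan trace event is emitted. */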

#endif /** @todo ARMv8: TLB flushing is a lot more complex on arm! */

template<bool const a_fDataTlb>
DECLINLINE(void) iemTlbInvalidatePageWorker(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR GCPtrTag, uintptr_t idxEven) RT_NOEXCEPT
{
    pTlb->cTlbInvlPg += 1;

    /*
     * Flush the entry pair.
     */
    if (pTlb->aEntries[idxEven].uTag == (GCPtrTag | pTlb->uTlbRevision))
    {
        IEMTLBTRACE_EVICT_SLOT(pVCpu, GCPtrTag, pTlb->aEntries[idxEven].GCPhys, idxEven, a_fDataTlb);
        pTlb->aEntries[idxEven].uTag = 0;
        if (!a_fDataTlb && GCPtrTag == IEMTLB_CALC_TAG_NO_REV(pVCpu, pVCpu->iem.s.uInstrBufPc))
            pVCpu->iem.s.cbInstrBufTotal = 0;
    }
    if (pTlb->aEntries[idxEven + 1].uTag == (GCPtrTag | pTlb->uTlbRevisionGlobal))
    {
        IEMTLBTRACE_EVICT_SLOT(pVCpu, GCPtrTag, pTlb->aEntries[idxEven + 1].GCPhys, idxEven + 1, a_fDataTlb);
        pTlb->aEntries[idxEven + 1].uTag = 0;
        if (!a_fDataTlb && GCPtrTag == IEMTLB_CALC_TAG_NO_REV(pVCpu, pVCpu->iem.s.uInstrBufPc))
            pVCpu->iem.s.cbInstrBufTotal = 0;
    }

#if 0 /** @todo ARMv8: TLB flushing is a lot more complex on arm! */
    /*
     * If there are (or have been) large pages in the TLB, we must check if the
     * address being flushed may involve one of those, as then we'd have to
     * scan for entries relating to the same page and flush those as well.
     */
# if 0 /** @todo do accurate counts or currently loaded large stuff and we can use those */
    if (pTlb->cTlbGlobalLargePageCurLoads || pTlb->cTlbNonGlobalLargePageCurLoads)
# else
    if (pTlb->GlobalLargePageRange.uLastTag || pTlb->NonGlobalLargePageRange.uLastTag)
# endif
    {
        RTGCPTR const GCPtrInstrBufPcTag = a_fDataTlb ? 0 : IEMTLB_CALC_TAG_NO_REV(pVCpu, pVCpu->iem.s.uInstrBufPc);
        if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
            iemTlbInvalidateLargePageWorker<a_fDataTlb, true>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
        else
            iemTlbInvalidateLargePageWorker<a_fDataTlb, false>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
    }
#endif
}
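
/* Typical call shape (illustrative sketch only, not taken from this file; the TLB
 * member name DataTlb and the page address variable GCPtrPage are assumed here):
 *
 *     RTGCPTR const   GCPtrTag = IEMTLB_CALC_TAG_NO_REV(pVCpu, GCPtrPage);
 *     uintptr_t const idxEven  = IEMTLB_TAG_TO_EVEN_INDEX(GCPtrTag);
 *     iemTlbInvalidatePageWorker<true>(pVCpu, &pVCpu->iem.s.DataTlb, GCPtrTag, idxEven);
 *
 * i.e. the caller strips the revision bits from the page address, derives the even
 * slot index, and the worker clears the matching non-global/global entry pair. */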

#endif /* defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB) */

#endif /* !VMM_INCLUDED_SRC_VMMAll_target_armv8_IEMAllTlbInline_armv8_h */