VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllMemRWTmplInline.cpp.h@101189

Last change on this file since 101189 was 100868, checked in by vboxsync, 16 months ago

VBox/log.h,VMM/IEM: Added a dedicated logging group for IEM memory accesses: IEM_MEM bugref:10369

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 52.6 KB
1/* $Id: IEMAllMemRWTmplInline.cpp.h 100868 2023-08-14 00:49:27Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - Inlined R/W Memory Functions Template.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/* Check template parameters. */
30#ifndef TMPL_MEM_TYPE
31# error "TMPL_MEM_TYPE is undefined"
32#endif
33#ifndef TMPL_MEM_TYPE_SIZE
34# error "TMPL_MEM_TYPE_SIZE is undefined"
35#endif
36#ifndef TMPL_MEM_TYPE_ALIGN
37# error "TMPL_MEM_TYPE_ALIGN is undefined"
38#endif
39#ifndef TMPL_MEM_FN_SUFF
40# error "TMPL_MEM_FN_SUFF is undefined"
41#endif
42#ifndef TMPL_MEM_FMT_TYPE
43# error "TMPL_MEM_FMT_TYPE is undefined"
44#endif
45#ifndef TMPL_MEM_FMT_DESC
46# error "TMPL_MEM_FMT_DESC is undefined"
47#endif
48
49#if TMPL_MEM_TYPE_ALIGN + 1 < TMPL_MEM_TYPE_SIZE
50# error Have not implemented TMPL_MEM_TYPE_ALIGN smaller than TMPL_MEM_TYPE_SIZE - 1.
51#endif
52
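/*
 * Illustrative instantiation (a minimal sketch; the values and the point of inclusion are
 * assumptions, see the IEM sources for the real instantiation sites): the includer defines
 * the TMPL_MEM_* parameters checked above and then pulls in this template, once per access
 * size/type, e.g.:
 *
 *     #define TMPL_MEM_TYPE       uint32_t
 *     #define TMPL_MEM_TYPE_SIZE  4
 *     #define TMPL_MEM_TYPE_ALIGN 3        (alignment mask, at least size - 1 per the check above)
 *     #define TMPL_MEM_FN_SUFF    U32      (yields iemMemFetchDataU32Jmp, iemMemStoreDataU32Jmp, ...)
 *     #define TMPL_MEM_FMT_TYPE   "%#010x"
 *     #define TMPL_MEM_FMT_DESC   "dword"
 *     #include "IEMAllMemRWTmplInline.cpp.h"
 */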
53
54#ifdef IEM_WITH_SETJMP
55
56
57/*********************************************************************************************************************************
58* Fetches *
59*********************************************************************************************************************************/
60
61/**
62 * Inlined fetch function that longjumps on error.
63 *
64 * @note The @a iSegReg is not allowed to be UINT8_MAX!
65 */
66DECL_INLINE_THROW(TMPL_MEM_TYPE)
67RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
68{
69 AssertCompile(sizeof(TMPL_MEM_TYPE) == TMPL_MEM_TYPE_SIZE);
70# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
71 /*
72 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
73 */
74 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
75# if TMPL_MEM_TYPE_SIZE > 1
76 if (RT_LIKELY( !(GCPtrEff & TMPL_MEM_TYPE_ALIGN) /* If aligned, it will be within the page. */
77 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, TMPL_MEM_TYPE) ))
78# endif
79 {
80 /*
81 * TLB lookup.
82 */
83 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
84 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
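            /* Assumed semantics of the two lookup macros above (they are defined elsewhere in IEM):
               the tag combines the guest page number with the TLB's current revision and also selects
               the entry, so a full TLB flush only needs to bump the revision counter. */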
85 if (RT_LIKELY(pTlbe->uTag == uTag))
86 {
87 /*
88 * Check TLB page table level access flags.
89 */
90 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
91 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
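            /* The comparison below succeeds only when the entry's physical revision matches
               DataTlb.uTlbPhysRev and all the other masked flags are clear.  fNoUser folds the
               privilege check into that mask: (CPL + 1) & IEMTLBE_F_PT_NO_USER is non-zero only
               for CPL 3 (3 + 1 == 4, see the AssertCompile above), so the no-user-access bit is
               only enforced for user-mode code. */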
92 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
93 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
94 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
95 {
96 /*
97 * Fetch and return the data.
98 */
99 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
100 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
101 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
102 TMPL_MEM_TYPE const uRet = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
103 LogEx(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, uRet));
104 return uRet;
105 }
106 }
107 }
108
109 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
110 outdated page pointer, or other troubles. (This will do a TLB load.) */
111 LogEx(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
112# endif
113 return RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iSegReg, GCPtrMem);
114}
115
116
117/**
118 * Inlined flat addressing fetch function that longjumps on error.
119 */
120DECL_INLINE_THROW(TMPL_MEM_TYPE)
121RT_CONCAT3(iemMemFlatFetchData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
122{
123# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
124 /*
125 * Check that it doesn't cross a page boundary.
126 */
127# if TMPL_MEM_TYPE_SIZE > 1
128 AssertCompile(X86_CR0_AM == X86_EFL_AC);
129 AssertCompile(((3U + 1U) << 16) == X86_CR0_AM);
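            /* These static checks presumably back the alignment-check (#AC) handling in the
               unaligned path: EFL.AC and CR0.AM occupy the same bit position, and (CPL + 1) << 16
               reaches that bit only for CPL 3, so the #AC conditions can be folded into a couple
               of mask operations. */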
130 if (RT_LIKELY( !(GCPtrMem & TMPL_MEM_TYPE_ALIGN) /* If aligned, it will be within the page. */
131 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrMem, TMPL_MEM_TYPE) ))
132# endif
133 {
134 /*
135 * TLB lookup.
136 */
137 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
138 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
139 if (RT_LIKELY(pTlbe->uTag == uTag))
140 {
141 /*
142 * Check TLB page table level access flags.
143 */
144 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
145 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
146 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
147 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
148 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
149 {
150 /*
151 * Fetch and return the data.
152 */
153 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
154 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
155 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
156 TMPL_MEM_TYPE const uRet = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
157 LogEx(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " %RGv: " TMPL_MEM_FMT_TYPE "\n", GCPtrMem, uRet));
158 return uRet;
159 }
160 }
161 }
162
163 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
164 outdated page pointer, or other troubles. (This will do a TLB load.) */
165 LogEx(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
166# endif
167 return RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, UINT8_MAX, GCPtrMem);
168}
169
170
171/*********************************************************************************************************************************
172* Stores *
173*********************************************************************************************************************************/
174# ifndef TMPL_MEM_NO_STORE
175
176/**
177 * Inlined store function that longjumps on error.
178 *
179 * @note The @a iSegReg is not allowed to be UINT8_MAX!
180 */
181DECL_INLINE_THROW(void)
182RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
183 TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
184{
185# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
186 /*
187 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
188 */
189 RTGCPTR GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
190# if TMPL_MEM_TYPE_SIZE > 1
191 AssertCompile(X86_CR0_AM == X86_EFL_AC);
192 AssertCompile(((3U + 1U) << 16) == X86_CR0_AM);
193 if (RT_LIKELY( !(GCPtrEff & TMPL_MEM_TYPE_ALIGN) /* If aligned, it will be within the page. */
194 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, TMPL_MEM_TYPE) ))
195# endif
196 {
197 /*
198 * TLB lookup.
199 */
200 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
201 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
202 if (RT_LIKELY(pTlbe->uTag == uTag))
203 {
204 /*
205 * Check TLB page table level access flags.
206 */
207 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
208 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
209 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
210 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
211 | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
212 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
213 {
214 /*
215 * Store the value and return.
216 */
217 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
218 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
219 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
220 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = uValue;
221 Log5Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, uValue));
222 return;
223 }
224 }
225 }
226
227 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
228 outdated page pointer, or other troubles. (This will do a TLB load.) */
229 Log6Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
230# endif
231 RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iSegReg, GCPtrMem, uValue);
232}
233
234
235/**
236 * Inlined flat addressing store function that longjumps on error.
237 */
238DECL_INLINE_THROW(void)
239RT_CONCAT3(iemMemFlatStoreData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem,
240 TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
241{
242# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
243 /*
244 * Check that it doesn't cross a page boundary.
245 */
246# if TMPL_MEM_TYPE_SIZE > 1
247 if (RT_LIKELY( !(GCPtrMem & TMPL_MEM_TYPE_ALIGN)
248 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrMem, TMPL_MEM_TYPE) ))
249# endif
250 {
251 /*
252 * TLB lookup.
253 */
254 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
255 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
256 if (RT_LIKELY(pTlbe->uTag == uTag))
257 {
258 /*
259 * Check TLB page table level access flags.
260 */
261 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
262 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
263 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
264 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
265 | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
266 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
267 {
268 /*
269 * Store the value and return.
270 */
271 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
272 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
273 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
274 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK] = uValue;
275 Log5Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " %RGv: " TMPL_MEM_FMT_TYPE "\n", GCPtrMem, uValue));
276 return;
277 }
278 }
279 }
280
281 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
282 outdated page pointer, or other troubles. (This will do a TLB load.) */
283 Log6Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
284# endif
285 RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, UINT8_MAX, GCPtrMem, uValue);
286}
287
288# endif /* !TMPL_MEM_NO_STORE */
289
290
291/*********************************************************************************************************************************
292* Mapping / Direct Memory Access *
293*********************************************************************************************************************************/
294# ifndef TMPL_MEM_NO_MAPPING
295
296/**
297 * Inlined read-write memory mapping function that longjumps on error.
298 */
299DECL_INLINE_THROW(TMPL_MEM_TYPE *)
300RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RwJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
301 uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
302{
303# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
304 /*
305 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
306 */
307 RTGCPTR GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
308# if TMPL_MEM_TYPE_SIZE > 1
309 if (RT_LIKELY( !(GCPtrEff & TMPL_MEM_TYPE_ALIGN)
310 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, TMPL_MEM_TYPE) ))
311# endif
312 {
313 /*
314 * TLB lookup.
315 */
316 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
317 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
318 if (RT_LIKELY(pTlbe->uTag == uTag))
319 {
320 /*
321 * Check TLB page table level access flags.
322 */
323 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
324 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
325 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
326 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
327 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
328 | fNoUser))
329 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
330 {
331 /*
332 * Return the address.
333 */
334 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
335 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
336 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
337 *pbUnmapInfo = 0;
338 Log7Ex(LOG_GROUP_IEM_MEM,("IEM RW/map " TMPL_MEM_FMT_DESC " %d|%RGv: %p\n",
339 iSegReg, GCPtrMem, &pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK]));
340 return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
341 }
342 }
343 }
344
345 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
346 outdated page pointer, or other troubles. (This will do a TLB load.) */
347 Log8Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
348# endif
349 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RwSafeJmp)(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
350}
351
352
353/**
354 * Inlined flat read-write memory mapping function that longjumps on error.
355 */
356DECL_INLINE_THROW(TMPL_MEM_TYPE *)
357RT_CONCAT3(iemMemFlatMapData,TMPL_MEM_FN_SUFF,RwJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
358 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
359{
360# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
361 /*
362 * Check that the address doesn't cross a page boundary.
363 */
364# if TMPL_MEM_TYPE_SIZE > 1
365 if (RT_LIKELY( !(GCPtrMem & TMPL_MEM_TYPE_ALIGN)
366 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrMem, TMPL_MEM_TYPE) ))
367# endif
368 {
369 /*
370 * TLB lookup.
371 */
372 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
373 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
374 if (RT_LIKELY(pTlbe->uTag == uTag))
375 {
376 /*
377 * Check TLB page table level access flags.
378 */
379 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
380 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
381 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
382 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
383 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
384 | fNoUser))
385 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
386 {
387 /*
388 * Return the address.
389 */
390 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
391 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
392 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
393 *pbUnmapInfo = 0;
394 Log7Ex(LOG_GROUP_IEM_MEM,("IEM RW/map " TMPL_MEM_FMT_DESC " %RGv: %p\n",
395 GCPtrMem, &pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK]));
396 return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
397 }
398 }
399 }
400
401 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
402 outdated page pointer, or other troubles. (This will do a TLB load.) */
403 Log8Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
404# endif
405 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RwSafeJmp)(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
406}
407
408
409/**
410 * Inlined write-only memory mapping function that longjumps on error.
411 */
412DECL_INLINE_THROW(TMPL_MEM_TYPE *)
413RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,WoJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
414 uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
415{
416# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
417 /*
418 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
419 */
420 RTGCPTR GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
421# if TMPL_MEM_TYPE_SIZE > 1
422 if (RT_LIKELY( !(GCPtrEff & TMPL_MEM_TYPE_ALIGN)
423 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, TMPL_MEM_TYPE) ))
424# endif
425 {
426 /*
427 * TLB lookup.
428 */
429 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
430 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
431 if (RT_LIKELY(pTlbe->uTag == uTag))
432 {
433 /*
434 * Check TLB page table level access flags.
435 */
436 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
437 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
438 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
439 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
440 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
441 | fNoUser))
442 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
443 {
444 /*
445 * Return the address.
446 */
447 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
448 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
449 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
450 *pbUnmapInfo = 0;
451 Log7Ex(LOG_GROUP_IEM_MEM,("IEM WO/map " TMPL_MEM_FMT_DESC " %d|%RGv: %p\n",
452 iSegReg, GCPtrMem, &pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK]));
453 return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
454 }
455 }
456 }
457
458 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
459 outdated page pointer, or other troubles. (This will do a TLB load.) */
460 Log8Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
461# endif
462 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,WoSafeJmp)(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
463}
464
465
466/**
467 * Inlined flat write-only memory mapping function that longjumps on error.
468 */
469DECL_INLINE_THROW(TMPL_MEM_TYPE *)
470RT_CONCAT3(iemMemFlatMapData,TMPL_MEM_FN_SUFF,WoJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
471 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
472{
473# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
474 /*
475 * Check that the address doesn't cross a page boundary.
476 */
477# if TMPL_MEM_TYPE_SIZE > 1
478 if (RT_LIKELY( !(GCPtrMem & TMPL_MEM_TYPE_ALIGN)
479 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrMem, TMPL_MEM_TYPE) ))
480# endif
481 {
482 /*
483 * TLB lookup.
484 */
485 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
486 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
487 if (RT_LIKELY(pTlbe->uTag == uTag))
488 {
489 /*
490 * Check TLB page table level access flags.
491 */
492 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
493 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
494 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
495 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
496 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
497 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
498 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
499 {
500 /*
501 * Return the address.
502 */
503 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
504 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
505 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
506 *pbUnmapInfo = 0;
507 Log7Ex(LOG_GROUP_IEM_MEM,("IEM WO/map " TMPL_MEM_FMT_DESC " %RGv: %p\n",
508 GCPtrMem, &pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK]));
509 return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
510 }
511 }
512 }
513
514 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
515 outdated page pointer, or other troubles. (This will do a TLB load.) */
516 Log8Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
517# endif
518 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,WoSafeJmp)(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
519}
520
521
522/**
523 * Inlined read-only memory mapping function that longjumps on error.
524 */
525DECL_INLINE_THROW(TMPL_MEM_TYPE const *)
526RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RoJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
527 uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
528{
529# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
530 /*
531 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
532 */
533 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
534# if TMPL_MEM_TYPE_SIZE > 1
535 if (RT_LIKELY( !(GCPtrEff & TMPL_MEM_TYPE_ALIGN)
536 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, TMPL_MEM_TYPE) ))
537# endif
538 {
539 /*
540 * TLB lookup.
541 */
542 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
543 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
544 if (RT_LIKELY(pTlbe->uTag == uTag))
545 {
546 /*
547 * Check TLB page table level access flags.
548 */
549 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
550 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
551 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
552 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
553 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
554 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
555 {
556 /*
557 * Return the address.
558 */
559 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
560 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
561 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
562 *pbUnmapInfo = 0;
563 Log3Ex(LOG_GROUP_IEM_MEM,("IEM RO/map " TMPL_MEM_FMT_DESC " %d|%RGv: %p\n",
564 iSegReg, GCPtrMem, &pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK]));
565 return (TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
566 }
567 }
568 }
569
570 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
571 outdated page pointer, or other troubles. (This will do a TLB load.) */
572 Log4Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
573# endif
574 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RoSafeJmp)(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
575}
576
577
578/**
579 * Inlined flat read-only memory mapping function that longjumps on error.
580 */
581DECL_INLINE_THROW(TMPL_MEM_TYPE const *)
582RT_CONCAT3(iemMemFlatMapData,TMPL_MEM_FN_SUFF,RoJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
583 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
584{
585# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
586 /*
587 * Check that the address doesn't cross a page boundary.
588 */
589# if TMPL_MEM_TYPE_SIZE > 1
590 if (RT_LIKELY( !(GCPtrMem & TMPL_MEM_TYPE_ALIGN)
591 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrMem, TMPL_MEM_TYPE) ))
592# endif
593 {
594 /*
595 * TLB lookup.
596 */
597 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
598 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
599 if (RT_LIKELY(pTlbe->uTag == uTag))
600 {
601 /*
602 * Check TLB page table level access flags.
603 */
604 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
605 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
606 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
607 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
608 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
609 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
610 {
611 /*
612 * Return the address.
613 */
614 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
615 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
616 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
617 *pbUnmapInfo = 0;
618 Log3Ex(LOG_GROUP_IEM_MEM,("IEM RO/map " TMPL_MEM_FMT_DESC " %RGv: %p\n",
619 GCPtrMem, &pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK]));
620 return (TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
621 }
622 }
623 }
624
625 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
626 outdated page pointer, or other troubles. (This will do a TLB load.) */
627 Log4Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
628# endif
629 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RoSafeJmp)(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
630}
631
632# endif /* !TMPL_MEM_NO_MAPPING */
633
634
635/*********************************************************************************************************************************
636* Stack Access *
637*********************************************************************************************************************************/
638# ifdef TMPL_MEM_WITH_STACK
639# ifdef IEM_WITH_SETJMP
640
641/**
642 * Stack push function that longjmps on error.
643 */
644DECL_INLINE_THROW(void)
645RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
646{
647# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
648 /*
649 * Decrement the stack pointer (prep), apply segmentation and check that
650 * the item doesn't cross a page boundary.
651 */
652 uint64_t uNewRsp;
653 RTGCPTR const GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);
654 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, sizeof(TMPL_MEM_TYPE), GCPtrTop);
655# if TMPL_MEM_TYPE_SIZE > 1
656 if (RT_LIKELY( !(GCPtrEff & TMPL_MEM_TYPE_ALIGN)
657 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, TMPL_MEM_TYPE) ))
658# endif
659 {
660 /*
661 * TLB lookup.
662 */
663 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
664 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
665 if (RT_LIKELY(pTlbe->uTag == uTag))
666 {
667 /*
668 * Check TLB page table level access flags.
669 */
670 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
671 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
672 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
673 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
674 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
675 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
676 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
677 {
678 /*
679 * Do the push and return.
680 */
681 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
682 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
683 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
684 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE "\n",
685 GCPtrEff, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
686 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = uValue;
687 pVCpu->cpum.GstCtx.rsp = uNewRsp;
688 return;
689 }
690 }
691 }
692
693 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
694 outdated page pointer, or other troubles. (This will do a TLB load.) */
695 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
696# endif
697 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, uValue);
698}
699
700
701/**
702 * Stack pop function that longjmps on error.
703 */
704DECL_INLINE_THROW(TMPL_MEM_TYPE)
705RT_CONCAT3(iemMemStackPop,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
706{
707# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
708 /*
709 * Increment the stack pointer (prep), apply segmentation and check that
710 * the item doesn't cross a page boundary.
711 */
712 uint64_t uNewRsp;
713 RTGCPTR const GCPtrTop = iemRegGetRspForPop(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);
714 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, sizeof(TMPL_MEM_TYPE), GCPtrTop);
715# if TMPL_MEM_TYPE_SIZE > 1
716 if (RT_LIKELY( !(GCPtrEff & TMPL_MEM_TYPE_ALIGN)
717 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, TMPL_MEM_TYPE) ))
718# endif
719 {
720 /*
721 * TLB lookup.
722 */
723 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
724 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
725 if (RT_LIKELY(pTlbe->uTag == uTag))
726 {
727 /*
728 * Check TLB page table level access flags.
729 */
730 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
731 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
732 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
733 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
734 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
735 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
736 {
737 /*
738 * Do the pop and return.
739 */
740 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
741 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
742 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
743 TMPL_MEM_TYPE const uRet = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
744 Log9Ex(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE "\n",
745 GCPtrEff, pVCpu->cpum.GstCtx.rsp, uNewRsp, uRet));
746 pVCpu->cpum.GstCtx.rsp = uNewRsp;
747 return uRet;
748 }
749 }
750 }
751
752 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
753 outdated page pointer, or other troubles. (This will do a TLB load.) */
754 Log10Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
755# endif
756 return RT_CONCAT3(iemMemStackPop,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu);
757}
758
759# ifdef TMPL_WITH_PUSH_SREG
760/**
761 * Stack segment push function that longjmps on error.
762 *
763 * For a detailed discussion of the behaviour see the fallback functions
764 * iemMemStackPushUxxSRegSafeJmp.
765 */
766DECL_INLINE_THROW(void)
767RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SRegJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
768{
769# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
770 /*
771 * Decrement the stack pointer (prep), apply segmentation and check that
772 * the item doesn't cross a page boundary.
773 */
774 uint64_t uNewRsp;
775 RTGCPTR const GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);
776 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, sizeof(TMPL_MEM_TYPE), GCPtrTop);
777# if TMPL_MEM_TYPE_SIZE > 1
778 if (RT_LIKELY( !(GCPtrEff & (sizeof(uint16_t) - 1U))
779 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, uint16_t) ))
780# endif
781 {
782 /*
783 * TLB lookup.
784 */
785 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
786 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
787 if (RT_LIKELY(pTlbe->uTag == uTag))
788 {
789 /*
790 * Check TLB page table level access flags.
791 */
792 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
793 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
794 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
795 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
796 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
797 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
798 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
799 {
800 /*
801 * Do the push and return.
802 */
803 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
804 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
805 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
806 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " [sreg]\n",
807 GCPtrEff, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
808 *(uint16_t *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = (uint16_t)uValue;
809 pVCpu->cpum.GstCtx.rsp = uNewRsp;
810 return;
811 }
812 }
813 }
814
815 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
816 outdated page pointer, or other troubles. (This will do a TLB load.) */
817 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
818# endif
819 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SRegSafeJmp)(pVCpu, uValue);
820}
821
822# endif
823# if TMPL_MEM_TYPE_SIZE != 8
824
825/**
826 * 32-bit flat stack push function that longjmps on error.
827 */
828DECL_INLINE_THROW(void)
829RT_CONCAT3(iemMemFlat32StackPush,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
830{
831 Assert( pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig
832 && pVCpu->cpum.GstCtx.ss.Attr.n.u4Type == X86_SEL_TYPE_RW_ACC
833 && pVCpu->cpum.GstCtx.ss.u32Limit == UINT32_MAX
834 && pVCpu->cpum.GstCtx.ss.u64Base == 0);
835# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
836 /*
837 * Calculate the new stack pointer and check that the item doesn't cross a page boundary.
838 */
839 uint32_t const uNewEsp = pVCpu->cpum.GstCtx.esp - sizeof(TMPL_MEM_TYPE);
840# if TMPL_MEM_TYPE_SIZE > 1
841 if (RT_LIKELY( !(uNewEsp & TMPL_MEM_TYPE_ALIGN)
842 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, uNewEsp, TMPL_MEM_TYPE) ))
843# endif
844 {
845 /*
846 * TLB lookup.
847 */
848 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, (RTGCPTR)uNewEsp); /* Doesn't work w/o casting to RTGCPTR (win /3 hangs). */
849 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
850 if (RT_LIKELY(pTlbe->uTag == uTag))
851 {
852 /*
853 * Check TLB page table level access flags.
854 */
855 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
856 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
857 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
858 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
859 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
860 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
861 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
862 {
863 /*
864 * Do the push and return.
865 */
866 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
867 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
868 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
869 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RX32 (<-%RX32): " TMPL_MEM_FMT_TYPE "\n",
870 uNewEsp, pVCpu->cpum.GstCtx.esp, uValue));
871 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[uNewEsp & GUEST_PAGE_OFFSET_MASK] = uValue;
872 pVCpu->cpum.GstCtx.rsp = uNewEsp;
873 return;
874 }
875 }
876 }
877
878 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
879 outdated page pointer, or other troubles. (This will do a TLB load.) */
880 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX32 falling back\n", LOG_FN_NAME, uNewEsp));
881# endif
882 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, uValue);
883}
884
885
886/**
887 * 32-bit flat stack pop function that longjmps on error.
888 */
889DECL_INLINE_THROW(TMPL_MEM_TYPE)
890RT_CONCAT3(iemMemFlat32StackPop,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
891{
892# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
893 /*
894 * Calculate the new stack pointer and check that the item doesn't cross a page boundary.
895 */
896 uint32_t const uOldEsp = pVCpu->cpum.GstCtx.esp;
897# if TMPL_MEM_TYPE_SIZE > 1
898 if (RT_LIKELY( !(uOldEsp & TMPL_MEM_TYPE_ALIGN)
899 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, uOldEsp, TMPL_MEM_TYPE) ))
900# endif
901 {
902 /*
903 * TLB lookup.
904 */
905 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, (RTGCPTR)uOldEsp); /* Cast is required! 2023-08-11 */
906 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
907 if (RT_LIKELY(pTlbe->uTag == uTag))
908 {
909 /*
910 * Check TLB page table level access flags.
911 */
912 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
913 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
914 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
915 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
916 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
917 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
918 {
919 /*
920 * Do the pop and return.
921 */
922 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
923 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
924 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
925 TMPL_MEM_TYPE const uRet = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[uOldEsp & GUEST_PAGE_OFFSET_MASK];
926 pVCpu->cpum.GstCtx.rsp = uOldEsp + sizeof(TMPL_MEM_TYPE);
927 Log9Ex(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " SS|%RX32 (->%RX32): " TMPL_MEM_FMT_TYPE "\n",
928 uOldEsp, uOldEsp + sizeof(TMPL_MEM_TYPE), uRet));
929 return uRet;
930 }
931 }
932 }
933
934 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
935 outdated page pointer, or other troubles. (This will do a TLB load.) */
936 Log10Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX32 falling back\n", LOG_FN_NAME, uOldEsp));
937# endif
938 return RT_CONCAT3(iemMemStackPop,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu);
939}
940
941# endif /* TMPL_MEM_TYPE_SIZE != 8*/
942# ifdef TMPL_WITH_PUSH_SREG
943/**
944 * 32-bit flat stack segment push function that longjmps on error.
945 *
946 * For a detailed discussion of the behaviour see the fallback functions
947 * iemMemStackPushUxxSRegSafeJmp.
948 */
949DECL_INLINE_THROW(void)
950RT_CONCAT3(iemMemFlat32StackPush,TMPL_MEM_FN_SUFF,SRegJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
951{
952# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
953 /*
954 * Calculate the new stack pointer and check that the item doesn't cross a page boundary.
955 */
956 uint32_t const uNewEsp = pVCpu->cpum.GstCtx.esp - sizeof(TMPL_MEM_TYPE);
957 if (RT_LIKELY( !(uNewEsp & (sizeof(uint16_t) - 1))
958 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, uNewEsp, uint16_t) ))
959 {
960 /*
961 * TLB lookup.
962 */
963 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, (RTGCPTR)uNewEsp); /* Doesn't work w/o casting to RTGCPTR (win /3 hangs). */
964 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
965 if (RT_LIKELY(pTlbe->uTag == uTag))
966 {
967 /*
968 * Check TLB page table level access flags.
969 */
970 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
971 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
972 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
973 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
974 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
975 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
976 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
977 {
978 /*
979 * Do the push and return.
980 */
981 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
982 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
983 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
984 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RX32 (<-%RX32): " TMPL_MEM_FMT_TYPE " [sreg]\n",
985 uNewEsp, pVCpu->cpum.GstCtx.esp, uValue));
986 *(uint16_t *)&pTlbe->pbMappingR3[uNewEsp & GUEST_PAGE_OFFSET_MASK] = (uint16_t)uValue;
987 pVCpu->cpum.GstCtx.rsp = uNewEsp;
988 return;
989 }
990 }
991 }
992
993 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
994 outdated page pointer, or other troubles. (This will do a TLB load.) */
995 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX32 falling back\n", LOG_FN_NAME, uNewEsp));
996# endif
997 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SRegSafeJmp)(pVCpu, uValue);
998}
999
1000# endif
1001# if TMPL_MEM_TYPE_SIZE != 4
1002
1003/**
1004 * 64-bit flat stack push function that longjmps on error.
1005 */
1006DECL_INLINE_THROW(void)
1007RT_CONCAT3(iemMemFlat64StackPush,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
1008{
1009# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1010 /*
1011 * Calculate the new stack pointer and check that the item doesn't cross a page boundary.
1012 */
1013 uint64_t const uNewRsp = pVCpu->cpum.GstCtx.rsp - sizeof(TMPL_MEM_TYPE);
1014# if TMPL_MEM_TYPE_SIZE > 1
1015 if (RT_LIKELY( !(uNewRsp & TMPL_MEM_TYPE_ALIGN)
1016 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, uNewRsp, TMPL_MEM_TYPE) ))
1017# endif
1018 {
1019 /*
1020 * TLB lookup.
1021 */
1022 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, uNewRsp);
1023 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
1024 if (RT_LIKELY(pTlbe->uTag == uTag))
1025 {
1026 /*
1027 * Check TLB page table level access flags.
1028 */
1029 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1030 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1031 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1032 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
1033 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
1034 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
1035 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1036 {
1037 /*
1038 * Do the push and return.
1039 */
1040 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
1041 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1042 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1043 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RX64 (<-%RX64): " TMPL_MEM_FMT_TYPE "\n",
1044 uNewRsp, pVCpu->cpum.GstCtx.rsp, uValue));
1045 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[uNewRsp & GUEST_PAGE_OFFSET_MASK] = uValue;
1046 pVCpu->cpum.GstCtx.rsp = uNewRsp;
1047 return;
1048 }
1049 }
1050 }
1051
1052 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1053 outdated page pointer, or other troubles. (This will do a TLB load.) */
1054 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX64 falling back\n", LOG_FN_NAME, uNewRsp));
1055# endif
1056 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, uValue);
1057}
1058
1059
1060/**
1061 * 64-bit flat stack pop function that longjmps on error.
1062 */
1063DECL_INLINE_THROW(TMPL_MEM_TYPE)
1064RT_CONCAT3(iemMemFlat64StackPop,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1065{
1066# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1067 /*
1068 * Calculate the new stack pointer and check that the item doesn't cross a page boundary.
1069 */
1070 uint64_t const uOldRsp = pVCpu->cpum.GstCtx.rsp;
1071# if TMPL_MEM_TYPE_SIZE > 1
1072 if (RT_LIKELY( !(uOldRsp & TMPL_MEM_TYPE_ALIGN)
1073 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, uOldRsp, TMPL_MEM_TYPE) ))
1074# endif
1075 {
1076 /*
1077 * TLB lookup.
1078 */
1079 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, uOldRsp);
1080 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
1081 if (RT_LIKELY(pTlbe->uTag == uTag))
1082 {
1083 /*
1084 * Check TLB page table level access flags.
1085 */
1086 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1087 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1088 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1089 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
1090 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
1091 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1092 {
1093 /*
1094 * Do the pop and return.
1095 */
1096 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
1097 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1098 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1099 TMPL_MEM_TYPE const uRet = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[uOldRsp & GUEST_PAGE_OFFSET_MASK];
1100 pVCpu->cpum.GstCtx.rsp = uOldRsp + sizeof(TMPL_MEM_TYPE);
1101 Log9Ex(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " SS|%RX64 (->%RX64): " TMPL_MEM_FMT_TYPE "\n",
1102 uOldRsp, uOldRsp + sizeof(TMPL_MEM_TYPE), uRet));
1103 return uRet;
1104 }
1105 }
1106 }
1107
1108 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1109 outdated page pointer, or other troubles. (This will do a TLB load.) */
1110 Log10Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX64 falling back\n", LOG_FN_NAME, uOldRsp));
1111# endif
1112 return RT_CONCAT3(iemMemStackPop,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu);
1113}
1114
1115#endif /* TMPL_MEM_TYPE_SIZE != 4 */
1116
1117# endif /* IEM_WITH_SETJMP */
1118# endif /* TMPL_MEM_WITH_STACK */
1119
1120
1121#endif /* IEM_WITH_SETJMP */
1122
1123#undef TMPL_MEM_TYPE
1124#undef TMPL_MEM_TYPE_ALIGN
1125#undef TMPL_MEM_TYPE_SIZE
1126#undef TMPL_MEM_FN_SUFF
1127#undef TMPL_MEM_FMT_TYPE
1128#undef TMPL_MEM_FMT_DESC
1129#undef TMPL_MEM_NO_STORE
1130