
source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllThrdFuncsBltIn.cpp@100764

Last change on this file since 100764 was 100761, checked in by vboxsync, 17 months ago

VMM/IEM: Check for IRQs every so often, especially after sti, popf and iret. Increased the hash table size. Disabled some debug code. bugref:10369

/* $Id: IEMAllThrdFuncsBltIn.cpp 100761 2023-08-01 02:24:11Z vboxsync $ */
/** @file
 * IEM - Instruction Decoding and Emulation, Built-in Threaded Functions.
 *
 * This is separate from IEMThreadedFunctions.cpp because it doesn't work
 * with IEM_WITH_OPAQUE_DECODER_STATE defined.
 */

/*
 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_IEM_RE_THREADED
#define VMCPU_INCL_CPUM_GST_CTX
#include <VBox/vmm/iem.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/apic.h>
#include <VBox/vmm/pdm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/nem.h>
#include <VBox/vmm/gim.h>
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
# include <VBox/vmm/em.h>
# include <VBox/vmm/hm_svm.h>
#endif
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
# include <VBox/vmm/hmvmxinline.h>
#endif
#include <VBox/vmm/tm.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/dbgftrace.h>
#include "IEMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/log.h>
#include <VBox/err.h>
#include <VBox/param.h>
#include <VBox/dis.h>
#include <VBox/disopcode-x86-amd64.h>
#include <iprt/asm-math.h>
#include <iprt/assert.h>
#include <iprt/string.h>
#include <iprt/x86.h>

#include "IEMInline.h"


static VBOXSTRICTRC iemThreadeFuncWorkerObsoleteTb(PVMCPUCC pVCpu)
{
    iemThreadedTbObsolete(pVCpu, pVCpu->iem.s.pCurTbR3);
    return VINF_IEM_REEXEC_MODE_CHANGED; /** @todo different status code... */
}


/**
 * Built-in function that calls a C-implementation function taking zero arguments.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_DeferToCImpl0)
{
    PFNIEMCIMPL0 const pfnCImpl = (PFNIEMCIMPL0)(uintptr_t)uParam0;
    uint8_t const      cbInstr  = (uint8_t)uParam1;
    RT_NOREF(uParam2);
    return pfnCImpl(pVCpu, cbInstr);
}
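
/*
 * Illustration (a sketch, not part of the upstream file): uParam0 smuggles
 * the C-implementation pointer and uParam1 the instruction length, so inside
 * the TB executor a deferred one-byte 'hlt' would be dispatched roughly like
 * the disabled snippet below.  iemCImpl_hlt stands in for any PFNIEMCIMPL0.
 */
#if 0
    VBOXSTRICTRC rcStrict = iemThreadedFunc_BltIn_DeferToCImpl0(pVCpu,
                                                                (uint64_t)(uintptr_t)iemCImpl_hlt, /* uParam0: the C-impl worker */
                                                                1,                                 /* uParam1: cbInstr of hlt */
                                                                0);                                /* uParam2: unused */
#endif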


/**
 * Built-in function that checks for pending interrupts that can be delivered.
 *
 * This triggers after the completion of an instruction, so EIP is already at
 * the next instruction. If an IRQ or important FF is pending, this will return
 * a non-zero status that stops TB execution.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckIrq)
{
    RT_NOREF(uParam0, uParam1, uParam2);

    /*
     * Check for IRQs and other FFs that need servicing.
     */
    uint64_t fCpu = pVCpu->fLocalForcedActions;
    fCpu &= VMCPU_FF_ALL_MASK & ~(  VMCPU_FF_PGM_SYNC_CR3
                                  | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
                                  | VMCPU_FF_TLB_FLUSH
                                  | VMCPU_FF_UNHALT );
    if (RT_LIKELY(   (   !fCpu
                      || (   !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
                          && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
                  && !VM_FF_IS_ANY_SET(pVCpu->CTX_SUFF(pVM), VM_FF_ALL_MASK) ))
        return VINF_SUCCESS;

    Log(("%04x:%08RX32: Pending IRQ and/or FF: fCpu=%#RX64 fVm=%#RX32 IF=%d\n",
         pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, fCpu,
         pVCpu->CTX_SUFF(pVM)->fGlobalForcedActions & VM_FF_ALL_MASK, pVCpu->cpum.GstCtx.rflags.Bits.u1IF));
    return VINF_IEM_REEXEC_MODE_CHANGED;
}
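
/*
 * Note on the masking in CheckIrq: the sync-CR3, TLB-flush and unhalt flags
 * are excluded because they are serviced elsewhere and need not interrupt TB
 * execution.  If the only remaining bits are PIC/APIC interrupts while the
 * guest has RFLAGS.IF clear, it could not take the interrupt anyway, so the
 * TB keeps running; any other local FF, or any VM-wide FF, forces an exit to
 * the outer loop.
 */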



/**
 * Built-in function that compares the fExec mask against uParam0.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckMode)
{
    uint32_t const fExpectedExec = (uint32_t)uParam0;
    if (pVCpu->iem.s.fExec == fExpectedExec)
        return VINF_SUCCESS;
    LogFlow(("Mode changed at %04x:%08RX64: %#x -> %#x (xor: %#x)\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
             fExpectedExec, pVCpu->iem.s.fExec, fExpectedExec ^ pVCpu->iem.s.fExec));
    RT_NOREF(uParam1, uParam2);
    return VINF_IEM_REEXEC_MODE_CHANGED;
}
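
/*
 * Note: a TB is compiled for one specific IEMCPU::fExec value (it encodes the
 * CPU mode, CPL and related state), so the single 32-bit compare in CheckMode
 * is enough to prove the TB still matches the guest; any difference sends the
 * caller back to TB lookup/recompilation.
 */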


DECL_FORCE_INLINE(RTGCPHYS) iemTbGetRangePhysPageAddr(PCIEMTB pTb, uint8_t idxRange)
{
    Assert(idxRange < RT_MIN(pTb->cRanges, RT_ELEMENTS(pTb->aRanges)));
    uint8_t const idxPage = pTb->aRanges[idxRange].idxPhysPage;
    Assert(idxPage <= RT_ELEMENTS(pTb->aGCPhysPages));
    if (idxPage == 0)
        return pTb->GCPhysPc & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
    Assert(!(pTb->aGCPhysPages[idxPage - 1] & GUEST_PAGE_OFFSET_MASK));
    return pTb->aGCPhysPages[idxPage - 1];
}
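
/*
 * Encoding note: idxPhysPage == 0 refers to the page the TB starts on (taken
 * from IEMTB::GCPhysPc), while 1..N are one-based indexes into aGCPhysPages
 * for any additional pages the TB covers.
 */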


/**
 * Macro that implements the 16/32-bit CS.LIM check, as this is done by a
 * number of functions.
 */
#define BODY_CHECK_CS_LIM(a_cbInstr) do { \
        if (RT_LIKELY(pVCpu->cpum.GstCtx.eip - pVCpu->cpum.GstCtx.cs.u32Limit >= cbInstr)) \
        { /* likely */ } \
        else \
        { \
            Log7(("EIP out of bounds at %04x:%08RX32 LB %u - CS.LIM=%#RX32\n", \
                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, (a_cbInstr), pVCpu->cpum.GstCtx.cs.u32Limit)); \
            return iemRaiseGeneralProtectionFault0(pVCpu); \
        } \
    } while(0)
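
/*
 * How the single compare above works (one reading of it): the subtraction is
 * unsigned 32-bit, so an EIP below CS.LIM wraps to a huge difference and
 * passes, whereas an EIP that has just run past the limit produces a small
 * difference (below cbInstr) and takes the \#GP(0) path.  This relies on TB
 * execution advancing EIP one instruction at a time, keeping any overrun
 * short.
 */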

/**
 * Macro that implements opcode (re-)checking.
 */
#define BODY_CHECK_OPCODES(a_pTb, a_idxRange, a_offRange, a_cbInstr) do { \
        Assert((a_idxRange) < (a_pTb)->cRanges && (a_pTb)->cRanges <= RT_ELEMENTS((a_pTb)->aRanges)); \
        Assert((a_offRange) < (a_pTb)->aRanges[(a_idxRange)].cbOpcodes); \
        /* We can use pbInstrBuf here as it will be updated when branching (and prior to executing a TB). */ \
        if (RT_LIKELY(memcmp(&pVCpu->iem.s.pbInstrBuf[(a_pTb)->aRanges[(a_idxRange)].offPhysPage + (a_offRange)], \
                             &(a_pTb)->pabOpcodes[ (a_pTb)->aRanges[(a_idxRange)].offOpcodes + (a_offRange)], \
                             (a_pTb)->aRanges[(a_idxRange)].cbOpcodes - (a_offRange)) == 0)) \
        { /* likely */ } \
        else \
        { \
            Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; range %u, off %#x LB %#x + %#x; #%u\n", (a_pTb), \
                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), (a_idxRange), \
                  (a_pTb)->aRanges[(a_idxRange)].offOpcodes, (a_pTb)->aRanges[(a_idxRange)].cbOpcodes, (a_offRange), __LINE__)); \
            RT_NOREF(a_cbInstr); \
            return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
        } \
    } while(0)
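
/*
 * Worked example for the compare above, with invented numbers: for a range
 * with offPhysPage=0x123, offOpcodes=0x40, cbOpcodes=0x22 and offRange=8, the
 * bytes currently mapped at page offsets 0x12B..0x144 are compared against
 * the recorded copy at pabOpcodes[0x48..0x61].  Self-modifying or remapped
 * code shows up as a mismatch and retires the TB via
 * iemThreadeFuncWorkerObsoleteTb.
 */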

/**
 * Macro that implements TLB loading and pbInstrBuf updating for an
 * instruction crossing into a new page.
 *
 * This may long jump if we're raising a \#PF, \#GP or similar trouble.
 */
#define BODY_LOAD_TLB_FOR_NEW_PAGE(a_pTb, a_offInstr, a_idxRange, a_cbInstr) do { \
        pVCpu->iem.s.pbInstrBuf       = NULL; \
        pVCpu->iem.s.offCurInstrStart = GUEST_PAGE_SIZE - (a_offInstr); \
        pVCpu->iem.s.offInstrNextByte = GUEST_PAGE_SIZE; \
        iemOpcodeFetchBytesJmp(pVCpu, 0, NULL); \
        \
        RTGCPHYS const GCPhysNewPage = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange); \
        if (RT_LIKELY(   pVCpu->iem.s.GCPhysInstrBuf == GCPhysNewPage \
                      && pVCpu->iem.s.pbInstrBuf)) \
        { /* likely */ } \
        else \
        { \
            Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; crossing at %#x; GCPhys=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                  (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), (a_offInstr), \
                  pVCpu->iem.s.GCPhysInstrBuf, GCPhysNewPage, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
            RT_NOREF(a_cbInstr); \
            return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
        } \
    } while(0)
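
/*
 * Note: setting pbInstrBuf to NULL is what forces iemOpcodeFetchBytesJmp to
 * re-walk the page tables and map the new code page; the physical address it
 * produces is then compared with the page this TB range was compiled from.
 * A mismatch, or failure to obtain a direct buffer, retires the TB rather
 * than risk executing stale opcode bytes.
 */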

/**
 * Macro that implements TLB loading and pbInstrBuf updating when branching or
 * when crossing a page on an instruction boundary.
 *
 * This differs from BODY_LOAD_TLB_FOR_NEW_PAGE in that it will first check if
 * it is an inter-page branch and also check the page offset.
 *
 * This may long jump if we're raising a \#PF, \#GP or similar trouble.
 */
#define BODY_LOAD_TLB_AFTER_BRANCH(a_pTb, a_idxRange, a_cbInstr) do { \
        /* Is RIP within the current code page? */ \
        Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_IS_64BIT_CODE(pVCpu)); \
        uint64_t const uPc = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base; \
        uint64_t const off = uPc - pVCpu->iem.s.uInstrBufPc; \
        if (off < pVCpu->iem.s.cbInstrBufTotal) \
        { \
            Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK)); \
            Assert(pVCpu->iem.s.pbInstrBuf); \
            RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
                                                     | pTb->aRanges[(a_idxRange)].offPhysPage; \
            if (GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + off) \
            { /* we're good */ } \
            else if (pTb->aRanges[(a_idxRange)].offPhysPage != off) \
            { \
                Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; branching/1; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                return VINF_IEM_REEXEC_MODE_CHANGED; /** @todo new status code? */ \
            } \
            else \
            { \
                Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; branching/1; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
            } \
        } \
        else \
        { \
            /* Must translate new RIP. */ \
            pVCpu->iem.s.pbInstrBuf       = NULL; \
            pVCpu->iem.s.offCurInstrStart = 0; \
            pVCpu->iem.s.offInstrNextByte = 0; \
            iemOpcodeFetchBytesJmp(pVCpu, 0, NULL); \
            Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK) || !pVCpu->iem.s.pbInstrBuf); \
            \
            RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
                                                     | pTb->aRanges[(a_idxRange)].offPhysPage; \
            uint64_t const offNew = uPc - pVCpu->iem.s.uInstrBufPc; \
            if (   GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + offNew \
                && pVCpu->iem.s.pbInstrBuf) \
            { /* likely */ } \
            else if (   pTb->aRanges[(a_idxRange)].offPhysPage != offNew \
                     && pVCpu->iem.s.pbInstrBuf) \
            { \
                Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; branching/2; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + offNew, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                return VINF_IEM_REEXEC_MODE_CHANGED; /** @todo new status code? */ \
            } \
            else \
            { \
                Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; branching/2; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + offNew, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
            } \
        } \
    } while(0)
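
/*
 * The three outcomes above, summarised: (1) the branch target resolves to
 * exactly the physical page and offset the TB expects - carry on; (2) the
 * page offset differs - the branch went somewhere this TB was not compiled
 * for, so exit for a fresh TB lookup (the mode-changed status doubles for
 * this until a dedicated one exists, per the todo); (3) the offset matches
 * but the physical page does not - the mapping changed, so the TB is retired
 * as obsolete.
 */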

/**
 * Macro that implements PC check after a conditional branch.
 */
#define BODY_CHECK_PC_AFTER_BRANCH(a_pTb, a_idxRange, a_cbInstr) do { \
        /* Is RIP within the current code page? */ \
        Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_IS_64BIT_CODE(pVCpu)); \
        uint64_t const uPc = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base; \
        uint64_t const off = uPc - pVCpu->iem.s.uInstrBufPc; \
        Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK)); \
        RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
                                                 | pTb->aRanges[(a_idxRange)].offPhysPage; \
        if (   GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + off \
            && off < pVCpu->iem.s.cbInstrBufTotal) \
        { /* we're good */ } \
        else \
        { \
            Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; GCPhysWithOffset=%RGp hoped for %RGp, pbInstrBuf=%p - #%u\n", \
                  (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                  pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
            RT_NOREF(a_cbInstr); \
            return VINF_IEM_REEXEC_MODE_CHANGED; /** @todo new status code? */ \
        } \
    } while(0)
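
/*
 * Contrast with BODY_LOAD_TLB_AFTER_BRANCH: this variant never reloads the
 * instruction buffer.  It is used after conditional branches expected to stay
 * within the current page, so the target must already lie inside the mapped
 * buffer at the page offset the TB expects; anything else is a jump miss.
 */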


/**
 * Built-in function that checks that EIP/IP + uParam0 is within CS.LIM,
 * raising a \#GP(0) if this isn't the case.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLim)
{
    uint32_t const cbInstr = (uint32_t)uParam0;
    RT_NOREF(uParam1, uParam2);
    BODY_CHECK_CS_LIM(cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for re-checking opcodes and CS.LIM after an instruction
 * that may have modified them.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for re-checking opcodes after an instruction that may have
 * modified them.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    return VINF_SUCCESS;
}


/*
 * Post-branching checkers.
 */

/**
 * Built-in function for checking CS.LIM, checking the PC and checking opcodes
 * after conditional branching within the same page.
 *
 * @see iemThreadedFunc_BltIn_CheckPcAndOpcodes
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking the PC and checking opcodes after conditional
 * branching within the same page.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckPcAndOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
 * transitioning to a different code page.
 *
 * The code page transition can either be natural over onto the next page (with
 * the instruction starting at page offset zero) or by means of branching.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes when transitioning to
 * a different code page.
 *
 * The code page transition can either be natural over onto the next page (with
 * the instruction starting at page offset zero) or by means of branching.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}



/*
 * Natural page crossing checkers.
 */

/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes on
 * both pages when transitioning to a different code page.
 *
 * This is used when the previous instruction requires revalidation of opcode
 * bytes and the current instruction straddles a page boundary with opcode
 * bytes in both the old and new page.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint32_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes on both pages when
 * transitioning to a different code page.
 *
 * This is used when the previous instruction requires revalidation of opcode
 * bytes and the current instruction straddles a page boundary with opcode
 * bytes in both the old and new page.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint32_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    return VINF_SUCCESS;
}
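
/*
 * Parameter packing in the two AcrossPage functions above: uParam0 carries
 * two 32-bit values, the full instruction length in the low half and the
 * number of instruction bytes on the first (old) page in the high half.  As
 * a made-up example, a 5-byte instruction with 3 bytes on the old page would
 * arrive as uParam0 = ((uint64_t)3 << 32) | 5, the packing being done by the
 * threaded recompiler when it emits the call.
 */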


/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
 * advancing naturally to a different code page.
 *
 * Only opcodes on the new page are checked.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint32_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    //uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    RT_NOREF(uParam2);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes when advancing
 * naturally to a different code page.
 *
 * Only opcodes on the new page are checked.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint32_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    //uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    RT_NOREF(uParam2);
    return VINF_SUCCESS;
}
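
/*
 * Difference from the AcrossPage pair: these OnNextPage variants only check
 * opcodes on the new page, presumably because the bytes on the old page are
 * already covered by a preceding check in the same TB, while the AcrossPage
 * variants re-validate the old-page bytes as well.
 */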


/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
 * advancing naturally to a different code page with first instr at byte 0.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    Assert(uParam2 == 0 /*offRange*/); RT_NOREF(uParam2);
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);
    Assert(pVCpu->iem.s.offCurInstrStart == 0);
    BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes when advancing
 * naturally to a different code page with first instr at byte 0.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    Assert(uParam2 == 0 /*offRange*/); RT_NOREF(uParam2);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);
    Assert(pVCpu->iem.s.offCurInstrStart == 0);
    BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
    return VINF_SUCCESS;
}