VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllThrdFuncsBltIn.cpp @ 108195

Last change on this file since 108195 was 108195, checked in by vboxsync, 45 hours ago

VMM/IEM: Splitting out most of the x86 target specific stuff from IEMInternal.h and into VMMAll/target-x86/IEMInternal-x86.h. jiraref:VBP-1431

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 36.3 KB
/* $Id: IEMAllThrdFuncsBltIn.cpp 108195 2025-02-13 14:57:25Z vboxsync $ */
/** @file
 * IEM - Instruction Decoding and Emulation, Built-in Threaded Functions.
 *
 * This is separate from IEMThreadedFunctions.cpp because it doesn't work
 * with IEM_WITH_OPAQUE_DECODER_STATE defined.
 */

/*
 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_IEM_RE_THREADED
#define VMCPU_INCL_CPUM_GST_CTX
#ifdef IN_RING0
# define VBOX_VMM_TARGET_X86
#endif
#include <VBox/vmm/iem.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/pdmapic.h>
#include <VBox/vmm/pdm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/nem.h>
#include <VBox/vmm/gim.h>
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
# include <VBox/vmm/em.h>
# include <VBox/vmm/hm_svm.h>
#endif
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
# include <VBox/vmm/hmvmxinline.h>
#endif
#include <VBox/vmm/tm.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/dbgftrace.h>
#include "IEMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/log.h>
#include <VBox/err.h>
#include <VBox/param.h>
#include <VBox/dis.h>
#include <VBox/disopcode-x86-amd64.h>
#include <iprt/asm-math.h>
#include <iprt/assert.h>
#include <iprt/string.h>
#include <iprt/x86.h>

#include "IEMInline.h"



static VBOXSTRICTRC iemThreadeFuncWorkerObsoleteTb(PVMCPUCC pVCpu)
{
    /* We set fSafeToFree to false because we're being called in the context
       of a TB callback function, which for native TBs means we cannot release
       the executable memory until we've returned our way back to iemTbExec, as
       that return path goes via the native code generated for the TB. */
    iemThreadedTbObsolete(pVCpu, pVCpu->iem.s.pCurTbR3, false /*fSafeToFree*/);
    return VINF_IEM_REEXEC_BREAK;
}


/**
 * Built-in function that does absolutely nothing - for debugging.
 *
 * This can be used for artificially increasing the number of calls generated, or
 * for triggering flushes associated with threaded calls.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_Nop)
{
    RT_NOREF(pVCpu, uParam0, uParam1, uParam2);
    return VINF_SUCCESS;
}


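/*
 * Note on the calling convention used below: each built-in function is
 * declared via IEM_DECL_IEMTHREADEDFUNC_DEF and, as the bodies in this file
 * show, has the virtual CPU (pVCpu) plus three 64-bit parameters uParam0,
 * uParam1 and uParam2 available, packed by the threaded recompiler when the
 * call was emitted (the macro itself is defined in the IEM internal headers).
 * The status convention is the one visible throughout this file: VINF_SUCCESS
 * continues with the next call in the TB, while VINF_IEM_REEXEC_BREAK (or an
 * error status) stops TB execution.  A hypothetical checker would thus look
 * roughly like the sketch below (someConditionStillHolds is made up):
 *
 * @code
 *  IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_ExampleCheck)
 *  {
 *      uint8_t const cbInstr = (uint8_t)uParam0;   // low byte carries the instruction length in most checkers here
 *      RT_NOREF(uParam1, uParam2);
 *      if (RT_LIKELY(someConditionStillHolds(pVCpu, cbInstr)))
 *          return VINF_SUCCESS;                    // keep executing the TB
 *      return VINF_IEM_REEXEC_BREAK;               // leave the TB and re-evaluate
 *  }
 * @endcode
 */
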
/**
 * This is also called from iemNativeHlpAsmSafeWrapLogCpuState.
 */
DECLASM(void) iemThreadedFunc_BltIn_LogCpuStateWorker(PVMCPU pVCpu)
{
#ifdef LOG_ENABLED
    PCIEMTB const      pTb     = pVCpu->iem.s.pCurTbR3;
    PCX86FXSTATE const pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    Log2(("**** LG%c fExec=%x pTb=%p cUsed=%u\n"
          " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
          " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
          " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
          " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
          , pTb && (pTb->fFlags & IEMTB_F_TYPE_NATIVE) ? 'n' : 't', pVCpu->iem.s.fExec, pTb, pTb ? pTb->cUsed : 0,
          pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
          pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
          pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
          pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
          pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK ));
#else
    RT_NOREF(pVCpu);
#endif
}


/**
 * Built-in function that logs the current CPU state - for debugging.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_LogCpuState)
{
    iemThreadedFunc_BltIn_LogCpuStateWorker(pVCpu);
    RT_NOREF(uParam0, uParam1, uParam2);
    return VINF_SUCCESS;
}


/**
 * Built-in function that calls a C-implementation function taking zero arguments.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_DeferToCImpl0)
{
    PFNIEMCIMPL0 const pfnCImpl = (PFNIEMCIMPL0)(uintptr_t)uParam0;
    uint8_t const      cbInstr  = (uint8_t)uParam1;
    RT_NOREF(uParam2);
    return pfnCImpl(pVCpu, cbInstr);
}


/**
 * Worker for iemThreadedFunc_BltIn_CheckIrq and
 * iemThreadedFunc_BltIn_CheckTimersAndIrq that checks for pending FFs
 * and IRQs and, if it's only the latter, whether we can dispatch them now.
 */
DECL_FORCE_INLINE(int) iemThreadedFunc_BltIn_CheckIrqCommon(PVMCPUCC pVCpu)
{
    /* Get and mask the per-CPU FFs. */
    uint64_t const fCpuRaw = pVCpu->fLocalForcedActions;
    uint64_t       fFlags  = fCpuRaw & (VMCPU_FF_ALL_MASK & ~(  VMCPU_FF_PGM_SYNC_CR3
                                                              | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
                                                              | VMCPU_FF_TLB_FLUSH
                                                              | VMCPU_FF_UNHALT ));

    /* OR in VM-wide FFs and check them together. */
    uint32_t const fVmRaw = pVCpu->CTX_SUFF(pVM)->fGlobalForcedActions;
    fFlags |= fVmRaw;
    if (RT_LIKELY(!fFlags))
        return VINF_SUCCESS;

    /* Since the VMCPU_FF_INTERRUPT_XXX flags were once upon a time in fVm and
       we haven't reused the bits yet, we can still reliably check whether
       we're only here for reasons of pending interrupts and whether these
       are suppressed by EFLAGS.IF=0 or interrupt shadowing. */
    Assert(!(fVmRaw & (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)));
    AssertCompile((VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC) == 3);
    if (   fFlags <= (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
        && (   !pVCpu->cpum.GstCtx.rflags.Bits.u1IF
            || CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)))
        return VINF_SUCCESS;

    Log(("%04x:%08RX32: Pending IRQ and/or FF: fCpu=%#RX64 fVm=%#RX32 IF=%d\n",
         pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, fCpuRaw, fVmRaw, pVCpu->cpum.GstCtx.rflags.Bits.u1IF));
    STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckIrqBreaks);
    return VINF_IEM_REEXEC_BREAK;
}


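/* Worked example of the fast path above: the two interrupt FFs occupy bits 0
   and 1 (the AssertCompile), so per the reasoning in the comment above,
   fFlags <= 3 holds only when pending APIC/PIC interrupts are the sole reason
   we got here.  A pending PIC interrupt with EFLAGS.IF clear therefore lets
   the TB keep running (the guest cannot take it yet), whereas the same flag
   with IF set and no interrupt shadow falls through to VINF_IEM_REEXEC_BREAK
   so the interrupt can be dispatched. */
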
/**
 * Built-in function that checks for pending interrupts that can be delivered or
 * forced action flags.
 *
 * This triggers after the completion of an instruction, so EIP is already at
 * the next instruction. If an IRQ or important FF is pending, this will return
 * a non-zero status that stops TB execution.
 */
/** @todo add VMX / SVM variants of this. */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckIrq)
{
    RT_NOREF(uParam0, uParam1, uParam2);
    return iemThreadedFunc_BltIn_CheckIrqCommon(pVCpu);
}


/**
 * Built-in function that decrements the cTbsTillNextTimerPoll counter on direct
 * TB linking (e.g. loop jumps) and breaks out of the TB when a timer poll is due.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckTimers)
{
    if (RT_LIKELY(--pVCpu->iem.s.cTbsTillNextTimerPoll > 0))
        return VINF_SUCCESS;

    Log(("%04x:%08RX32: Check timers\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip));
    STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckTimersBreaks);
    RT_NOREF(uParam0, uParam1, uParam2);
    return VINF_IEM_REEXEC_BREAK;
}


/**
 * Combined BltIn_CheckTimers + BltIn_CheckIrq for direct linking.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckTimersAndIrq)
{
    if (RT_LIKELY(--pVCpu->iem.s.cTbsTillNextTimerPoll > 0))
        return iemThreadedFunc_BltIn_CheckIrqCommon(pVCpu);

    Log(("%04x:%08RX32: Check timers\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip));
    STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckTimersBreaks);
    RT_NOREF(uParam0, uParam1, uParam2);
    return VINF_IEM_REEXEC_BREAK;
}


/**
 * Built-in function that compares the fExec mask against uParam0.
 *
 * This is used both for IEM_CIMPL_F_MODE and IEM_CIMPL_F_VMEXIT after executing
 * an instruction.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckMode)
{
    uint32_t const fExpectedExec = (uint32_t)uParam0;
    if ((pVCpu->iem.s.fExec & IEMTB_F_KEY_MASK) == (fExpectedExec & IEMTB_F_KEY_MASK))
        return VINF_SUCCESS;
    LogFlow(("Mode changed at %04x:%08RX64: %#x -> %#x (xor: %#x, xor-key: %#x)\n",
             pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fExpectedExec,
             pVCpu->iem.s.fExec, fExpectedExec ^ pVCpu->iem.s.fExec, (fExpectedExec ^ pVCpu->iem.s.fExec) & IEMTB_F_KEY_MASK));
    RT_NOREF(uParam1, uParam2);
    STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckModeBreaks);
    return VINF_IEM_REEXEC_BREAK;
}


/**
 * Built-in function that checks for hardware instruction breakpoints.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckHwInstrBps)
{
    VBOXSTRICTRC rcStrict = DBGFBpCheckInstruction(pVCpu->CTX_SUFF(pVM), pVCpu,
                                                   pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base,
                                                      !(pVCpu->cpum.GstCtx.rflags.uBoth & CPUMCTX_INHIBIT_SHADOW_SS)
                                                   || IEM_IS_GUEST_CPU_AMD(pVCpu));
    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
        return VINF_SUCCESS;

    if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
    {
        LogFlow(("Guest HW bp at %04x:%08RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
        rcStrict = iemRaiseDebugException(pVCpu);
        Assert(rcStrict != VINF_SUCCESS);
    }
    else
        LogFlow(("VBoxDbg HW bp at %04x:%08RX64: %Rrc\n",
                 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict) ));
    RT_NOREF(uParam0, uParam1, uParam2);
    return rcStrict;
}


/**
 * Macro that implements the 16/32-bit CS.LIM check, as this is done by a
 * number of functions.
 */
/** @todo consider 32-bit EIP mid-instruction wrap-around... Difficult to
 *        test, since it would require replacing the default firmware. */
#define BODY_CHECK_CS_LIM(a_cbInstr) do { \
        if (RT_LIKELY((uint32_t)(pVCpu->cpum.GstCtx.eip + cbInstr - 1U) <= pVCpu->cpum.GstCtx.cs.u32Limit)) \
        { /* likely */ } \
        else \
        { \
            Log7(("EIP out of bounds at %04x:%08RX32 LB %u - CS.LIM=%#RX32\n", \
                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, (a_cbInstr), pVCpu->cpum.GstCtx.cs.u32Limit)); \
            return iemRaiseGeneralProtectionFault0(pVCpu); \
        } \
    } while(0)

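/* A worked example of the check above (a sketch assuming a 16-bit code segment
   with CS.LIM = 0xffff): with EIP = 0xfffe and a 3 byte instruction, the last
   byte would sit at 0xfffe + 3 - 1 = 0x10000, which is above the limit, so the
   macro raises #GP(0) rather than executing past the end of the segment. */
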
/**
 * Macro that considers whether we need CS.LIM checking after a branch or
 * crossing over to a new page.
 */
#define BODY_CONSIDER_CS_LIM_CHECKING(a_pTb, a_cbInstr) do { \
        int64_t const offFromLim = (int64_t)pVCpu->cpum.GstCtx.cs.u32Limit - (int64_t)pVCpu->cpum.GstCtx.eip; \
        if (offFromLim >= GUEST_PAGE_SIZE + 16 - (int32_t)(pVCpu->cpum.GstCtx.cs.u64Base & GUEST_PAGE_OFFSET_MASK)) \
        { /* likely */ } \
        else \
        { \
            Log7(("TB need CS.LIM: %p at %04x:%08RX64 LB %u; #%u offFromLim=%#RX64 CS.LIM=%#RX32 CS.BASE=%#RX64\n", \
                  (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), offFromLim, \
                  pVCpu->cpum.GstCtx.cs.u32Limit, pVCpu->cpum.GstCtx.cs.u64Base, __LINE__)); \
            RT_NOREF(a_pTb, a_cbInstr); \
            STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckNeedCsLimChecking); \
            return VINF_IEM_REEXEC_BREAK; \
        } \
    } while(0)

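/* Rough intuition for the threshold above: with a page aligned CS.base the
   condition requires at least GUEST_PAGE_SIZE + 16 bytes of headroom between
   EIP and CS.LIM - a whole code page plus a maximum length instruction -
   before per-instruction CS.LIM checking is deemed unnecessary; anything
   tighter takes the VINF_IEM_REEXEC_BREAK path (counted by
   StatCheckNeedCsLimChecking). */
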
/**
 * Macro that implements opcode (re-)checking.
 */
#define BODY_CHECK_OPCODES(a_pTb, a_idxRange, a_offRange, a_cbInstr) do { \
        Assert((a_idxRange) < (a_pTb)->cRanges && (a_pTb)->cRanges <= RT_ELEMENTS((a_pTb)->aRanges)); \
        Assert((a_offRange) < (a_pTb)->aRanges[(a_idxRange)].cbOpcodes); \
        /* We can use pbInstrBuf here as it will be updated when branching (and prior to executing a TB). */ \
        if (RT_LIKELY(memcmp(&pVCpu->iem.s.pbInstrBuf[(a_pTb)->aRanges[(a_idxRange)].offPhysPage + (a_offRange)], \
                             &(a_pTb)->pabOpcodes[   (a_pTb)->aRanges[(a_idxRange)].offOpcodes  + (a_offRange)], \
                             (a_pTb)->aRanges[(a_idxRange)].cbOpcodes - (a_offRange)) == 0)) \
        { /* likely */ } \
        else \
        { \
            Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; range %u, off %#x LB %#x + %#x; #%u\n", (a_pTb), \
                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), (a_idxRange), \
                  (a_pTb)->aRanges[(a_idxRange)].offOpcodes, (a_pTb)->aRanges[(a_idxRange)].cbOpcodes, (a_offRange), __LINE__)); \
            RT_NOREF(a_cbInstr); \
            return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
        } \
    } while(0)

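/* In other words: the opcode bytes recorded in the TB (pabOpcodes) are compared
   against what currently lies in guest memory for that range (via pbInstrBuf);
   a mismatch - e.g. because the guest overwrote or self-modified the code -
   hands the TB to the obsolete-TB worker, which retires it and breaks out of
   TB execution so the code can be re-translated. */
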
/**
 * Macro that implements TLB loading and pbInstrBuf updating for an
 * instruction crossing into a new page.
 *
 * This may long jump if we're raising a \#PF, \#GP or similar trouble.
 */
#define BODY_LOAD_TLB_FOR_NEW_PAGE(a_pTb, a_offInstr, a_idxRange, a_cbInstr) do { \
        pVCpu->iem.s.pbInstrBuf       = NULL; \
        pVCpu->iem.s.offCurInstrStart = GUEST_PAGE_SIZE - (a_offInstr); \
        pVCpu->iem.s.offInstrNextByte = GUEST_PAGE_SIZE; \
        iemOpcodeFetchBytesJmp(pVCpu, 0, NULL); \
        \
        RTGCPHYS const GCPhysNewPage = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange); \
        if (RT_LIKELY(   pVCpu->iem.s.GCPhysInstrBuf == GCPhysNewPage \
                      && pVCpu->iem.s.pbInstrBuf)) \
        { /* likely */ } \
        else \
        { \
            Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; crossing at %#x; GCPhys=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                  (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), (a_offInstr), \
                  pVCpu->iem.s.GCPhysInstrBuf, GCPhysNewPage, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
            RT_NOREF(a_cbInstr); \
            return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
        } \
    } while(0)

/**
 * Macro that implements TLB loading and pbInstrBuf updating when branching or
 * when crossing a page on an instruction boundary.
 *
 * This differs from BODY_LOAD_TLB_FOR_NEW_PAGE in that it will first check if
 * it is an inter-page branch and also check the page offset.
 *
 * This may long jump if we're raising a \#PF, \#GP or similar trouble.
 */
#define BODY_LOAD_TLB_AFTER_BRANCH(a_pTb, a_idxRange, a_cbInstr) do { \
        /* Is RIP within the current code page? */ \
        Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_IS_64BIT_CODE(pVCpu)); \
        uint64_t const uPc = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base; \
        uint64_t const off = uPc - pVCpu->iem.s.uInstrBufPc; \
        if (off < pVCpu->iem.s.cbInstrBufTotal) \
        { \
            Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK)); \
            Assert(pVCpu->iem.s.pbInstrBuf); \
            RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
                                                     | pTb->aRanges[(a_idxRange)].offPhysPage; \
            if (GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + off) \
            { /* we're good */ } \
            /** @todo r=bird: Not sure if we need the TB obsolete complication here. \
             *        If we're preceded by an indirect jump, there is no reason why the TB \
             *        would be 'obsolete' just because this time around the indirect jump ends \
             *        up at the same offset in a different page. This would be real bad for \
             *        indirect trampolines/validators. */ \
            else if (pTb->aRanges[(a_idxRange)].offPhysPage != off) \
            { \
                Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; branching/1; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckBranchMisses); \
                return VINF_IEM_REEXEC_BREAK; \
            } \
            else \
            { \
                Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; branching/1; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
            } \
        } \
        else \
        { \
            /* Must translate new RIP. */ \
            pVCpu->iem.s.pbInstrBuf       = NULL; \
            pVCpu->iem.s.offCurInstrStart = 0; \
            pVCpu->iem.s.offInstrNextByte = 0; \
            iemOpcodeFetchBytesJmp(pVCpu, 0, NULL); \
            Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK) || !pVCpu->iem.s.pbInstrBuf); \
            \
            RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
                                                     | pTb->aRanges[(a_idxRange)].offPhysPage; \
            uint64_t const offNew = uPc - pVCpu->iem.s.uInstrBufPc; \
            if (   GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + offNew \
                && pVCpu->iem.s.pbInstrBuf) \
            { /* likely */ } \
            else if (   pTb->aRanges[(a_idxRange)].offPhysPage != offNew \
                     && pVCpu->iem.s.pbInstrBuf) \
            { \
                Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; branching/2; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + offNew, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckBranchMisses); \
                return VINF_IEM_REEXEC_BREAK; \
            } \
            else \
            { \
                Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; branching/2; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + offNew, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
            } \
        } \
    } while(0)

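/* Summary of the outcomes above: if the branch target still maps to the
   physical page and offset the TB range expects, execution simply continues;
   if the target lands at a different offset within the page than recorded, it
   is counted as a branch miss and TB execution is broken off; and if the
   offset matches but the underlying physical page changed, the TB is retired
   via the obsolete-TB worker. */
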
/**
 * Macro that implements PC check after a conditional branch.
 */
#define BODY_CHECK_PC_AFTER_BRANCH(a_pTb, a_idxRange, a_offRange, a_cbInstr) do { \
        /* Is RIP within the current code page? */ \
        Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_IS_64BIT_CODE(pVCpu)); \
        uint64_t const uPc = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base; \
        uint64_t const off = uPc - pVCpu->iem.s.uInstrBufPc; \
        Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK)); \
        RTGCPHYS const GCPhysRangePageWithOffset = (  iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
                                                    | (a_pTb)->aRanges[(a_idxRange)].offPhysPage) \
                                                 + (a_offRange); \
        if (   GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + off \
            && off < /*pVCpu->iem.s.cbInstrBufTotal - ignore flushes and CS.LIM is checked elsewhere*/ X86_PAGE_SIZE) \
        { /* we're good */ } \
        else \
        { \
            Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; GCPhysWithOffset=%RGp hoped for %RGp, pbInstrBuf=%p - #%u\n", \
                  (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                  pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
            RT_NOREF(a_cbInstr); \
            STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckBranchMisses); \
            return VINF_IEM_REEXEC_BREAK; \
        } \
    } while(0)



/**
 * Built-in function that checks that EIP/IP + uParam0 is within CS.LIM,
 * raising a \#GP(0) if this isn't the case.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLim)
{
    uint32_t const cbInstr = (uint8_t)uParam0;
    RT_NOREF(uParam1, uParam2);
    BODY_CHECK_CS_LIM(cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for re-checking opcodes and CS.LIM after an instruction
 * that may have modified them.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for re-checking opcodes after an instruction that may have
 * modified them.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for re-checking opcodes and considering the need for CS.LIM
 * checking after an instruction that may have modified them.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesConsiderCsLim)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    return VINF_SUCCESS;
}


/*
 * Post-branching checkers.
 */

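/*
 * Naming pattern for the variants below (and above): a "CheckCsLimAnd..."
 * flavour always performs the CS.LIM check, the plain flavour omits it, and a
 * "...ConsiderCsLim" flavour runs BODY_CONSIDER_CS_LIM_CHECKING to decide at
 * runtime whether execution must break out so CS.LIM checking can be applied.
 */
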
/**
 * Built-in function for checking CS.LIM, checking the PC and checking opcodes
 * after conditional branching within the same page.
 *
 * @see iemThreadedFunc_BltIn_CheckPcAndOpcodes
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, offRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking the PC and checking opcodes after conditional
 * branching within the same page.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckPcAndOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, offRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking the PC and checking opcodes and considering
 * the need for CS.LIM checking after conditional branching within the same
 * page.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckPcAndOpcodesConsiderCsLim)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, offRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
 * transitioning to a different code page.
 *
 * The code page transition can either be natural (crossing over onto the next
 * page, with the instruction starting at page offset zero) or the result of
 * branching.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes when transitioning to
 * a different code page.
 *
 * The code page transition can either be natural (crossing over onto the next
 * page, with the instruction starting at page offset zero) or the result of
 * branching.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes and considering the
 * need for CS.LIM checking when transitioning to a different code page.
 *
 * The code page transition can either be natural (crossing over onto the next
 * page, with the instruction starting at page offset zero) or the result of
 * branching.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesLoadingTlbConsiderCsLim)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}



/*
 * Natural page crossing checkers.
 */

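/*
 * Three families follow: the "AcrossPage" variants re-check opcodes on both
 * the old and the new page for an instruction that straddles the boundary,
 * the "OnNextPage" variants only check the opcodes on the new page, and the
 * "OnNewPage" variants handle the case where the first instruction starts at
 * byte zero of the new page.
 */
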
/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes on
 * both pages when transitioning to a different code page.
 *
 * This is used when the previous instruction requires revalidation of opcode
 * bytes and the current instruction straddles a page boundary with opcode
 * bytes in both the old and new page.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint8_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes on both pages when
 * transitioning to a different code page.
 *
 * This is used when the previous instruction requires revalidation of opcode
 * bytes and the current instruction straddles a page boundary with opcode
 * bytes in both the old and new page.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint8_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes on both pages and
 * considering the need for CS.LIM checking when transitioning to a different
 * code page.
 *
 * This is used when the previous instruction requires revalidation of opcode
 * bytes and the current instruction straddles a page boundary with opcode
 * bytes in both the old and new page.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlbConsiderCsLim)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint8_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
 * advancing naturally to a different code page.
 *
 * Only opcodes on the new page are checked.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint8_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    //uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    RT_NOREF(uParam2);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes when advancing
 * naturally to a different code page.
 *
 * Only opcodes on the new page are checked.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint8_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    //uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    RT_NOREF(uParam2);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes and considering the
 * need for CS.LIM checking when advancing naturally to a different code page.
 *
 * Only opcodes on the new page are checked.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlbConsiderCsLim)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint8_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    //uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    RT_NOREF(uParam2);
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
 * advancing naturally to a different code page with first instr at byte 0.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    RT_NOREF(uParam2); //Assert(uParam2 == 0 /*offRange*/);
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);
    Assert(pVCpu->iem.s.offCurInstrStart == 0);
    BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes when advancing
 * naturally to a different code page with first instr at byte 0.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    RT_NOREF(uParam2); //Assert(uParam2 == 0 /*offRange*/);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);
    Assert(pVCpu->iem.s.offCurInstrStart == 0);
    BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes and considering the
 * need for CS.LIM checking when advancing naturally to a different code page
 * with first instr at byte 0.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlbConsiderCsLim)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    RT_NOREF(uParam2); //Assert(uParam2 == 0 /*offRange*/);
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);
    Assert(pVCpu->iem.s.offCurInstrStart == 0);
    BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for jumping in the call sequence.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_Jump)
{
    Assert(uParam1 == 0 && uParam2 == 0);
    RT_NOREF(pVCpu, uParam0, uParam1, uParam2);
    return VINF_IEM_REEXEC_JUMP;
}
