VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllThrdFuncsBltIn.cpp@105231

Last change on this file since 105231 was 105072, checked in by vboxsync, 5 months ago

VMM/IEM,DBGF,bs3-cpu-weird-1: Early data breakpoint support, mostly untested except for the ring transition tests in bs3-cpu-weird-1. bugref:10715

/* $Id: IEMAllThrdFuncsBltIn.cpp 105072 2024-06-28 12:03:20Z vboxsync $ */
/** @file
 * IEM - Instruction Decoding and Emulation, Built-in Threaded Functions.
 *
 * This is separate from IEMThreadedFunctions.cpp because it doesn't work
 * with IEM_WITH_OPAQUE_DECODER_STATE defined.
 */

/*
 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_IEM_RE_THREADED
#define VMCPU_INCL_CPUM_GST_CTX
#include <VBox/vmm/iem.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/apic.h>
#include <VBox/vmm/pdm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/nem.h>
#include <VBox/vmm/gim.h>
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
# include <VBox/vmm/em.h>
# include <VBox/vmm/hm_svm.h>
#endif
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
# include <VBox/vmm/hmvmxinline.h>
#endif
#include <VBox/vmm/tm.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/dbgftrace.h>
#include "IEMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/log.h>
#include <VBox/err.h>
#include <VBox/param.h>
#include <VBox/dis.h>
#include <VBox/disopcode-x86-amd64.h>
#include <iprt/asm-math.h>
#include <iprt/assert.h>
#include <iprt/string.h>
#include <iprt/x86.h>

#include "IEMInline.h"


/**
 * Worker for the obsolete-TB case: marks the current TB as obsolete and
 * returns VINF_IEM_REEXEC_BREAK.
 */
static VBOXSTRICTRC iemThreadeFuncWorkerObsoleteTb(PVMCPUCC pVCpu)
{
    /* We set fSafeToFree to false because we're being called in the context
       of a TB callback function, which for native TBs means we cannot release
       the executable memory until we've returned our way back to iemTbExec,
       as that return path goes via the native code generated for the TB. */
    iemThreadedTbObsolete(pVCpu, pVCpu->iem.s.pCurTbR3, false /*fSafeToFree*/);
    return VINF_IEM_REEXEC_BREAK;
}


/**
 * Built-in function that does absolutely nothing - for debugging.
 *
 * This can be used for artificially increasing the number of calls generated,
 * or for triggering flushes associated with threaded calls.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_Nop)
{
    RT_NOREF(pVCpu, uParam0, uParam1, uParam2);
    return VINF_SUCCESS;
}



/**
 * This is also called from iemNativeHlpAsmSafeWrapLogCpuState.
 */
DECLASM(void) iemThreadedFunc_BltIn_LogCpuStateWorker(PVMCPU pVCpu)
{
#ifdef LOG_ENABLED
    PCIEMTB const      pTb     = pVCpu->iem.s.pCurTbR3;
    PCX86FXSTATE const pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    Log2(("**** LG%c fExec=%x pTb=%p cUsed=%u\n"
          " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
          " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
          " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
          " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
          , pTb && (pTb->fFlags & IEMTB_F_TYPE_NATIVE) ? 'n' : 't', pVCpu->iem.s.fExec, pTb, pTb ? pTb->cUsed : 0,
          pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
          pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
          pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
          pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
          pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK ));
#else
    RT_NOREF(pVCpu);
#endif
}


/**
 * Built-in function that logs the current CPU state - for debugging.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_LogCpuState)
{
    iemThreadedFunc_BltIn_LogCpuStateWorker(pVCpu);
    RT_NOREF(uParam0, uParam1, uParam2);
    return VINF_SUCCESS;
}


/**
 * Built-in function that calls a C-implementation function taking zero arguments.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_DeferToCImpl0)
{
    PFNIEMCIMPL0 const pfnCImpl = (PFNIEMCIMPL0)(uintptr_t)uParam0;
    uint8_t const      cbInstr  = (uint8_t)uParam1;
    RT_NOREF(uParam2);
    return pfnCImpl(pVCpu, cbInstr);
}
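
/* Example (illustrative sketch, simplified): the threaded recompiler emits a
   call entry packing the C implementation pointer and the instruction length,
   roughly:
        pCall->auParams[0] = (uintptr_t)iemCImpl_cli;   - zero-argument C impl
        pCall->auParams[1] = 2;                         - cbInstr
   This built-in then unpacks those parameters and tail-calls the
   implementation.  (auParams and iemCImpl_cli are used here purely for
   illustration of the packing convention.) */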


/**
 * Built-in function that checks for pending interrupts that can be delivered
 * or forced action flags that need servicing.
 *
 * This triggers after the completion of an instruction, so EIP is already at
 * the next instruction.  If an IRQ or important FF is pending, this will
 * return a non-zero status that stops TB execution.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckIrq)
{
    RT_NOREF(uParam0, uParam1, uParam2);

    /*
     * Check for IRQs and other FFs that need servicing.
     */
    uint64_t fCpu = pVCpu->fLocalForcedActions;
    fCpu &= VMCPU_FF_ALL_MASK & ~(  VMCPU_FF_PGM_SYNC_CR3
                                  | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
                                  | VMCPU_FF_TLB_FLUSH
                                  | VMCPU_FF_UNHALT );
    /** @todo this isn't even close to the NMI and interrupt conditions in EM! */
    if (RT_LIKELY(   (   !fCpu
                      || (   !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
                          && (   !pVCpu->cpum.GstCtx.rflags.Bits.u1IF
                              || CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)) ) )
                  && !VM_FF_IS_ANY_SET(pVCpu->CTX_SUFF(pVM), VM_FF_ALL_MASK) ))
        return VINF_SUCCESS;

    Log(("%04x:%08RX32: Pending IRQ and/or FF: fCpu=%#RX64 fVm=%#RX32 IF=%d\n",
         pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, fCpu,
         pVCpu->CTX_SUFF(pVM)->fGlobalForcedActions & VM_FF_ALL_MASK, pVCpu->cpum.GstCtx.rflags.Bits.u1IF));
    STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckIrqBreaks);
    return VINF_IEM_REEXEC_BREAK;
}
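
/* Example (illustrative): assuming only VMCPU_FF_INTERRUPT_PIC is pending and
   EFLAGS.IF is clear, the guest cannot take the interrupt yet, so the check
   above returns VINF_SUCCESS and the TB keeps executing.  Once a later
   instruction sets IF and any interrupt shadow has expired, the same pending
   flag makes this return VINF_IEM_REEXEC_BREAK so the outer execution loop
   can deliver the interrupt. */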


/**
 * Built-in function that compares the fExec mask against uParam0.
 *
 * This is used both for IEM_CIMPL_F_MODE and IEM_CIMPL_F_VMEXIT after executing
 * an instruction.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckMode)
{
    uint32_t const fExpectedExec = (uint32_t)uParam0;
    if ((pVCpu->iem.s.fExec & IEMTB_F_KEY_MASK) == (fExpectedExec & IEMTB_F_KEY_MASK))
        return VINF_SUCCESS;
    LogFlow(("Mode changed at %04x:%08RX64: %#x -> %#x (xor: %#x, xor-key: %#x)\n",
             pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fExpectedExec,
             pVCpu->iem.s.fExec, fExpectedExec ^ pVCpu->iem.s.fExec, (fExpectedExec ^ pVCpu->iem.s.fExec) & IEMTB_F_KEY_MASK));
    RT_NOREF(uParam1, uParam2);
    STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckModeBreaks);
    return VINF_IEM_REEXEC_BREAK;
}
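
/* Example (illustrative): if a far jump switched the guest from 32-bit code
   to 16-bit code, the mode bits in fExec no longer match the value that was
   baked into uParam0 when the TB was compiled, the masked compare above
   fails, and the TB is left via VINF_IEM_REEXEC_BREAK so a TB matching the
   new execution mode can be looked up or compiled. */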


/**
 * Built-in function that checks for hardware instruction breakpoints.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckHwInstrBps)
{
    VBOXSTRICTRC rcStrict = DBGFBpCheckInstruction(pVCpu->CTX_SUFF(pVM), pVCpu,
                                                   pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base,
                                                      !(pVCpu->cpum.GstCtx.rflags.uBoth & CPUMCTX_INHIBIT_SHADOW_SS)
                                                   || IEM_IS_GUEST_CPU_AMD(pVCpu));
    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
        return VINF_SUCCESS;

    if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
    {
        LogFlow(("Guest HW bp at %04x:%08RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
        rcStrict = iemRaiseDebugException(pVCpu);
        Assert(rcStrict != VINF_SUCCESS);
    }
    else
        LogFlow(("VBoxDbg HW bp at %04x:%08RX64: %Rrc\n",
                 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict) ));
    RT_NOREF(uParam0, uParam1, uParam2);
    return rcStrict;
}


/**
 * Macro that implements the 16/32-bit CS.LIM check, as this is done by a
 * number of functions.
 */
/** @todo consider 32-bit EIP mid-instruction wrap-around... Difficult to
 *        test, since it would require replacing the default firmware. */
#define BODY_CHECK_CS_LIM(a_cbInstr) do { \
        if (RT_LIKELY((uint32_t)(pVCpu->cpum.GstCtx.eip + (a_cbInstr) - 1U) <= pVCpu->cpum.GstCtx.cs.u32Limit)) \
        { /* likely */ } \
        else \
        { \
            Log7(("EIP out of bounds at %04x:%08RX32 LB %u - CS.LIM=%#RX32\n", \
                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, (a_cbInstr), pVCpu->cpum.GstCtx.cs.u32Limit)); \
            return iemRaiseGeneralProtectionFault0(pVCpu); \
        } \
    } while(0)
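
/* Worked example: with EIP=0xfffe, a 4 byte instruction and CS.LIM=0xffff,
   eip + cbInstr - 1 = 0x10001, which is above the limit, so the macro raises
   \#GP(0).  Note that the (uint32_t) arithmetic means a wrap at 4G passes the
   check; that is the mid-instruction wrap-around case the @todo above is
   about. */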

/**
 * Macro that considers whether we need CS.LIM checking after a branch or
 * crossing over to a new page.
 */
#define BODY_CONSIDER_CS_LIM_CHECKING(a_pTb, a_cbInstr) do { \
        int64_t const offFromLim = (int64_t)pVCpu->cpum.GstCtx.cs.u32Limit - (int64_t)pVCpu->cpum.GstCtx.eip; \
        if (offFromLim >= GUEST_PAGE_SIZE + 16 - (int32_t)(pVCpu->cpum.GstCtx.cs.u64Base & GUEST_PAGE_OFFSET_MASK)) \
        { /* likely */ } \
        else \
        { \
            Log7(("TB need CS.LIM: %p at %04x:%08RX64 LB %u; #%u offFromLim=%#RX64 CS.LIM=%#RX32 CS.BASE=%#RX64\n", \
                  (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), __LINE__, offFromLim, \
                  pVCpu->cpum.GstCtx.cs.u32Limit, pVCpu->cpum.GstCtx.cs.u64Base)); \
            RT_NOREF(a_pTb, a_cbInstr); \
            STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckNeedCsLimChecking); \
            return VINF_IEM_REEXEC_BREAK; \
        } \
    } while(0)
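
/* Worked example: with CS.BASE=0 and EIP=0x1000, any CS.LIM of 0x2010 or more
   gives offFromLim >= 0x1010 = GUEST_PAGE_SIZE + 16, i.e. the whole next page
   plus the maximum instruction length (15 bytes, rounded up to 16) is known
   to be inside the limit, so the cheap path is taken.  Anything closer to the
   limit breaks out so the TB can be recompiled with explicit CS.LIM checks. */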

/**
 * Macro that implements opcode (re-)checking.
 */
#define BODY_CHECK_OPCODES(a_pTb, a_idxRange, a_offRange, a_cbInstr) do { \
        Assert((a_idxRange) < (a_pTb)->cRanges && (a_pTb)->cRanges <= RT_ELEMENTS((a_pTb)->aRanges)); \
        Assert((a_offRange) < (a_pTb)->aRanges[(a_idxRange)].cbOpcodes); \
        /* We can use pbInstrBuf here as it will be updated when branching (and prior to executing a TB). */ \
        if (RT_LIKELY(memcmp(&pVCpu->iem.s.pbInstrBuf[(a_pTb)->aRanges[(a_idxRange)].offPhysPage + (a_offRange)], \
                             &(a_pTb)->pabOpcodes[    (a_pTb)->aRanges[(a_idxRange)].offOpcodes  + (a_offRange)], \
                             (a_pTb)->aRanges[(a_idxRange)].cbOpcodes - (a_offRange)) == 0)) \
        { /* likely */ } \
        else \
        { \
            Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; range %u, off %#x LB %#x + %#x; #%u\n", (a_pTb), \
                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), (a_idxRange), \
                  (a_pTb)->aRanges[(a_idxRange)].offOpcodes, (a_pTb)->aRanges[(a_idxRange)].cbOpcodes, (a_offRange), __LINE__)); \
            RT_NOREF(a_cbInstr); \
            return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
        } \
    } while(0)
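
/* Example (illustrative): for a range covering bytes 0x10..0x2f of a page
   (offPhysPage=0x10, cbOpcodes=0x20) and an instruction at a_offRange=0x08
   into that range, the memcmp above re-checks page bytes 0x18..0x2f against
   the copy saved in pabOpcodes when the TB was compiled.  A mismatch means
   the guest modified its own code, so the TB is retired via
   iemThreadeFuncWorkerObsoleteTb. */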

/**
 * Macro that implements TLB loading and pbInstrBuf updating for an
 * instruction crossing into a new page.
 *
 * This may long jump if we're raising a \#PF, \#GP or similar trouble.
 */
#define BODY_LOAD_TLB_FOR_NEW_PAGE(a_pTb, a_offInstr, a_idxRange, a_cbInstr) do { \
        pVCpu->iem.s.pbInstrBuf       = NULL; \
        pVCpu->iem.s.offCurInstrStart = GUEST_PAGE_SIZE - (a_offInstr); \
        pVCpu->iem.s.offInstrNextByte = GUEST_PAGE_SIZE; \
        iemOpcodeFetchBytesJmp(pVCpu, 0, NULL); \
        \
        RTGCPHYS const GCPhysNewPage = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange); \
        if (RT_LIKELY(   pVCpu->iem.s.GCPhysInstrBuf == GCPhysNewPage \
                      && pVCpu->iem.s.pbInstrBuf)) \
        { /* likely */ } \
        else \
        { \
            Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; crossing at %#x; GCPhys=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                  (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), (a_offInstr), \
                  pVCpu->iem.s.GCPhysInstrBuf, GCPhysNewPage, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
            RT_NOREF(a_cbInstr); \
            return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
        } \
    } while(0)
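
/* Example (illustrative): when an instruction has, say, its first 2 bytes on
   the old page, the macro is entered with a_offInstr=2 and primes the decoder
   state so that iemOpcodeFetchBytesJmp maps the page that follows.  If the
   guest has since remapped that virtual page to different physical memory,
   GCPhysInstrBuf no longer matches the page recorded in the TB range and the
   TB is retired as obsolete. */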

/**
 * Macro that implements TLB loading and pbInstrBuf updating when branching or
 * when crossing a page on an instruction boundary.
 *
 * This differs from BODY_LOAD_TLB_FOR_NEW_PAGE in that it will first check if
 * it is an inter-page branch and also check the page offset.
 *
 * This may long jump if we're raising a \#PF, \#GP or similar trouble.
 */
#define BODY_LOAD_TLB_AFTER_BRANCH(a_pTb, a_idxRange, a_cbInstr) do { \
        /* Is RIP within the current code page? */ \
        Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_IS_64BIT_CODE(pVCpu)); \
        uint64_t const uPc = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base; \
        uint64_t const off = uPc - pVCpu->iem.s.uInstrBufPc; \
        if (off < pVCpu->iem.s.cbInstrBufTotal) \
        { \
            Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK)); \
            Assert(pVCpu->iem.s.pbInstrBuf); \
            RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
                                                     | (a_pTb)->aRanges[(a_idxRange)].offPhysPage; \
            if (GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + off) \
            { /* we're good */ } \
            /** @todo r=bird: Not sure if we need the TB obsolete complication here. \
             *        If we're preceded by an indirect jump, there is no reason why the TB \
             *        would be 'obsolete' just because this time around the indirect jump ends \
             *        up at the same offset in a different page.  This would be real bad for \
             *        indirect trampolines/validators. */ \
            else if ((a_pTb)->aRanges[(a_idxRange)].offPhysPage != off) \
            { \
                Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; branching/1; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckBranchMisses); \
                return VINF_IEM_REEXEC_BREAK; \
            } \
            else \
            { \
                Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; branching/1; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
            } \
        } \
        else \
        { \
            /* Must translate new RIP. */ \
            pVCpu->iem.s.pbInstrBuf       = NULL; \
            pVCpu->iem.s.offCurInstrStart = 0; \
            pVCpu->iem.s.offInstrNextByte = 0; \
            iemOpcodeFetchBytesJmp(pVCpu, 0, NULL); \
            Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK) || !pVCpu->iem.s.pbInstrBuf); \
            \
            RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
                                                     | (a_pTb)->aRanges[(a_idxRange)].offPhysPage; \
            uint64_t const offNew = uPc - pVCpu->iem.s.uInstrBufPc; \
            if (   GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + offNew \
                && pVCpu->iem.s.pbInstrBuf) \
            { /* likely */ } \
            else if (   (a_pTb)->aRanges[(a_idxRange)].offPhysPage != offNew \
                     && pVCpu->iem.s.pbInstrBuf) \
            { \
                Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; branching/2; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + offNew, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckBranchMisses); \
                return VINF_IEM_REEXEC_BREAK; \
            } \
            else \
            { \
                Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; branching/2; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + offNew, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
            } \
        } \
    } while(0)
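
/* Summary of the three outcomes above: (1) the branch target resolves to the
   physical page and offset recorded in the TB range - carry on; (2) it
   resolves to a different page offset - treat it as a branch miss and break
   out so a matching TB can be looked up; (3) same offset but a different
   physical page - the mapping changed, so the TB is retired as obsolete.
   (See the @todo above on whether case 3 really needs to obsolete the TB.) */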

/**
 * Macro that implements the PC check after a conditional branch.
 */
#define BODY_CHECK_PC_AFTER_BRANCH(a_pTb, a_idxRange, a_offRange, a_cbInstr) do { \
        /* Is RIP within the current code page? */ \
        Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_IS_64BIT_CODE(pVCpu)); \
        uint64_t const uPc = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base; \
        uint64_t const off = uPc - pVCpu->iem.s.uInstrBufPc; \
        Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK)); \
        RTGCPHYS const GCPhysRangePageWithOffset = (  iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
                                                    | (a_pTb)->aRanges[(a_idxRange)].offPhysPage) \
                                                 + (a_offRange); \
        if (   GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + off \
            && off < /*pVCpu->iem.s.cbInstrBufTotal - ignore flushes; CS.LIM is checked elsewhere*/ X86_PAGE_SIZE) \
        { /* we're good */ } \
        else \
        { \
            Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; GCPhysWithOffset=%RGp hoped for %RGp, pbInstrBuf=%p - #%u\n", \
                  (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                  pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
            RT_NOREF(a_cbInstr); \
            STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckBranchMisses); \
            return VINF_IEM_REEXEC_BREAK; \
        } \
    } while(0)
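
/* Example (illustrative): a Jcc that was compiled as staying within the same
   page must land on exactly the physical address recorded for the range plus
   a_offRange.  If the guest rewrote the branch so it now lands elsewhere in
   the page, the compare fails and we break out with the branch-miss statistic
   bumped rather than raising any exception. */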


/**
 * Built-in function that checks that EIP/IP plus uParam0 is within CS.LIM,
 * raising a \#GP(0) if this isn't the case.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLim)
{
    uint32_t const cbInstr = (uint8_t)uParam0;
    RT_NOREF(uParam1, uParam2);
    BODY_CHECK_CS_LIM(cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for re-checking opcodes and CS.LIM after an instruction
 * that may have modified them.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for re-checking opcodes after an instruction that may have
 * modified them.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for re-checking opcodes and considering the need for CS.LIM
 * checking after an instruction that may have modified them.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesConsiderCsLim)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    return VINF_SUCCESS;
}


/*
 * Post-branching checkers.
 */

/**
 * Built-in function for checking CS.LIM, checking the PC and checking opcodes
 * after conditional branching within the same page.
 *
 * @see iemThreadedFunc_BltIn_CheckPcAndOpcodes
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, offRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking the PC and checking opcodes after conditional
 * branching within the same page.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckPcAndOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, offRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking the PC and checking opcodes and considering
 * the need for CS.LIM checking after conditional branching within the same
 * page.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckPcAndOpcodesConsiderCsLim)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, offRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
 * transitioning to a different code page.
 *
 * The code page transition can either be natural over onto the next page (with
 * the instruction starting at page offset zero) or by means of branching.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes when transitioning to
 * a different code page.
 *
 * The code page transition can either be natural over onto the next page (with
 * the instruction starting at page offset zero) or by means of branching.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes and considering the
 * need for CS.LIM checking when transitioning to a different code page.
 *
 * The code page transition can either be natural over onto the next page (with
 * the instruction starting at page offset zero) or by means of branching.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesLoadingTlbConsiderCsLim)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}



/*
 * Natural page crossing checkers.
 */

/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes on
 * both pages when transitioning to a different code page.
 *
 * This is used when the previous instruction requires revalidation of opcode
 * bytes and the current instruction straddles a page boundary with opcode
 * bytes in both the old and new page.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint8_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes on both pages when
 * transitioning to a different code page.
 *
 * This is used when the previous instruction requires revalidation of opcode
 * bytes and the current instruction straddles a page boundary with opcode
 * bytes in both the old and new page.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint8_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes on both pages and
 * considering the need for CS.LIM checking when transitioning to a different
 * code page.
 *
 * This is used when the previous instruction requires revalidation of opcode
 * bytes and the current instruction straddles a page boundary with opcode
 * bytes in both the old and new page.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlbConsiderCsLim)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint8_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
 * advancing naturally to a different code page.
 *
 * Only opcodes on the new page are checked.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint8_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    //uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    RT_NOREF(uParam2);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes when advancing
 * naturally to a different code page.
 *
 * Only opcodes on the new page are checked.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint8_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    //uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    RT_NOREF(uParam2);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes and considering the
 * need for CS.LIM checking when advancing naturally to a different code page.
 *
 * Only opcodes on the new page are checked.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlbConsiderCsLim)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint8_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    //uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    RT_NOREF(uParam2);
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
 * advancing naturally to a different code page with the first instruction at
 * byte 0.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    RT_NOREF(uParam2); //Assert(uParam2 == 0 /*offRange*/);
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);
    Assert(pVCpu->iem.s.offCurInstrStart == 0);
    BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes when advancing
 * naturally to a different code page with the first instruction at byte 0.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    RT_NOREF(uParam2); //Assert(uParam2 == 0 /*offRange*/);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);
    Assert(pVCpu->iem.s.offCurInstrStart == 0);
    BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes and considering the
 * need for CS.LIM checking when advancing naturally to a different code page
 * with the first instruction at byte 0.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlbConsiderCsLim)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    RT_NOREF(uParam2); //Assert(uParam2 == 0 /*offRange*/);
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);
    Assert(pVCpu->iem.s.offCurInstrStart == 0);
    BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
    return VINF_SUCCESS;
}