VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/target-x86/IEMAllHlpFpu-x86.cpp

Last change on this file was 108260, checked in by vboxsync, 7 weeks ago

VMM/IEM: Splitting up IEMInline.h. jiraref:VBP-1531

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 27.7 KB
Line 
1/* $Id: IEMAllHlpFpu-x86.cpp 108260 2025-02-17 15:24:14Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - x86 target, FPU helpers.
4 */
5
6/*
7 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_IEM
33#define VMCPU_INCL_CPUM_GST_CTX
34#ifdef IN_RING0
35# define VBOX_VMM_TARGET_X86
36#endif
37#include <VBox/vmm/iem.h>
38#include <VBox/vmm/cpum.h>
39#include <VBox/vmm/pdm.h>
40#include <VBox/vmm/pgm.h>
41#include <VBox/vmm/tm.h>
42#include <VBox/vmm/dbgf.h>
43#include <VBox/vmm/dbgftrace.h>
44#include "IEMInternal.h"
45#include <VBox/vmm/vmcc.h>
46#include <VBox/log.h>
47#include <iprt/errcore.h>
48#include <iprt/assert.h>
49#include <iprt/string.h>
50#include <iprt/x86.h>
51
52#include "IEMInline-x86.h"
53
54
55/** @name FPU access and helpers.
56 *
57 * @{
58 */
59
/**
 * Updates the x87.DS and FPUDP registers with the current memory operand.
 *
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param pFpuCtx The FPU context.
 * @param iEffSeg The effective segment register (X86_SREG_XXX index).
 * @param GCPtrEff The effective address relative to @a iEffSeg.
 */
DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
{
    /* Resolve the segment index to the guest selector value. */
    RTSEL sel;
    switch (iEffSeg)
    {
        case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
        case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
        case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
        case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
        case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
        case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
        default:
            AssertMsgFailed(("%d\n", iEffSeg));
            /* Fall back to DS on a bogus segment index. */
            sel = pVCpu->cpum.GstCtx.ds.Sel;
    }
    /** @todo pFpuCtx->DS and FPUDP needs to be kept separately. */
    if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
    {
        /* Real/V86 mode: store the 20-bit linear address (selector * 16 + offset),
           no selector is recorded. */
        pFpuCtx->DS = 0;
        pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
    }
    else if (!IEM_IS_LONG_MODE(pVCpu)) /** @todo this is weird. explain. */
    {
        pFpuCtx->DS = sel;
        pFpuCtx->FPUDP = GCPtrEff;
    }
    else
        /* Long mode: the 64-bit offset overlays both the FPUDP and DS fields of
           the fxsave image, so write it as one 64-bit store. */
        *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
}
97
98
99/**
100 * Rotates the stack registers in the push direction.
101 *
102 * @param pFpuCtx The FPU context.
103 * @remarks This is a complete waste of time, but fxsave stores the registers in
104 * stack order.
105 */
106DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
107{
108 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
109 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
110 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
111 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
112 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
113 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
114 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
115 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
116 pFpuCtx->aRegs[0].r80 = r80Tmp;
117}
118
119
120/**
121 * Rotates the stack registers in the pop direction.
122 *
123 * @param pFpuCtx The FPU context.
124 * @remarks This is a complete waste of time, but fxsave stores the registers in
125 * stack order.
126 */
127DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
128{
129 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
130 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
131 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
132 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
133 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
134 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
135 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
136 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
137 pFpuCtx->aRegs[7].r80 = r80Tmp;
138}
139
140
141/**
142 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
143 * exception prevents it.
144 *
145 * @param pVCpu The cross context virtual CPU structure of the calling thread.
146 * @param pResult The FPU operation result to push.
147 * @param pFpuCtx The FPU context.
148 */
149static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
150{
151 /* Update FSW and bail if there are pending exceptions afterwards. */
152 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
153 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
154 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
155 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
156 {
157 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FCW & X86_FSW_ES))
158 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
159 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
160 pFpuCtx->FSW = fFsw;
161 return;
162 }
163
164 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
165 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
166 {
167 /* All is fine, push the actual value. */
168 pFpuCtx->FTW |= RT_BIT(iNewTop);
169 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
170 }
171 else if (pFpuCtx->FCW & X86_FCW_IM)
172 {
173 /* Masked stack overflow, push QNaN. */
174 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
175 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
176 }
177 else
178 {
179 /* Raise stack overflow, don't push anything. */
180 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
181 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
182 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
183 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
184 return;
185 }
186
187 fFsw &= ~X86_FSW_TOP_MASK;
188 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
189 pFpuCtx->FSW = fFsw;
190
191 iemFpuRotateStackPush(pFpuCtx);
192 RT_NOREF(pVCpu);
193}
194
195
/**
 * Stores a result in a FPU register and updates the FSW and FTW.
 *
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param pFpuCtx The FPU context.
 * @param pResult The result to store.
 * @param iStReg Which FPU register to store it in (relative to TOP).
 */
static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
{
    Assert(iStReg < 8);
    uint16_t fNewFsw = pFpuCtx->FSW;
    /* Translate the TOP-relative ST(i) index into an absolute register index. */
    uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
    /* Condition codes come from the instruction result; TOP is kept as-is. */
    fNewFsw &= ~X86_FSW_C_MASK;
    fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
    /* Log when the exception-summary bit goes from clear to set. */
    if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
        Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
               pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
    pFpuCtx->FSW = fNewFsw;
    pFpuCtx->FTW |= RT_BIT(iReg);   /* mark the register as in use */
    pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
    RT_NOREF(pVCpu);
}
219
220
221/**
222 * Only updates the FPU status word (FSW) with the result of the current
223 * instruction.
224 *
225 * @param pVCpu The cross context virtual CPU structure of the calling thread.
226 * @param pFpuCtx The FPU context.
227 * @param u16FSW The FSW output of the current instruction.
228 */
229static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
230{
231 uint16_t fNewFsw = pFpuCtx->FSW;
232 fNewFsw &= ~X86_FSW_C_MASK;
233 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
234 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
235 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
236 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
237 pFpuCtx->FSW = fNewFsw;
238 RT_NOREF(pVCpu);
239}
240
241
/**
 * Pops one item off the FPU stack if no pending exception prevents it.
 *
 * @param pFpuCtx The FPU context.
 */
static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
{
    /* Check pending exceptions: bail if any unmasked IE/ZE/DE is signalled. */
    uint16_t uFSW = pFpuCtx->FSW;
    if (  (pFpuCtx->FSW  & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
        & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
        return;

    /* Advance TOP (pop increments it; adding 9 is the same as adding 1 modulo 8
       once masked back into the 3-bit TOP field). */
    uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
    uFSW &= ~X86_FSW_TOP_MASK;
    uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
    pFpuCtx->FSW = uFSW;

    /* Mark the previous ST0 as empty. */
    iOldTop >>= X86_FSW_TOP_SHIFT;
    pFpuCtx->FTW &= ~RT_BIT(iOldTop);

    /* Rotate the registers (fxsave keeps them in stack order). */
    iemFpuRotateStackPop(pFpuCtx);
}
268
269
270/**
271 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
272 *
273 * @param pVCpu The cross context virtual CPU structure of the calling thread.
274 * @param pResult The FPU operation result to push.
275 * @param uFpuOpcode The FPU opcode value.
276 */
277void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
278{
279 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
280 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
281 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
282}
283
284
285/**
286 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
287 * and sets FPUDP and FPUDS.
288 *
289 * @param pVCpu The cross context virtual CPU structure of the calling thread.
290 * @param pResult The FPU operation result to push.
291 * @param iEffSeg The effective segment register.
292 * @param GCPtrEff The effective address relative to @a iEffSeg.
293 * @param uFpuOpcode The FPU opcode value.
294 */
295void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff,
296 uint16_t uFpuOpcode) RT_NOEXCEPT
297{
298 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
299 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
300 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
301 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
302}
303
304
/**
 * Replace ST0 with the first value and push the second onto the FPU stack,
 * unless a pending exception prevents it.
 *
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param pResult The FPU operation result to store and push.
 * @param uFpuOpcode The FPU opcode value.
 */
void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
{
    PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);

    /* Update FSW and bail if there are pending exceptions afterwards. */
    uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
    fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
    if (  (fFsw            & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
        & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
    {
        /* Log when the exception-summary bit transitions from clear to set. */
        if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
            Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
                   pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
        pFpuCtx->FSW = fFsw;
        return;
    }

    uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK; /* TOP - 1 modulo 8. */
    if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
    {
        /* All is fine, push the actual value. aRegs[] is TOP-relative *before*
           the rotate below: [0] is the new ST1 slot, [7] becomes the new ST0. */
        pFpuCtx->FTW |= RT_BIT(iNewTop);
        pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
        pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
    }
    else if (pFpuCtx->FCW & X86_FCW_IM)
    {
        /* Masked stack overflow, push QNaN. */
        fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
        iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
        iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
    }
    else
    {
        /* Raise stack overflow, don't push anything. */
        pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
        pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
        Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
               pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
        return;
    }

    /* Commit the new TOP and rotate the register file into stack order. */
    fFsw &= ~X86_FSW_TOP_MASK;
    fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
    pFpuCtx->FSW = fFsw;

    iemFpuRotateStackPush(pFpuCtx);
}
362
363
364/**
365 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
366 * FOP.
367 *
368 * @param pVCpu The cross context virtual CPU structure of the calling thread.
369 * @param pResult The result to store.
370 * @param iStReg Which FPU register to store it in.
371 * @param uFpuOpcode The FPU opcode value.
372 */
373void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
374{
375 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
376 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
377 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
378}
379
380
381/**
382 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
383 * FOP, and then pops the stack.
384 *
385 * @param pVCpu The cross context virtual CPU structure of the calling thread.
386 * @param pResult The result to store.
387 * @param iStReg Which FPU register to store it in.
388 * @param uFpuOpcode The FPU opcode value.
389 */
390void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
391{
392 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
393 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
394 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
395 iemFpuMaybePopOne(pFpuCtx);
396}
397
398
399/**
400 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
401 * FPUDP, and FPUDS.
402 *
403 * @param pVCpu The cross context virtual CPU structure of the calling thread.
404 * @param pResult The result to store.
405 * @param iStReg Which FPU register to store it in.
406 * @param iEffSeg The effective memory operand selector register.
407 * @param GCPtrEff The effective memory operand offset.
408 * @param uFpuOpcode The FPU opcode value.
409 */
410void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
411 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
412{
413 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
414 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
415 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
416 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
417}
418
419
420/**
421 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
422 * FPUDP, and FPUDS, and then pops the stack.
423 *
424 * @param pVCpu The cross context virtual CPU structure of the calling thread.
425 * @param pResult The result to store.
426 * @param iStReg Which FPU register to store it in.
427 * @param iEffSeg The effective memory operand selector register.
428 * @param GCPtrEff The effective memory operand offset.
429 * @param uFpuOpcode The FPU opcode value.
430 */
431void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
432 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
433{
434 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
435 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
436 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
437 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
438 iemFpuMaybePopOne(pFpuCtx);
439}
440
441
442/**
443 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
444 *
445 * @param pVCpu The cross context virtual CPU structure of the calling thread.
446 * @param uFpuOpcode The FPU opcode value.
447 */
448void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
449{
450 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
451 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
452}
453
454
455/**
456 * Updates the FSW, FOP, FPUIP, and FPUCS.
457 *
458 * @param pVCpu The cross context virtual CPU structure of the calling thread.
459 * @param u16FSW The FSW from the current instruction.
460 * @param uFpuOpcode The FPU opcode value.
461 */
462void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
463{
464 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
465 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
466 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
467}
468
469
470/**
471 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
472 *
473 * @param pVCpu The cross context virtual CPU structure of the calling thread.
474 * @param u16FSW The FSW from the current instruction.
475 * @param uFpuOpcode The FPU opcode value.
476 */
477void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
478{
479 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
480 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
481 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
482 iemFpuMaybePopOne(pFpuCtx);
483}
484
485
486/**
487 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
488 *
489 * @param pVCpu The cross context virtual CPU structure of the calling thread.
490 * @param u16FSW The FSW from the current instruction.
491 * @param iEffSeg The effective memory operand selector register.
492 * @param GCPtrEff The effective memory operand offset.
493 * @param uFpuOpcode The FPU opcode value.
494 */
495void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
496{
497 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
498 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
499 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
500 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
501}
502
503
504/**
505 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
506 *
507 * @param pVCpu The cross context virtual CPU structure of the calling thread.
508 * @param u16FSW The FSW from the current instruction.
509 * @param uFpuOpcode The FPU opcode value.
510 */
511void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
512{
513 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
514 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
515 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
516 iemFpuMaybePopOne(pFpuCtx);
517 iemFpuMaybePopOne(pFpuCtx);
518}
519
520
521/**
522 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
523 *
524 * @param pVCpu The cross context virtual CPU structure of the calling thread.
525 * @param u16FSW The FSW from the current instruction.
526 * @param iEffSeg The effective memory operand selector register.
527 * @param GCPtrEff The effective memory operand offset.
528 * @param uFpuOpcode The FPU opcode value.
529 */
530void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff,
531 uint16_t uFpuOpcode) RT_NOEXCEPT
532{
533 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
534 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
535 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
536 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
537 iemFpuMaybePopOne(pFpuCtx);
538}
539
540
541/**
542 * Worker routine for raising an FPU stack underflow exception.
543 *
544 * @param pVCpu The cross context virtual CPU structure of the calling thread.
545 * @param pFpuCtx The FPU context.
546 * @param iStReg The stack register being accessed.
547 */
548static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
549{
550 Assert(iStReg < 8 || iStReg == UINT8_MAX);
551 if (pFpuCtx->FCW & X86_FCW_IM)
552 {
553 /* Masked underflow. */
554 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
555 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
556 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
557 if (iStReg != UINT8_MAX)
558 {
559 pFpuCtx->FTW |= RT_BIT(iReg);
560 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
561 }
562 }
563 else
564 {
565 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
566 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
567 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
568 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
569 }
570 RT_NOREF(pVCpu);
571}
572
573
574/**
575 * Raises a FPU stack underflow exception.
576 *
577 * @param pVCpu The cross context virtual CPU structure of the calling thread.
578 * @param iStReg The destination register that should be loaded
579 * with QNaN if \#IS is not masked. Specify
580 * UINT8_MAX if none (like for fcom).
581 * @param uFpuOpcode The FPU opcode value.
582 */
583void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
584{
585 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
586 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
587 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
588}
589
590
591void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
592{
593 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
594 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
595 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
596 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
597}
598
599
600void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
601{
602 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
603 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
604 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
605 iemFpuMaybePopOne(pFpuCtx);
606}
607
608
609void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff,
610 uint16_t uFpuOpcode) RT_NOEXCEPT
611{
612 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
613 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
614 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
615 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
616 iemFpuMaybePopOne(pFpuCtx);
617}
618
619
620void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
621{
622 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
623 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
624 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
625 iemFpuMaybePopOne(pFpuCtx);
626 iemFpuMaybePopOne(pFpuCtx);
627}
628
629
/**
 * Raises a FPU stack underflow exception for an instruction pushing a result
 * value onto the stack.
 *
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param uFpuOpcode The FPU opcode value.
 */
void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
{
    PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);

    if (pFpuCtx->FCW & X86_FCW_IM)
    {
        /* Masked overflow - Push QNaN. */
        /* NOTE(review): comment says "overflow" though the function handles a
           push *underflow*; the mechanics (decrement TOP, push QNaN) are the
           same either way - confirm against the other half of the file. */
        uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK; /* TOP - 1 modulo 8. */
        pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
        pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
        pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
        pFpuCtx->FTW |= RT_BIT(iNewTop);
        iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
        iemFpuRotateStackPush(pFpuCtx);
    }
    else
    {
        /* Exception pending - don't change TOP or the register stack. */
        pFpuCtx->FSW &= ~X86_FSW_C_MASK;
        pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
        Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
               pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
    }
}
655
656
/**
 * Raises a FPU stack underflow exception for an instruction pushing a result
 * value onto the stack and replacing ST0 (e.g. fptan).
 *
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param uFpuOpcode The FPU opcode value.
 */
void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
{
    PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);

    if (pFpuCtx->FCW & X86_FCW_IM)
    {
        /* Masked overflow - Push QNaN. */
        /* NOTE(review): "overflow" wording looks copy-pasted; this path writes
           QNaN into both the old ST0 slot ([0]) and the newly pushed slot ([7])
           before rotating - confirm intent. */
        uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK; /* TOP - 1 modulo 8. */
        pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
        pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
        pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
        pFpuCtx->FTW |= RT_BIT(iNewTop);
        iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
        iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
        iemFpuRotateStackPush(pFpuCtx);
    }
    else
    {
        /* Exception pending - don't change TOP or the register stack. */
        pFpuCtx->FSW &= ~X86_FSW_C_MASK;
        pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
        Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
               pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
    }
}
683
684
/**
 * Worker routine for raising an FPU stack overflow exception on a push.
 *
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param pFpuCtx The FPU context.
 */
static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
{
    if (pFpuCtx->FCW & X86_FCW_IM)
    {
        /* Masked overflow: C1 set (indicating overflow) together with IE+SF,
           TOP decremented and a QNaN pushed into the new slot. */
        uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK; /* TOP - 1 modulo 8. */
        pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
        pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
        pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
        pFpuCtx->FTW |= RT_BIT(iNewTop);
        iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
        iemFpuRotateStackPush(pFpuCtx);
    }
    else
    {
        /* Exception pending - don't change TOP or the register stack. */
        pFpuCtx->FSW &= ~X86_FSW_C_MASK;
        pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
        Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
               pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
    }
    RT_NOREF(pVCpu);
}
714
715
716/**
717 * Raises a FPU stack overflow exception on a push.
718 *
719 * @param pVCpu The cross context virtual CPU structure of the calling thread.
720 * @param uFpuOpcode The FPU opcode value.
721 */
722void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
723{
724 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
725 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
726 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
727}
728
729
730/**
731 * Raises a FPU stack overflow exception on a push with a memory operand.
732 *
733 * @param pVCpu The cross context virtual CPU structure of the calling thread.
734 * @param iEffSeg The effective memory operand selector register.
735 * @param GCPtrEff The effective memory operand offset.
736 * @param uFpuOpcode The FPU opcode value.
737 */
738void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
739{
740 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
741 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
742 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
743 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
744}
745
746/** @} */
747
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette