Changeset 36829 in vbox for trunk/src/VBox/VMM
- Timestamp: Apr 24, 2011 1:45:25 PM (14 years ago)
- Location: trunk/src/VBox/VMM
- Files: 4 edited
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
r36828 r36829 45 45 * Header Files * 46 46 *******************************************************************************/ 47 //#define RT_STRICT48 //#define LOG_ENABLED49 47 #define LOG_GROUP LOG_GROUP_EM /** @todo add log group */ 50 48 #include <VBox/vmm/iem.h> … … 192 190 /** Temporary hack to disable the double execution. Will be removed in favor 193 191 * of a dedicated execution mode in EM. */ 194 #define IEM_VERIFICATION_MODE_NO_REM192 //#define IEM_VERIFICATION_MODE_NO_REM 195 193 196 194 /** Used to shut up GCC warnings about variables that 'may be used uninitialized' … … 516 514 static VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess); 517 515 static VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc); 518 #if defined(IEM_VERIFICATION_MODE) && !defined(IEM_VERIFICATION_MODE_NO_REM)516 #ifdef IEM_VERIFICATION_MODE 519 517 static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu); 520 518 static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue); … … 1333 1331 typedef int ignore_semicolon 1334 1332 1333 /** Stubs an opcode. */ 1334 #define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \ 1335 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \ 1336 { \ 1337 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \ 1338 iemOpStubMsg2(pIemCpu); \ 1339 RTAssertPanic(); \ 1340 return VERR_NOT_IMPLEMENTED; \ 1341 } \ 1342 typedef int ignore_semicolon 1343 1335 1344 1336 1345 … … 2167 2176 static int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem) 2168 2177 { 2169 #if defined(IEM_VERIFICATION_MODE) && !defined(IEM_VERIFICATION_MODE_NO_REM)2178 #ifdef IEM_VERIFICATION_MODE 2170 2179 /* Force the alternative path so we can ignore writes. */ 2171 if ( fAccess & IEM_ACCESS_TYPE_WRITE)2180 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pIemCpu->fNoRem) 2172 2181 return VERR_PGM_PHYS_TLB_CATCH_ALL; 2173 2182 #endif … … 2251 2260 */ 2252 2261 int rc; 2253 #if !defined(IEM_VERIFICATION_MODE) || defined(IEM_VERIFICATION_MODE_NO_REM) /* No memory changes in verification mode. */ 2254 if (!pIemCpu->aMemBbMappings[iMemMap].fUnassigned) 2262 if (!pIemCpu->aMemBbMappings[iMemMap].fUnassigned && IEM_VERIFICATION_ENABLED(pIemCpu)) 2255 2263 { 2256 2264 uint16_t const cbFirst = pIemCpu->aMemBbMappings[iMemMap].cbFirst; … … 2283 2291 } 2284 2292 else 2285 #endif2286 2293 rc = VINF_SUCCESS; 2287 2294 2288 #if defined(IEM_VERIFICATION_MODE) && !defined(IEM_VERIFICATION_MODE_NO_REM)2295 #ifdef IEM_VERIFICATION_MODE 2289 2296 /* 2290 2297 * Record the write(s). 
2291 2298 */ 2292 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu); 2293 if (pEvtRec) 2294 { 2295 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE; 2296 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst; 2297 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbFirst; 2298 memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst); 2299 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext; 2300 *pIemCpu->ppIemEvtRecNext = pEvtRec; 2301 } 2302 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond) 2303 { 2304 pEvtRec = iemVerifyAllocRecord(pIemCpu); 2299 if (!pIemCpu->fNoRem) 2300 { 2301 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu); 2305 2302 if (pEvtRec) 2306 2303 { 2307 2304 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE; 2308 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond; 2309 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbSecond; 2310 memcpy(pEvtRec->u.RamWrite.ab, 2311 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst], 2312 pIemCpu->aMemBbMappings[iMemMap].cbSecond); 2305 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst; 2306 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbFirst; 2307 memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst); 2313 2308 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext; 2314 2309 *pIemCpu->ppIemEvtRecNext = pEvtRec; 2310 } 2311 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond) 2312 { 2313 pEvtRec = iemVerifyAllocRecord(pIemCpu); 2314 if (pEvtRec) 2315 { 2316 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE; 2317 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond; 2318 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbSecond; 2319 memcpy(pEvtRec->u.RamWrite.ab, 2320 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst], 2321 pIemCpu->aMemBbMappings[iMemMap].cbSecond); 2322 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext; 2323 *pIemCpu->ppIemEvtRecNext = pEvtRec; 2324 } 2315 2325 } 2316 2326 } … … 2376 2386 } 2377 2387 2378 #if defined(IEM_VERIFICATION_MODE) && !defined(IEM_VERIFICATION_MODE_NO_REM) 2379 /* 2380 * Record the reads. 2381 */ 2382 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu); 2383 if (pEvtRec) 2384 { 2385 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ; 2386 pEvtRec->u.RamRead.GCPhys = GCPhysFirst; 2387 pEvtRec->u.RamRead.cb = cbFirstPage; 2388 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext; 2389 *pIemCpu->ppIemEvtRecNext = pEvtRec; 2390 } 2391 pEvtRec = iemVerifyAllocRecord(pIemCpu); 2392 if (pEvtRec) 2393 { 2394 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ; 2395 pEvtRec->u.RamRead.GCPhys = GCPhysSecond; 2396 pEvtRec->u.RamRead.cb = cbSecondPage; 2397 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext; 2398 *pIemCpu->ppIemEvtRecNext = pEvtRec; 2388 #ifdef IEM_VERIFICATION_MODE 2389 if (!pIemCpu->fNoRem) 2390 { 2391 /* 2392 * Record the reads. 
2393 */ 2394 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu); 2395 if (pEvtRec) 2396 { 2397 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ; 2398 pEvtRec->u.RamRead.GCPhys = GCPhysFirst; 2399 pEvtRec->u.RamRead.cb = cbFirstPage; 2400 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext; 2401 *pIemCpu->ppIemEvtRecNext = pEvtRec; 2402 } 2403 pEvtRec = iemVerifyAllocRecord(pIemCpu); 2404 if (pEvtRec) 2405 { 2406 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ; 2407 pEvtRec->u.RamRead.GCPhys = GCPhysSecond; 2408 pEvtRec->u.RamRead.cb = cbSecondPage; 2409 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext; 2410 *pIemCpu->ppIemEvtRecNext = pEvtRec; 2411 } 2399 2412 } 2400 2413 #endif … … 2463 2476 } 2464 2477 2465 #if defined(IEM_VERIFICATION_MODE) && !defined(IEM_VERIFICATION_MODE_NO_REM) 2466 /* 2467 * Record the read. 2468 */ 2469 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu); 2470 if (pEvtRec) 2471 { 2472 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ; 2473 pEvtRec->u.RamRead.GCPhys = GCPhysFirst; 2474 pEvtRec->u.RamRead.cb = cbMem; 2475 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext; 2476 *pIemCpu->ppIemEvtRecNext = pEvtRec; 2478 #ifdef IEM_VERIFICATION_MODE 2479 if (!pIemCpu->fNoRem) 2480 { 2481 /* 2482 * Record the read. 2483 */ 2484 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu); 2485 if (pEvtRec) 2486 { 2487 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ; 2488 pEvtRec->u.RamRead.GCPhys = GCPhysFirst; 2489 pEvtRec->u.RamRead.cb = cbMem; 2490 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext; 2491 *pIemCpu->ppIemEvtRecNext = pEvtRec; 2492 } 2477 2493 } 2478 2494 #endif … … 4988 5004 if (rcStrict == VINF_SUCCESS) 4989 5005 { 4990 #if !defined(IEM_VERIFICATION_MODE) || defined(IEM_VERIFICATION_MODE_NO_REM) 4991 rcStrict = CPUMSetGuestGDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit); 4992 #else 4993 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); 4994 pCtx->gdtr.cbGdt = cbLimit; 4995 pCtx->gdtr.pGdt = GCPtrBase; 4996 #endif 5006 if (IEM_VERIFICATION_ENABLED(pIemCpu)) 5007 rcStrict = CPUMSetGuestGDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit); 5008 else 5009 { 5010 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); 5011 pCtx->gdtr.cbGdt = cbLimit; 5012 pCtx->gdtr.pGdt = GCPtrBase; 5013 } 4997 5014 if (rcStrict == VINF_SUCCESS) 4998 5015 iemRegAddToRip(pIemCpu, cbInstr); … … 5023 5040 if (rcStrict == VINF_SUCCESS) 5024 5041 { 5025 #if !defined(IEM_VERIFICATION_MODE) || defined(IEM_VERIFICATION_MODE_NO_REM) 5026 rcStrict = CPUMSetGuestIDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit); 5027 #else 5028 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); 5029 pCtx->idtr.cbIdt = cbLimit; 5030 pCtx->idtr.pIdt = GCPtrBase; 5031 #endif 5042 if (IEM_VERIFICATION_ENABLED(pIemCpu)) 5043 rcStrict = CPUMSetGuestIDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit); 5044 else 5045 { 5046 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); 5047 pCtx->idtr.cbIdt = cbLimit; 5048 pCtx->idtr.pIdt = GCPtrBase; 5049 } 5032 5050 if (rcStrict == VINF_SUCCESS) 5033 5051 iemRegAddToRip(pIemCpu, cbInstr); … … 5059 5077 case 4: crX = pCtx->cr4; break; 5060 5078 case 8: 5061 #if !defined(IEM_VERIFICATION_MODE) || defined(IEM_VERIFICATION_MODE_NO_REM) 5062 AssertFailedReturn(VERR_NOT_IMPLEMENTED); /** @todo implement CR8 reading and writing. */ 5063 #else 5064 crX = 0xff; 5065 #endif 5079 if (IEM_VERIFICATION_ENABLED(pIemCpu)) 5080 AssertFailedReturn(VERR_NOT_IMPLEMENTED); /** @todo implement CR8 reading and writing. */ 5081 else 5082 crX = 0xff; 5066 5083 break; 5067 5084 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */ … … 5166 5183 * Change CR0. 
5167 5184 */ 5168 #if !defined(IEM_VERIFICATION_MODE) || defined(IEM_VERIFICATION_MODE_NO_REM) 5169 rc = CPUMSetGuestCR0(pVCpu, NewCrX); 5170 AssertRCSuccessReturn(rc, RT_FAILURE_NP(rc) ? rc : VERR_INTERNAL_ERROR_3); 5171 #else 5172 pCtx->cr0 = NewCrX; 5173 #endif 5185 if (IEM_VERIFICATION_ENABLED(pIemCpu)) 5186 { 5187 rc = CPUMSetGuestCR0(pVCpu, NewCrX); 5188 AssertRCSuccessReturn(rc, RT_FAILURE_NP(rc) ? rc : VERR_INTERNAL_ERROR_3); 5189 } 5190 else 5191 pCtx->cr0 = NewCrX; 5174 5192 Assert(pCtx->cr0 == NewCrX); 5175 5193 … … 5186 5204 NewEFER &= ~MSR_K6_EFER_LME; 5187 5205 5188 #if !defined(IEM_VERIFICATION_MODE) || defined(IEM_VERIFICATION_MODE_NO_REM) 5189 CPUMSetGuestEFER(pVCpu, NewEFER); 5190 #else 5191 pCtx->msrEFER = NewEFER; 5192 #endif 5206 if (IEM_VERIFICATION_ENABLED(pIemCpu)) 5207 CPUMSetGuestEFER(pVCpu, NewEFER); 5208 else 5209 pCtx->msrEFER = NewEFER; 5193 5210 Assert(pCtx->msrEFER == NewEFER); 5194 5211 } 5195 5212 5196 #if !defined(IEM_VERIFICATION_MODE) || defined(IEM_VERIFICATION_MODE_NO_REM)5197 5213 /* 5198 5214 * Inform PGM. 5199 5215 */ 5200 if ( (NewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) 5201 != (OldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) ) 5216 if (IEM_VERIFICATION_ENABLED(pIemCpu)) 5202 5217 { 5203 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */); 5204 AssertRCReturn(rc, rc); 5205 /* ignore informational status codes */ 5218 if ( (NewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) 5219 != (OldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) ) 5220 { 5221 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */); 5222 AssertRCReturn(rc, rc); 5223 /* ignore informational status codes */ 5224 } 5225 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER); 5226 /** @todo Status code management. */ 5206 5227 } 5207 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER); 5208 /** @todo Status code management. */ 5209 #else 5210 rcStrict = VINF_SUCCESS; 5211 #endif 5228 else 5229 rcStrict = VINF_SUCCESS; 5212 5230 break; 5213 5231 } … … 5256 5274 5257 5275 /* Make the change. */ 5258 #if !defined(IEM_VERIFICATION_MODE) || defined(IEM_VERIFICATION_MODE_NO_REM)5259 rc = CPUMSetGuestCR3(pVCpu, NewCrX);5260 AssertRCSuccessReturn(rc, rc);5261 #else 5262 pCtx->cr3 = NewCrX;5263 #endif 5264 5265 #if !defined(IEM_VERIFICATION_MODE) || defined(IEM_VERIFICATION_MODE_NO_REM) 5276 if (IEM_VERIFICATION_ENABLED(pIemCpu)) 5277 { 5278 rc = CPUMSetGuestCR3(pVCpu, NewCrX); 5279 AssertRCSuccessReturn(rc, rc); 5280 } 5281 else 5282 pCtx->cr3 = NewCrX; 5283 5266 5284 /* Inform PGM. */ 5267 if ( pCtx->cr0 & X86_CR0_PG)5285 if (IEM_VERIFICATION_ENABLED(pIemCpu)) 5268 5286 { 5269 rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr3 & X86_CR4_PGE)); 5270 AssertRCReturn(rc, rc); 5271 /* ignore informational status codes */ 5272 /** @todo status code management */ 5287 if (pCtx->cr0 & X86_CR0_PG) 5288 { 5289 rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr3 & X86_CR4_PGE)); 5290 AssertRCReturn(rc, rc); 5291 /* ignore informational status codes */ 5292 /** @todo status code management */ 5293 } 5273 5294 } 5274 #endif5275 5295 rcStrict = VINF_SUCCESS; 5276 5296 break; … … 5315 5335 * Change it. 
5316 5336 */ 5317 #if !defined(IEM_VERIFICATION_MODE) || defined(IEM_VERIFICATION_MODE_NO_REM) 5318 rc = CPUMSetGuestCR4(pVCpu, NewCrX); 5319 AssertRCSuccessReturn(rc, rc); 5320 #else 5321 pCtx->cr4 = NewCrX; 5322 #endif 5337 if (IEM_VERIFICATION_ENABLED(pIemCpu)) 5338 { 5339 rc = CPUMSetGuestCR4(pVCpu, NewCrX); 5340 AssertRCSuccessReturn(rc, rc); 5341 } 5342 else 5343 pCtx->cr4 = NewCrX; 5323 5344 Assert(pCtx->cr4 == NewCrX); 5324 5345 … … 5326 5347 * Notify SELM and PGM. 5327 5348 */ 5328 #if !defined(IEM_VERIFICATION_MODE) || defined(IEM_VERIFICATION_MODE_NO_REM) 5329 /* SELM - VME may change things wrt to the TSS shadowing. */ 5330 if ((NewCrX ^ OldCrX) & X86_CR4_VME) 5331 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS); 5332 5333 /* PGM - flushing and mode. */ 5334 if ( (NewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) 5335 != (OldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) ) 5349 if (IEM_VERIFICATION_ENABLED(pIemCpu)) 5336 5350 { 5337 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */); 5338 AssertRCReturn(rc, rc); 5339 /* ignore informational status codes */ 5351 /* SELM - VME may change things wrt to the TSS shadowing. */ 5352 if ((NewCrX ^ OldCrX) & X86_CR4_VME) 5353 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS); 5354 5355 /* PGM - flushing and mode. */ 5356 if ( (NewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) 5357 != (OldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) ) 5358 { 5359 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */); 5360 AssertRCReturn(rc, rc); 5361 /* ignore informational status codes */ 5362 } 5363 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER); 5364 /** @todo Status code management. */ 5340 5365 } 5341 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER); 5342 /** @todo Status code management. */ 5343 #else 5344 rcStrict = VINF_SUCCESS; 5345 #endif 5366 else 5367 rcStrict = VINF_SUCCESS; 5346 5368 break; 5347 5369 } … … 5351 5373 */ 5352 5374 case 8: 5353 #if !defined(IEM_VERIFICATION_MODE) || defined(IEM_VERIFICATION_MODE_NO_REM) 5354 AssertFailedReturn(VERR_NOT_IMPLEMENTED); /** @todo implement CR8 reading and writing. */ 5355 #else 5356 rcStrict = VINF_SUCCESS; 5357 #endif 5375 if (IEM_VERIFICATION_ENABLED(pIemCpu)) 5376 AssertFailedReturn(VERR_NOT_IMPLEMENTED); /** @todo implement CR8 reading and writing. 
*/ 5377 else 5378 rcStrict = VINF_SUCCESS; 5358 5379 break; 5359 5380 … … 5392 5413 */ 5393 5414 uint32_t u32Value; 5394 #if !defined(IEM_VERIFICATION_MODE) || defined(IEM_VERIFICATION_MODE_NO_REM) 5395 rcStrict = IOMIOPortRead(IEMCPU_TO_VM(pIemCpu), u16Port, &u32Value, cbReg); 5396 #else 5397 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, cbReg); 5398 #endif 5415 if (IEM_VERIFICATION_ENABLED(pIemCpu)) 5416 rcStrict = IOMIOPortRead(IEMCPU_TO_VM(pIemCpu), u16Port, &u32Value, cbReg); 5417 else 5418 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, cbReg); 5399 5419 if (IOM_SUCCESS(rcStrict)) 5400 5420 { … … 5457 5477 default: AssertFailedReturn(VERR_INTERNAL_ERROR_3); 5458 5478 } 5459 # if !defined(IEM_VERIFICATION_MODE) || defined(IEM_VERIFICATION_MODE_NO_REM) 5460 VBOXSTRICTRC rc = IOMIOPortWrite(IEMCPU_TO_VM(pIemCpu), u16Port, u32Value, cbReg);5461 # else 5462 VBOXSTRICTRC rc = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, u32Value, cbReg);5463 # endif 5479 VBOXSTRICTRC rc; 5480 if (IEM_VERIFICATION_ENABLED(pIemCpu)) 5481 rc = IOMIOPortWrite(IEMCPU_TO_VM(pIemCpu), u16Port, u32Value, cbReg); 5482 else 5483 rc = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, u32Value, cbReg); 5464 5484 if (IOM_SUCCESS(rc)) 5465 5485 { … … 6310 6330 { 6311 6331 PCPUMCTX pOrgCtx = pIemCpu->CTX_SUFF(pCtx); 6312 6313 # ifndef IEM_VERIFICATION_MODE_NO_REM 6332 pIemCpu->fNoRem = !LogIsEnabled(); /* logging triggers the no-rem/rem verification stuff */ 6333 6314 6334 /* 6315 6335 * Switch state. 6316 6336 */ 6317 static CPUMCTX s_DebugCtx; /* Ugly! */ 6318 6319 s_DebugCtx = *pOrgCtx; 6320 pIemCpu->CTX_SUFF(pCtx) = &s_DebugCtx; 6321 # endif 6337 if (!IEM_VERIFICATION_ENABLED(pIemCpu)) 6338 { 6339 static CPUMCTX s_DebugCtx; /* Ugly! */ 6340 6341 s_DebugCtx = *pOrgCtx; 6342 pIemCpu->CTX_SUFF(pCtx) = &s_DebugCtx; 6343 } 6322 6344 6323 6345 /* … … 6332 6354 Log(("Injecting trap %#x\n", TRPMGetTrapNo(pVCpu))); 6333 6355 iemCImpl_int(pIemCpu, 0, TRPMGetTrapNo(pVCpu), false); 6334 # ifdef IEM_VERIFICATION_MODE_NO_REM 6335 TRPMResetTrap(pVCpu); 6336 # endif 6356 if (IEM_VERIFICATION_ENABLED(pIemCpu)) 6357 TRPMResetTrap(pVCpu); 6337 6358 } 6338 6359 … … 6345 6366 pIemCpu->fShiftOfHack= false; 6346 6367 6347 # ifndef IEM_VERIFICATION_MODE_NO_REM 6348 /*6349 * Free all verification records.6350 */6351 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pIemEvtRecHead;6352 pIemCpu->pIemEvtRecHead = NULL;6353 pIemCpu->ppIemEvtRecNext = &pIemCpu->pIemEvtRecHead;6354 do6355 {6356 while (pEvtRec)6357 {6358 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;6359 pEvtRec->pNext = pIemCpu->pFreeEvtRec;6360 pIemCpu->pFreeEvtRec = pEvtRec;6361 pEvtRec = pNext;6362 }6363 pEvtRec = pIemCpu->pOtherEvtRecHead;6364 pIemCpu->pOtherEvtRecHead = NULL;6365 pIemCpu->ppOtherEvtRecNext = &pIemCpu->pOtherEvtRecHead;6366 } while (pEvtRec);6367 # endif 6368 }6369 6370 6371 # ifndef IEM_VERIFICATION_MODE_NO_REM 6368 if (!IEM_VERIFICATION_ENABLED(pIemCpu)) 6369 { 6370 /* 6371 * Free all verification records. 
6372 */ 6373 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pIemEvtRecHead; 6374 pIemCpu->pIemEvtRecHead = NULL; 6375 pIemCpu->ppIemEvtRecNext = &pIemCpu->pIemEvtRecHead; 6376 do 6377 { 6378 while (pEvtRec) 6379 { 6380 PIEMVERIFYEVTREC pNext = pEvtRec->pNext; 6381 pEvtRec->pNext = pIemCpu->pFreeEvtRec; 6382 pIemCpu->pFreeEvtRec = pEvtRec; 6383 pEvtRec = pNext; 6384 } 6385 pEvtRec = pIemCpu->pOtherEvtRecHead; 6386 pIemCpu->pOtherEvtRecHead = NULL; 6387 pIemCpu->ppOtherEvtRecNext = &pIemCpu->pOtherEvtRecHead; 6388 } while (pEvtRec); 6389 } 6390 } 6391 6392 6372 6393 /** 6373 6394 * Allocate an event record. … … 6376 6397 static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu) 6377 6398 { 6399 if (IEM_VERIFICATION_ENABLED(pIemCpu)) 6400 return NULL; 6401 6378 6402 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pFreeEvtRec; 6379 6403 if (pEvtRec) … … 6392 6416 return pEvtRec; 6393 6417 } 6394 # endif6395 6418 6396 6419 … … 6400 6423 VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue) 6401 6424 { 6402 # ifndef IEM_VERIFICATION_MODE_NO_REM6403 6425 PVMCPU pVCpu = VMMGetCpu(pVM); 6404 6426 if (!pVCpu) … … 6413 6435 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext; 6414 6436 *pIemCpu->ppOtherEvtRecNext = pEvtRec; 6415 # endif6416 6437 } 6417 6438 … … 6422 6443 VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue) 6423 6444 { 6424 # ifndef IEM_VERIFICATION_MODE_NO_REM6425 6445 PVMCPU pVCpu = VMMGetCpu(pVM); 6426 6446 if (!pVCpu) … … 6439 6459 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext; 6440 6460 *pIemCpu->ppOtherEvtRecNext = pEvtRec; 6441 # endif6442 6461 } 6443 6462 … … 6448 6467 VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue) 6449 6468 { 6450 # ifndef IEM_VERIFICATION_MODE_NO_REM6451 6469 PVMCPU pVCpu = VMMGetCpu(pVM); 6452 6470 if (!pVCpu) … … 6461 6479 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext; 6462 6480 *pIemCpu->ppOtherEvtRecNext = pEvtRec; 6463 # endif6464 6481 } 6465 6482 … … 6469 6486 VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue) 6470 6487 { 6471 # ifndef IEM_VERIFICATION_MODE_NO_REM6472 6488 PVMCPU pVCpu = VMMGetCpu(pVM); 6473 6489 if (!pVCpu) … … 6483 6499 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext; 6484 6500 *pIemCpu->ppOtherEvtRecNext = pEvtRec; 6485 # endif6486 6501 } 6487 6502 … … 6498 6513 } 6499 6514 6500 # ifndef IEM_VERIFICATION_MODE_NO_REM6501 6515 6502 6516 /** … … 6596 6610 * a record dump attached. 6597 6611 * 6612 * @param pIemCpu The IEM per CPU data. 6598 6613 * @param pEvtRec1 The first record. 6599 6614 * @param pEvtRec2 The second record. 6600 6615 * @param pszMsg The message explaining why we're asserting. 6601 6616 */ 6602 static void iemVerifyAssertRecords(PIEM VERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)6617 static void iemVerifyAssertRecords(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg) 6603 6618 { 6604 6619 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__); 6605 6620 iemVerifyAssertAddRecordDump(pEvtRec1); 6606 6621 iemVerifyAssertAddRecordDump(pEvtRec2); 6622 iemOpStubMsg2(pIemCpu); 6607 6623 RTAssertPanic(); 6608 6624 } … … 6613 6629 * a record dump attached. 6614 6630 * 6631 * @param pIemCpu The IEM per CPU data. 6615 6632 * @param pEvtRec1 The first record. 6616 6633 * @param pszMsg The message explaining why we're asserting. 
6617 6634 */ 6618 static void iemVerifyAssertRecord(PIEM VERIFYEVTREC pEvtRec, const char *pszMsg)6635 static void iemVerifyAssertRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg) 6619 6636 { 6620 6637 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__); 6621 6638 iemVerifyAssertAddRecordDump(pEvtRec); 6639 iemOpStubMsg2(pIemCpu); 6622 6640 RTAssertPanic(); 6623 6641 } … … 6651 6669 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab); 6652 6670 iemVerifyAssertAddRecordDump(pEvtRec); 6671 iemOpStubMsg2(pIemCpu); 6653 6672 RTAssertPanic(); 6654 6673 } … … 6657 6676 } 6658 6677 6659 # endif /* !IEM_VERIFICATION_MODE_NO_REM */6660 6661 6678 /** 6662 6679 * Performs the post-execution verfication checks. … … 6664 6681 static void iemExecVerificationModeCheck(PIEMCPU pIemCpu) 6665 6682 { 6666 # if defined(IEM_VERIFICATION_MODE) && !defined(IEM_VERIFICATION_MODE_NO_REM) 6683 if (IEM_VERIFICATION_ENABLED(pIemCpu)) 6684 return; 6685 6667 6686 /* 6668 6687 * Switch back the state. … … 6829 6848 6830 6849 if (cDiffs != 0) 6831 AssertFailed(); 6850 { 6851 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); 6852 iemOpStubMsg2(pIemCpu); 6853 RTAssertPanic(); 6854 } 6832 6855 # undef CHECK_FIELD 6833 6856 # undef CHECK_BIT_FIELD … … 6862 6885 if (pIemRec->enmEvent != pOtherRec->enmEvent) 6863 6886 { 6864 iemVerifyAssertRecords(pIem Rec, pOtherRec, "Type mismatches");6887 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Type mismatches"); 6865 6888 break; 6866 6889 } … … 6892 6915 if (!fEquals) 6893 6916 { 6894 iemVerifyAssertRecords(pIem Rec, pOtherRec, "Mismatch");6917 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Mismatch"); 6895 6918 break; 6896 6919 } … … 6909 6932 } 6910 6933 if (pIemRec != NULL) 6911 iemVerifyAssertRecord(pIem Rec, "Extra IEM record!");6934 iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra IEM record!"); 6912 6935 else if (pOtherRec != NULL) 6913 iemVerifyAssertRecord(pIem Rec, "Extra Other record!");6936 iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra Other record!"); 6914 6937 } 6915 6938 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx; 6916 # endif6917 6939 } 6918 6940 … … 6935 6957 #ifdef LOG_ENABLED 6936 6958 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); 6937 if ( 0)//LogIs2Enabled())6959 if (LogIs2Enabled()) 6938 6960 { 6939 6961 char szInstr[256]; -
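The recurring edit in IEMAll.cpp above is mechanical: every compile-time "#if !defined(IEM_VERIFICATION_MODE) || defined(IEM_VERIFICATION_MODE_NO_REM)" block becomes a runtime IEM_VERIFICATION_ENABLED(pIemCpu) test, driven by the new fNoRem member that iemExecVerificationModeSetup() now sets from !LogIsEnabled(). The macro itself lives in the fourth edited file, which is not shown in this excerpt, so the definition in the sketch below is an assumption inferred from the call sites: "enabled" reads as "ordinary execution enabled", i.e. the REM cross-check is not recording.

    #include <stdbool.h>
    #include <stdio.h>

    #define IEM_VERIFICATION_MODE  /* pretend this is a verification build */

    /* Hypothetical stand-in for the IEM per-CPU state; only the new flag
       matters for this sketch. */
    typedef struct IEMCPU { bool fNoRem; } IEMCPU;

    /* Assumed definition, inferred from the call sites in the diff above. */
    #ifdef IEM_VERIFICATION_MODE
    # define IEM_VERIFICATION_ENABLED(a_pIemCpu) ((a_pIemCpu)->fNoRem)
    #else
    # define IEM_VERIFICATION_ENABLED(a_pIemCpu) (true)
    #endif

    static void demoLgdt(IEMCPU *pIemCpu)
    {
        if (IEM_VERIFICATION_ENABLED(pIemCpu))
            printf("normal path: CPUMSetGuestGDTR() on the live context\n");
        else
            printf("verify path: poke the shadow CPUMCTX, record the event\n");
    }

    int main(void)
    {
        IEMCPU Cpu = { /*fNoRem=*/ true };
        demoLgdt(&Cpu);        /* normal path */
        Cpu.fNoRem = false;
        demoLgdt(&Cpu);        /* verify path */
        return 0;
    }

Read this way, the call sites are consistent: real side effects (CPUMSetGuestGDTR, IOMIOPortRead, the PGM flushes) fire only when the check is true, while the shadow-context updates, the fake I/O handlers and the iemVerifyAllocRecord() event records run only when it is false. The practical gain is that the double-execution verification can now be toggled per instruction by the logger state instead of requiring a rebuild.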
trunk/src/VBox/VMM/VMMAll/IEMAllCImplStrInstr.cpp.h
r36794 r36829 54 54 55 55 56 /** 57 * Implements 'REPE CMPS'. 58 */ 59 IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repe_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg) 60 { 61 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); 62 63 /* 64 * Setup. 65 */ 66 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX; 67 if (uCounterReg == 0) 68 return VINF_SUCCESS; 69 70 PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pIemCpu, iEffSeg); 71 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrc1Hid, iEffSeg); 72 if (rcStrict != VINF_SUCCESS) 73 return rcStrict; 74 75 rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->esHid, X86_SREG_ES); 76 if (rcStrict != VINF_SUCCESS) 77 return rcStrict; 78 79 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8); 80 ADDR_TYPE uSrc1AddrReg = pCtx->ADDR_rSI; 81 ADDR_TYPE uSrc2AddrReg = pCtx->ADDR_rDI; 82 uint32_t uEFlags = pCtx->eflags.u; 83 84 /* 85 * The loop. 86 */ 87 do 88 { 89 /* 90 * Do segmentation and virtual page stuff. 91 */ 92 #if ADDR_SIZE != 64 93 ADDR2_TYPE uVirtSrc1Addr = (uint32_t)pSrc1Hid->u64Base + uSrc1AddrReg; 94 ADDR2_TYPE uVirtSrc2Addr = (uint32_t)pCtx->esHid.u64Base + uSrc2AddrReg; 95 #else 96 uint64_t uVirtSrc1Addr = uSrc1AddrReg; 97 uint64_t uVirtSrc2Addr = uSrc2AddrReg; 98 #endif 99 uint32_t cLeftSrc1Page = (PAGE_SIZE - (uVirtSrc1Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8); 100 if (cLeftSrc1Page > uCounterReg) 101 cLeftSrc1Page = uCounterReg; 102 uint32_t cLeftSrc2Page = (PAGE_SIZE - (uVirtSrc2Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8); 103 uint32_t cLeftPage = RT_MIN(cLeftSrc1Page, cLeftSrc2Page); 104 105 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */ 106 && cbIncr > 0 /** @todo Implement reverse direction string ops. */ 107 #if ADDR_SIZE != 64 108 && uSrc1AddrReg < pSrc1Hid->u32Limit 109 && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit 110 && uSrc2AddrReg < pCtx->esHid.u32Limit 111 && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->esHid.u32Limit 112 #endif 113 ) 114 { 115 RTGCPHYS GCPhysSrc1Mem; 116 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem); 117 if (rcStrict != VINF_SUCCESS) 118 break; 119 120 RTGCPHYS GCPhysSrc2Mem; 121 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem); 122 if (rcStrict != VINF_SUCCESS) 123 break; 124 125 /* 126 * If we can map the page without trouble, do a block processing 127 * until the end of the current page. 128 */ 129 OP_TYPE const *puSrc2Mem; 130 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem); 131 if (rcStrict == VINF_SUCCESS) 132 { 133 OP_TYPE const *puSrc1Mem; 134 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem); 135 if (rcStrict == VINF_SUCCESS) 136 { 137 if (!memcmp(puSrc2Mem, puSrc1Mem, cLeftPage * (OP_SIZE / 8))) 138 { 139 /* All matches, only compare the last itme to get the right eflags. */ 140 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[cLeftPage-1], puSrc2Mem[cLeftPage-1], &uEFlags); 141 uSrc1AddrReg += cLeftPage * cbIncr; 142 uSrc2AddrReg += cLeftPage * cbIncr; 143 uCounterReg -= cLeftPage; 144 } 145 else 146 { 147 /* Some mismatch, compare each item (and keep volatile 148 memory in mind). 
*/ 149 do 150 { 151 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)puSrc1Mem, *puSrc2Mem, &uEFlags); 152 uSrc1AddrReg += cbIncr; 153 uSrc2AddrReg += cbIncr; 154 uCounterReg--; 155 puSrc1Mem++; 156 puSrc2Mem++; 157 cLeftPage--; 158 } while ( (int32_t)cLeftPage > 0 159 && (uEFlags & X86_EFL_ZF)); 160 } 161 continue; 162 } 163 } 164 } 165 166 /* 167 * Fallback - slow processing till the end of the current page. 168 * In the cross page boundrary case we will end up here with cLeftPage 169 * as 0, we execute one loop then. 170 */ 171 do 172 { 173 OP_TYPE uValue1; 174 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue1, iEffSeg, uSrc1AddrReg); 175 if (rcStrict != VINF_SUCCESS) 176 break; 177 OP_TYPE uValue2; 178 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg); 179 if (rcStrict != VINF_SUCCESS) 180 break; 181 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags); 182 183 uSrc1AddrReg += cbIncr; 184 uSrc2AddrReg += cbIncr; 185 uCounterReg--; 186 cLeftPage--; 187 } while ( (int32_t)cLeftPage > 0 188 && (uEFlags & X86_EFL_ZF)); 189 if (rcStrict != VINF_SUCCESS) 190 break; 191 } while ( uCounterReg != 0 192 && (uEFlags & X86_EFL_ZF)); 193 194 /* 195 * Update the registers. 196 */ 197 pCtx->ADDR_rCX = uCounterReg; 198 pCtx->ADDR_rSI = uSrc1AddrReg; 199 pCtx->ADDR_rDI = uSrc2AddrReg; 200 pCtx->eflags.u = uEFlags; 201 if (rcStrict == VINF_SUCCESS) 202 iemRegAddToRip(pIemCpu, cbInstr); 203 204 return rcStrict; 205 } 206 207 208 /** 209 * Implements 'REPNE CMPS'. 210 */ 211 IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repne_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg) 212 { 213 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); 214 215 /* 216 * Setup. 217 */ 218 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX; 219 if (uCounterReg == 0) 220 return VINF_SUCCESS; 221 222 PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pIemCpu, iEffSeg); 223 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrc1Hid, iEffSeg); 224 if (rcStrict != VINF_SUCCESS) 225 return rcStrict; 226 227 rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->esHid, X86_SREG_ES); 228 if (rcStrict != VINF_SUCCESS) 229 return rcStrict; 230 231 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8); 232 ADDR_TYPE uSrc1AddrReg = pCtx->ADDR_rSI; 233 ADDR_TYPE uSrc2AddrReg = pCtx->ADDR_rDI; 234 uint32_t uEFlags = pCtx->eflags.u; 235 236 /* 237 * The loop. 238 */ 239 do 240 { 241 /* 242 * Do segmentation and virtual page stuff. 243 */ 244 #if ADDR_SIZE != 64 245 ADDR2_TYPE uVirtSrc1Addr = (uint32_t)pSrc1Hid->u64Base + uSrc1AddrReg; 246 ADDR2_TYPE uVirtSrc2Addr = (uint32_t)pCtx->esHid.u64Base + uSrc2AddrReg; 247 #else 248 uint64_t uVirtSrc1Addr = uSrc1AddrReg; 249 uint64_t uVirtSrc2Addr = uSrc2AddrReg; 250 #endif 251 uint32_t cLeftSrc1Page = (PAGE_SIZE - (uVirtSrc1Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8); 252 if (cLeftSrc1Page > uCounterReg) 253 cLeftSrc1Page = uCounterReg; 254 uint32_t cLeftSrc2Page = (PAGE_SIZE - (uVirtSrc2Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8); 255 uint32_t cLeftPage = RT_MIN(cLeftSrc1Page, cLeftSrc2Page); 256 257 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */ 258 && cbIncr > 0 /** @todo Implement reverse direction string ops. 
*/ 259 #if ADDR_SIZE != 64 260 && uSrc1AddrReg < pSrc1Hid->u32Limit 261 && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit 262 && uSrc2AddrReg < pCtx->esHid.u32Limit 263 && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->esHid.u32Limit 264 #endif 265 ) 266 { 267 RTGCPHYS GCPhysSrc1Mem; 268 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem); 269 if (rcStrict != VINF_SUCCESS) 270 break; 271 272 RTGCPHYS GCPhysSrc2Mem; 273 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem); 274 if (rcStrict != VINF_SUCCESS) 275 break; 276 277 /* 278 * If we can map the page without trouble, do a block processing 279 * until the end of the current page. 280 */ 281 OP_TYPE const *puSrc2Mem; 282 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem); 283 if (rcStrict == VINF_SUCCESS) 284 { 285 OP_TYPE const *puSrc1Mem; 286 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem); 287 if (rcStrict == VINF_SUCCESS) 288 { 289 if (memcmp(puSrc2Mem, puSrc1Mem, cLeftPage * (OP_SIZE / 8))) 290 { 291 /* All matches, only compare the last itme to get the right eflags. */ 292 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[cLeftPage-1], puSrc2Mem[cLeftPage-1], &uEFlags); 293 uSrc1AddrReg += cLeftPage * cbIncr; 294 uSrc2AddrReg += cLeftPage * cbIncr; 295 uCounterReg -= cLeftPage; 296 } 297 else 298 { 299 /* Some mismatch, compare each item (and keep volatile 300 memory in mind). */ 301 do 302 { 303 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)puSrc1Mem, *puSrc2Mem, &uEFlags); 304 uSrc1AddrReg += cbIncr; 305 uSrc2AddrReg += cbIncr; 306 uCounterReg--; 307 puSrc1Mem++; 308 puSrc2Mem++; 309 cLeftPage--; 310 } while ( (int32_t)cLeftPage > 0 311 && !(uEFlags & X86_EFL_ZF)); 312 } 313 continue; 314 } 315 } 316 } 317 318 /* 319 * Fallback - slow processing till the end of the current page. 320 * In the cross page boundrary case we will end up here with cLeftPage 321 * as 0, we execute one loop then. 322 */ 323 do 324 { 325 OP_TYPE uValue1; 326 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue1, iEffSeg, uSrc1AddrReg); 327 if (rcStrict != VINF_SUCCESS) 328 break; 329 OP_TYPE uValue2; 330 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg); 331 if (rcStrict != VINF_SUCCESS) 332 break; 333 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags); 334 335 uSrc1AddrReg += cbIncr; 336 uSrc2AddrReg += cbIncr; 337 uCounterReg--; 338 cLeftPage--; 339 } while ( (int32_t)cLeftPage > 0 340 && !(uEFlags & X86_EFL_ZF)); 341 if (rcStrict != VINF_SUCCESS) 342 break; 343 } while ( uCounterReg != 0 344 && !(uEFlags & X86_EFL_ZF)); 345 346 /* 347 * Update the registers. 348 */ 349 pCtx->ADDR_rCX = uCounterReg; 350 pCtx->ADDR_rSI = uSrc1AddrReg; 351 pCtx->ADDR_rDI = uSrc2AddrReg; 352 pCtx->eflags.u = uEFlags; 353 if (rcStrict == VINF_SUCCESS) 354 iemRegAddToRip(pIemCpu, cbInstr); 355 356 return rcStrict; 357 } 358 359 360 /** 361 * Implements 'REPE SCAS'. 362 */ 363 IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repe_scas_,OP_rAX,_m,ADDR_SIZE)) 364 { 365 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); 366 367 /* 368 * Setup. 
369 */ 370 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX; 371 if (uCounterReg == 0) 372 return VINF_SUCCESS; 373 374 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->esHid, X86_SREG_ES); 375 if (rcStrict != VINF_SUCCESS) 376 return rcStrict; 377 378 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8); 379 OP_TYPE const uValueReg = pCtx->OP_rAX; 380 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI; 381 uint32_t uEFlags = pCtx->eflags.u; 382 383 /* 384 * The loop. 385 */ 386 do 387 { 388 /* 389 * Do segmentation and virtual page stuff. 390 */ 391 #if ADDR_SIZE != 64 392 ADDR2_TYPE uVirtAddr = (uint32_t)pCtx->esHid.u64Base + uAddrReg; 393 #else 394 uint64_t uVirtAddr = uAddrReg; 395 #endif 396 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8); 397 if (cLeftPage > uCounterReg) 398 cLeftPage = uCounterReg; 399 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */ 400 && cbIncr > 0 /** @todo Implement reverse direction string ops. */ 401 #if ADDR_SIZE != 64 402 && uAddrReg < pCtx->esHid.u32Limit 403 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->esHid.u32Limit 404 #endif 405 ) 406 { 407 RTGCPHYS GCPhysMem; 408 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem); 409 if (rcStrict != VINF_SUCCESS) 410 break; 411 412 /* 413 * If we can map the page without trouble, do a block processing 414 * until the end of the current page. 415 */ 416 OP_TYPE const *puMem; 417 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem); 418 if (rcStrict == VINF_SUCCESS) 419 { 420 /* Search till we find a mismatching item. */ 421 OP_TYPE uTmpValue; 422 bool fQuit; 423 uint32_t i = 0; 424 do 425 { 426 uTmpValue = puMem[i++]; 427 fQuit = uTmpValue != uValueReg; 428 } while (i < cLeftPage && !fQuit); 429 430 /* Update the regs. */ 431 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags); 432 uCounterReg -= i; 433 uAddrReg += i * cbIncr; 434 Assert(!(uEFlags & X86_EFL_ZF) == (i < cLeftPage)); 435 if (fQuit) 436 break; 437 438 439 /* If unaligned, we drop thru and do the page crossing access 440 below. Otherwise, do the next page. */ 441 if (!(uVirtAddr & (OP_SIZE - 1))) 442 continue; 443 if (uCounterReg == 0) 444 break; 445 cLeftPage = 0; 446 } 447 } 448 449 /* 450 * Fallback - slow processing till the end of the current page. 451 * In the cross page boundrary case we will end up here with cLeftPage 452 * as 0, we execute one loop then. 453 */ 454 do 455 { 456 OP_TYPE uTmpValue; 457 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, X86_SREG_ES, uAddrReg); 458 if (rcStrict != VINF_SUCCESS) 459 break; 460 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags); 461 462 uAddrReg += cbIncr; 463 uCounterReg--; 464 cLeftPage--; 465 } while ( (int32_t)cLeftPage > 0 466 && (uEFlags & X86_EFL_ZF)); 467 if (rcStrict != VINF_SUCCESS) 468 break; 469 } while ( uCounterReg != 0 470 && (uEFlags & X86_EFL_ZF)); 471 472 /* 473 * Update the registers. 474 */ 475 pCtx->ADDR_rCX = uCounterReg; 476 pCtx->ADDR_rDI = uAddrReg; 477 pCtx->eflags.u = uEFlags; 478 if (rcStrict == VINF_SUCCESS) 479 iemRegAddToRip(pIemCpu, cbInstr); 480 481 return rcStrict; 482 } 483 484 485 /** 486 * Implements 'REPNE SCAS'. 487 */ 488 IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repne_scas_,OP_rAX,_m,ADDR_SIZE)) 489 { 490 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); 491 492 /* 493 * Setup. 
494 */ 495 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX; 496 if (uCounterReg == 0) 497 return VINF_SUCCESS; 498 499 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->esHid, X86_SREG_ES); 500 if (rcStrict != VINF_SUCCESS) 501 return rcStrict; 502 503 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8); 504 OP_TYPE const uValueReg = pCtx->OP_rAX; 505 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI; 506 uint32_t uEFlags = pCtx->eflags.u; 507 508 /* 509 * The loop. 510 */ 511 do 512 { 513 /* 514 * Do segmentation and virtual page stuff. 515 */ 516 #if ADDR_SIZE != 64 517 ADDR2_TYPE uVirtAddr = (uint32_t)pCtx->esHid.u64Base + uAddrReg; 518 #else 519 uint64_t uVirtAddr = uAddrReg; 520 #endif 521 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8); 522 if (cLeftPage > uCounterReg) 523 cLeftPage = uCounterReg; 524 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */ 525 && cbIncr > 0 /** @todo Implement reverse direction string ops. */ 526 #if ADDR_SIZE != 64 527 && uAddrReg < pCtx->esHid.u32Limit 528 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->esHid.u32Limit 529 #endif 530 ) 531 { 532 RTGCPHYS GCPhysMem; 533 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem); 534 if (rcStrict != VINF_SUCCESS) 535 break; 536 537 /* 538 * If we can map the page without trouble, do a block processing 539 * until the end of the current page. 540 */ 541 OP_TYPE const *puMem; 542 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem); 543 if (rcStrict == VINF_SUCCESS) 544 { 545 /* Search till we find a mismatching item. */ 546 OP_TYPE uTmpValue; 547 bool fQuit; 548 uint32_t i = 0; 549 do 550 { 551 uTmpValue = puMem[i++]; 552 fQuit = uTmpValue == uValueReg; 553 } while (i < cLeftPage && !fQuit); 554 555 /* Update the regs. */ 556 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags); 557 uCounterReg -= i; 558 uAddrReg += i * cbIncr; 559 Assert(!(uEFlags & X86_EFL_ZF) != (i < cLeftPage)); 560 if (fQuit) 561 break; 562 563 564 /* If unaligned, we drop thru and do the page crossing access 565 below. Otherwise, do the next page. */ 566 if (!(uVirtAddr & (OP_SIZE - 1))) 567 continue; 568 if (uCounterReg == 0) 569 break; 570 cLeftPage = 0; 571 } 572 } 573 574 /* 575 * Fallback - slow processing till the end of the current page. 576 * In the cross page boundrary case we will end up here with cLeftPage 577 * as 0, we execute one loop then. 578 */ 579 do 580 { 581 OP_TYPE uTmpValue; 582 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, X86_SREG_ES, uAddrReg); 583 if (rcStrict != VINF_SUCCESS) 584 break; 585 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags); 586 587 uAddrReg += cbIncr; 588 uCounterReg--; 589 cLeftPage--; 590 } while ( (int32_t)cLeftPage > 0 591 && !(uEFlags & X86_EFL_ZF)); 592 if (rcStrict != VINF_SUCCESS) 593 break; 594 } while ( uCounterReg != 0 595 && !(uEFlags & X86_EFL_ZF)); 596 597 /* 598 * Update the registers. 
599 */ 600 pCtx->ADDR_rCX = uCounterReg; 601 pCtx->ADDR_rDI = uAddrReg; 602 pCtx->eflags.u = uEFlags; 603 if (rcStrict == VINF_SUCCESS) 604 iemRegAddToRip(pIemCpu, cbInstr); 605 606 return rcStrict; 607 } 608 609 610 56 611 57 612 /** … … 132 687 { 133 688 OP_TYPE const *puSrcMem; 134 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrcMem, IEM_ACCESS_DATA_ W, (void **)&puSrcMem);689 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, (void **)&puSrcMem); 135 690 if (rcStrict == VINF_SUCCESS) 136 691 { … … 179 734 if (rcStrict == VINF_SUCCESS) 180 735 iemRegAddToRip(pIemCpu, cbInstr); 736 else 737 AssertFailed(); 181 738 182 739 return rcStrict; … … 437 994 438 995 uint32_t u32Value; 439 # if !defined(IEM_VERIFICATION_MODE) || defined(IEM_VERIFICATION_MODE_NO_REM) 440 rcStrict = IOMIOPortRead(pVM, pCtx->dx, &u32Value, OP_SIZE / 8); 441 # else 442 iemVerifyFakeIOPortRead(pIemCpu, pCtx->dx, &u32Value, OP_SIZE / 8); 443 # endif 996 if (IEM_VERIFICATION_ENABLED(pIemCpu)) 997 rcStrict = IOMIOPortRead(pVM, pCtx->dx, &u32Value, OP_SIZE / 8); 998 else 999 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, pCtx->dx, &u32Value, OP_SIZE / 8); 444 1000 if (IOM_SUCCESS(rcStrict)) 445 1001 { … … 536 1092 { 537 1093 uint32_t u32Value; 538 # if !defined(IEM_VERIFICATION_MODE) || defined(IEM_VERIFICATION_MODE_NO_REM) 539 rcStrict = IOMIOPortRead(pVM, u16Port, &u32Value, OP_SIZE / 8); 540 # else 541 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, OP_SIZE / 8); 542 # endif 1094 if (IEM_VERIFICATION_ENABLED(pIemCpu)) 1095 rcStrict = IOMIOPortRead(pVM, u16Port, &u32Value, OP_SIZE / 8); 1096 else 1097 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, OP_SIZE / 8); 543 1098 if (!IOM_SUCCESS(rcStrict)) 544 1099 break; … … 584 1139 585 1140 uint32_t u32Value; 586 # if !defined(IEM_VERIFICATION_MODE) || defined(IEM_VERIFICATION_MODE_NO_REM) 587 rcStrict = IOMIOPortRead(pVM, u16Port, &u32Value, OP_SIZE / 8); 588 # else 589 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, OP_SIZE / 8); 590 # endif 1141 if (IEM_VERIFICATION_ENABLED(pIemCpu)) 1142 rcStrict = IOMIOPortRead(pVM, u16Port, &u32Value, OP_SIZE / 8); 1143 else 1144 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, OP_SIZE / 8); 591 1145 if (!IOM_SUCCESS(rcStrict)) 592 1146 break; … … 642 1196 if (rcStrict == VINF_SUCCESS) 643 1197 { 644 # if !defined(IEM_VERIFICATION_MODE) || defined(IEM_VERIFICATION_MODE_NO_REM) 645 rcStrict = IOMIOPortWrite(pVM, pCtx->dx, uValue, OP_SIZE / 8); 646 # else 647 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, pCtx->dx, uValue, OP_SIZE / 8); 648 # endif 1198 if (IEM_VERIFICATION_ENABLED(pIemCpu)) 1199 rcStrict = IOMIOPortWrite(pVM, pCtx->dx, uValue, OP_SIZE / 8); 1200 else 1201 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, pCtx->dx, uValue, OP_SIZE / 8); 649 1202 if (IOM_SUCCESS(rcStrict)) 650 1203 { … … 734 1287 { 735 1288 uint32_t u32Value = *puMem++; 736 # if !defined(IEM_VERIFICATION_MODE) || defined(IEM_VERIFICATION_MODE_NO_REM) 737 rcStrict = IOMIOPortWrite(pVM, u16Port, u32Value, OP_SIZE / 8); 738 # else 739 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, u32Value, OP_SIZE / 8); 740 # endif 1289 if (IEM_VERIFICATION_ENABLED(pIemCpu)) 1290 rcStrict = IOMIOPortWrite(pVM, u16Port, u32Value, OP_SIZE / 8); 1291 else 1292 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, u32Value, OP_SIZE / 8); 741 1293 if (!IOM_SUCCESS(rcStrict)) 742 1294 break; … … 780 1332 break; 781 1333 782 # if !defined(IEM_VERIFICATION_MODE) || defined(IEM_VERIFICATION_MODE_NO_REM) 783 
rcStrict = IOMIOPortWrite(pVM, u16Port, uValue, OP_SIZE / 8); 784 # else 785 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, uValue, OP_SIZE / 8); 786 # endif 1334 if (IEM_VERIFICATION_ENABLED(pIemCpu)) 1335 rcStrict = IOMIOPortWrite(pVM, u16Port, uValue, OP_SIZE / 8); 1336 else 1337 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, uValue, OP_SIZE / 8); 787 1338 if (!IOM_SUCCESS(rcStrict)) 788 1339 break; -
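All four new string-instruction bodies above (REPE/REPNE CMPS and SCAS) share the same two-tier structure: translate and map the guest page(s), process as many items as fit on the page in one block, and fall back to a slow per-item loop at page crossings or when mapping fails. Below is a minimal sketch of the REPE CMPS fast path, with plain buffers standing in for the pages iemMemPageMap() returns and a bool standing in for EFLAGS.ZF; the function name and signature are illustrative, not VBox API.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Toy model of the REPE CMPSB fast path: memcmp() the whole block first
       and only fall back to an item-by-item walk on a mismatch, so the final
       "ZF" reflects the exact element that stopped the run. */
    static size_t repeCmpsbBlock(const uint8_t *pbSrc1, const uint8_t *pbSrc2,
                                 size_t cItems, bool *pfZF)
    {
        if (!memcmp(pbSrc1, pbSrc2, cItems))
        {
            *pfZF = true;       /* all equal; the last compare would set ZF */
            return cItems;      /* whole block consumed */
        }
        size_t i = 0;
        do
            *pfZF = pbSrc1[i] == pbSrc2[i];  /* what cmp would leave in ZF */
        while (++i < cItems && *pfZF);
        return i;               /* items consumed: how far rSI/rDI/rCX move */
    }

    int main(void)
    {
        const uint8_t abA[] = "abcdef", abB[] = "abcXef";
        bool fZF;
        size_t cDone = repeCmpsbBlock(abA, abB, 6, &fZF);
        printf("consumed %zu items, ZF=%d\n", cDone, fZF); /* 4 items, ZF=0 */
        return 0;
    }

memcmp() alone cannot finish the job because a mismatch must still leave rSI/rDI/rCX and the arithmetic flags exactly as a cmp on the offending item would, which is why the mismatch path re-runs the comparison element by element (and why the real code notes it must keep volatile memory in mind).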
trunk/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h
r36828 r36829 1647 1647 1648 1648 /** Opcode 0x0f 0x90. */ 1649 FNIEMOP_STUB(iemOp_seto_Jv); 1649 FNIEMOP_DEF(iemOp_seto_Eb) 1650 { 1651 IEMOP_MNEMONIC("seto Eb"); 1652 uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm); 1653 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */ 1654 1655 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in 1656 * any way. AMD says it's "unused", whatever that means. We're 1657 * ignoring for now. */ 1658 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) 1659 { 1660 /* register target */ 1661 IEM_MC_BEGIN(0, 0); 1662 IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) { 1663 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1); 1664 } IEM_MC_ELSE() { 1665 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0); 1666 } IEM_MC_ENDIF(); 1667 IEM_MC_ADVANCE_RIP(); 1668 IEM_MC_END(); 1669 } 1670 else 1671 { 1672 /* memory target */ 1673 IEM_MC_BEGIN(0, 1); 1674 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); 1675 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); 1676 IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) { 1677 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 1); 1678 } IEM_MC_ELSE() { 1679 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 0); 1680 } IEM_MC_ENDIF(); 1681 IEM_MC_ADVANCE_RIP(); 1682 IEM_MC_END(); 1683 } 1684 return VINF_SUCCESS; 1685 } 1686 1687 1650 1688 /** Opcode 0x0f 0x91. */ 1651 FNIEMOP_STUB(iemOp_setno_Jv); 1689 FNIEMOP_DEF(iemOp_setno_Eb) 1690 { 1691 IEMOP_MNEMONIC("setno Eb"); 1692 uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm); 1693 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */ 1694 1695 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in 1696 * any way. AMD says it's "unused", whatever that means. We're 1697 * ignoring for now. */ 1698 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) 1699 { 1700 /* register target */ 1701 IEM_MC_BEGIN(0, 0); 1702 IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) { 1703 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0); 1704 } IEM_MC_ELSE() { 1705 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1); 1706 } IEM_MC_ENDIF(); 1707 IEM_MC_ADVANCE_RIP(); 1708 IEM_MC_END(); 1709 } 1710 else 1711 { 1712 /* memory target */ 1713 IEM_MC_BEGIN(0, 1); 1714 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); 1715 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); 1716 IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) { 1717 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 0); 1718 } IEM_MC_ELSE() { 1719 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 1); 1720 } IEM_MC_ENDIF(); 1721 IEM_MC_ADVANCE_RIP(); 1722 IEM_MC_END(); 1723 } 1724 return VINF_SUCCESS; 1725 } 1726 1727 1652 1728 /** Opcode 0x0f 0x92. */ 1653 FNIEMOP_STUB(iemOp_setc_Jv); 1729 FNIEMOP_DEF(iemOp_setc_Eb) 1730 { 1731 IEMOP_MNEMONIC("setc Eb"); 1732 uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm); 1733 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */ 1734 1735 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in 1736 * any way. AMD says it's "unused", whatever that means. We're 1737 * ignoring for now. 
*/ 1738 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) 1739 { 1740 /* register target */ 1741 IEM_MC_BEGIN(0, 0); 1742 IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) { 1743 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1); 1744 } IEM_MC_ELSE() { 1745 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0); 1746 } IEM_MC_ENDIF(); 1747 IEM_MC_ADVANCE_RIP(); 1748 IEM_MC_END(); 1749 } 1750 else 1751 { 1752 /* memory target */ 1753 IEM_MC_BEGIN(0, 1); 1754 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); 1755 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); 1756 IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) { 1757 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 1); 1758 } IEM_MC_ELSE() { 1759 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 0); 1760 } IEM_MC_ENDIF(); 1761 IEM_MC_ADVANCE_RIP(); 1762 IEM_MC_END(); 1763 } 1764 return VINF_SUCCESS; 1765 } 1766 1767 1654 1768 /** Opcode 0x0f 0x93. */ 1655 FNIEMOP_STUB(iemOp_setnc_Jv); 1769 FNIEMOP_DEF(iemOp_setnc_Eb) 1770 { 1771 IEMOP_MNEMONIC("setnc Eb"); 1772 uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm); 1773 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */ 1774 1775 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in 1776 * any way. AMD says it's "unused", whatever that means. We're 1777 * ignoring for now. */ 1778 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) 1779 { 1780 /* register target */ 1781 IEM_MC_BEGIN(0, 0); 1782 IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) { 1783 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0); 1784 } IEM_MC_ELSE() { 1785 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1); 1786 } IEM_MC_ENDIF(); 1787 IEM_MC_ADVANCE_RIP(); 1788 IEM_MC_END(); 1789 } 1790 else 1791 { 1792 /* memory target */ 1793 IEM_MC_BEGIN(0, 1); 1794 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); 1795 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); 1796 IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) { 1797 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 0); 1798 } IEM_MC_ELSE() { 1799 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 1); 1800 } IEM_MC_ENDIF(); 1801 IEM_MC_ADVANCE_RIP(); 1802 IEM_MC_END(); 1803 } 1804 return VINF_SUCCESS; 1805 } 1806 1807 1656 1808 /** Opcode 0x0f 0x94. */ 1657 FNIEMOP_STUB(iemOp_sete_Jv); 1809 FNIEMOP_DEF(iemOp_sete_Eb) 1810 { 1811 IEMOP_MNEMONIC("sete Eb"); 1812 uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm); 1813 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */ 1814 1815 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in 1816 * any way. AMD says it's "unused", whatever that means. We're 1817 * ignoring for now. */ 1818 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) 1819 { 1820 /* register target */ 1821 IEM_MC_BEGIN(0, 0); 1822 IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) { 1823 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1); 1824 } IEM_MC_ELSE() { 1825 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0); 1826 } IEM_MC_ENDIF(); 1827 IEM_MC_ADVANCE_RIP(); 1828 IEM_MC_END(); 1829 } 1830 else 1831 { 1832 /* memory target */ 1833 IEM_MC_BEGIN(0, 1); 1834 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); 1835 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); 1836 IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) { 1837 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 1); 1838 } IEM_MC_ELSE() { 1839 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 0); 1840 } IEM_MC_ENDIF(); 1841 IEM_MC_ADVANCE_RIP(); 1842 IEM_MC_END(); 1843 } 1844 return VINF_SUCCESS; 1845 } 1846 1847 1658 1848 /** Opcode 0x0f 0x95. 
*/ 1659 FNIEMOP_STUB(iemOp_setne_Jv); 1849 FNIEMOP_DEF(iemOp_setne_Eb) 1850 { 1851 IEMOP_MNEMONIC("setne Eb"); 1852 uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm); 1853 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */ 1854 1855 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in 1856 * any way. AMD says it's "unused", whatever that means. We're 1857 * ignoring for now. */ 1858 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) 1859 { 1860 /* register target */ 1861 IEM_MC_BEGIN(0, 0); 1862 IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) { 1863 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0); 1864 } IEM_MC_ELSE() { 1865 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1); 1866 } IEM_MC_ENDIF(); 1867 IEM_MC_ADVANCE_RIP(); 1868 IEM_MC_END(); 1869 } 1870 else 1871 { 1872 /* memory target */ 1873 IEM_MC_BEGIN(0, 1); 1874 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); 1875 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); 1876 IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) { 1877 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 0); 1878 } IEM_MC_ELSE() { 1879 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 1); 1880 } IEM_MC_ENDIF(); 1881 IEM_MC_ADVANCE_RIP(); 1882 IEM_MC_END(); 1883 } 1884 return VINF_SUCCESS; 1885 } 1886 1887 1660 1888 /** Opcode 0x0f 0x96. */ 1661 FNIEMOP_STUB(iemOp_setbe_Jv); 1889 FNIEMOP_DEF(iemOp_setbe_Eb) 1890 { 1891 IEMOP_MNEMONIC("setbe Eb"); 1892 uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm); 1893 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */ 1894 1895 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in 1896 * any way. AMD says it's "unused", whatever that means. We're 1897 * ignoring for now. */ 1898 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) 1899 { 1900 /* register target */ 1901 IEM_MC_BEGIN(0, 0); 1902 IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) { 1903 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1); 1904 } IEM_MC_ELSE() { 1905 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0); 1906 } IEM_MC_ENDIF(); 1907 IEM_MC_ADVANCE_RIP(); 1908 IEM_MC_END(); 1909 } 1910 else 1911 { 1912 /* memory target */ 1913 IEM_MC_BEGIN(0, 1); 1914 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); 1915 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); 1916 IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) { 1917 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 1); 1918 } IEM_MC_ELSE() { 1919 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 0); 1920 } IEM_MC_ENDIF(); 1921 IEM_MC_ADVANCE_RIP(); 1922 IEM_MC_END(); 1923 } 1924 return VINF_SUCCESS; 1925 } 1926 1927 1662 1928 /** Opcode 0x0f 0x97. */ 1663 FNIEMOP_STUB(iemOp_setnbe_Jv); 1929 FNIEMOP_DEF(iemOp_setnbe_Eb) 1930 { 1931 IEMOP_MNEMONIC("setnbe Eb"); 1932 uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm); 1933 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */ 1934 1935 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in 1936 * any way. AMD says it's "unused", whatever that means. We're 1937 * ignoring for now. 
*/ 1938 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) 1939 { 1940 /* register target */ 1941 IEM_MC_BEGIN(0, 0); 1942 IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) { 1943 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0); 1944 } IEM_MC_ELSE() { 1945 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1); 1946 } IEM_MC_ENDIF(); 1947 IEM_MC_ADVANCE_RIP(); 1948 IEM_MC_END(); 1949 } 1950 else 1951 { 1952 /* memory target */ 1953 IEM_MC_BEGIN(0, 1); 1954 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); 1955 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); 1956 IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) { 1957 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 0); 1958 } IEM_MC_ELSE() { 1959 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 1); 1960 } IEM_MC_ENDIF(); 1961 IEM_MC_ADVANCE_RIP(); 1962 IEM_MC_END(); 1963 } 1964 return VINF_SUCCESS; 1965 } 1966 1967 1664 1968 /** Opcode 0x0f 0x98. */ 1665 FNIEMOP_STUB(iemOp_sets_Jv); 1969 FNIEMOP_DEF(iemOp_sets_Eb) 1970 { 1971 IEMOP_MNEMONIC("sets Eb"); 1972 uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm); 1973 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */ 1974 1975 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in 1976 * any way. AMD says it's "unused", whatever that means. We're 1977 * ignoring for now. */ 1978 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) 1979 { 1980 /* register target */ 1981 IEM_MC_BEGIN(0, 0); 1982 IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) { 1983 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1); 1984 } IEM_MC_ELSE() { 1985 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0); 1986 } IEM_MC_ENDIF(); 1987 IEM_MC_ADVANCE_RIP(); 1988 IEM_MC_END(); 1989 } 1990 else 1991 { 1992 /* memory target */ 1993 IEM_MC_BEGIN(0, 1); 1994 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); 1995 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); 1996 IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) { 1997 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 1); 1998 } IEM_MC_ELSE() { 1999 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 0); 2000 } IEM_MC_ENDIF(); 2001 IEM_MC_ADVANCE_RIP(); 2002 IEM_MC_END(); 2003 } 2004 return VINF_SUCCESS; 2005 } 2006 2007 1666 2008 /** Opcode 0x0f 0x99. */ 1667 FNIEMOP_STUB(iemOp_setns_Jv); 2009 FNIEMOP_DEF(iemOp_setns_Eb) 2010 { 2011 IEMOP_MNEMONIC("setns Eb"); 2012 uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm); 2013 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */ 2014 2015 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in 2016 * any way. AMD says it's "unused", whatever that means. We're 2017 * ignoring for now. 
*/ 2018 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) 2019 { 2020 /* register target */ 2021 IEM_MC_BEGIN(0, 0); 2022 IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) { 2023 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0); 2024 } IEM_MC_ELSE() { 2025 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1); 2026 } IEM_MC_ENDIF(); 2027 IEM_MC_ADVANCE_RIP(); 2028 IEM_MC_END(); 2029 } 2030 else 2031 { 2032 /* memory target */ 2033 IEM_MC_BEGIN(0, 1); 2034 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); 2035 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); 2036 IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) { 2037 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 0); 2038 } IEM_MC_ELSE() { 2039 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 1); 2040 } IEM_MC_ENDIF(); 2041 IEM_MC_ADVANCE_RIP(); 2042 IEM_MC_END(); 2043 } 2044 return VINF_SUCCESS; 2045 } 2046 2047 1668 2048 /** Opcode 0x0f 0x9a. */ 1669 FNIEMOP_STUB(iemOp_setp_Jv); 2049 FNIEMOP_DEF(iemOp_setp_Eb) 2050 { 2051 IEMOP_MNEMONIC("setnp Eb"); 2052 uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm); 2053 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */ 2054 2055 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in 2056 * any way. AMD says it's "unused", whatever that means. We're 2057 * ignoring for now. */ 2058 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) 2059 { 2060 /* register target */ 2061 IEM_MC_BEGIN(0, 0); 2062 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) { 2063 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1); 2064 } IEM_MC_ELSE() { 2065 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0); 2066 } IEM_MC_ENDIF(); 2067 IEM_MC_ADVANCE_RIP(); 2068 IEM_MC_END(); 2069 } 2070 else 2071 { 2072 /* memory target */ 2073 IEM_MC_BEGIN(0, 1); 2074 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); 2075 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); 2076 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) { 2077 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 1); 2078 } IEM_MC_ELSE() { 2079 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 0); 2080 } IEM_MC_ENDIF(); 2081 IEM_MC_ADVANCE_RIP(); 2082 IEM_MC_END(); 2083 } 2084 return VINF_SUCCESS; 2085 } 2086 2087 1670 2088 /** Opcode 0x0f 0x9b. */ 1671 FNIEMOP_STUB(iemOp_setnp_Jv); 2089 FNIEMOP_DEF(iemOp_setnp_Eb) 2090 { 2091 IEMOP_MNEMONIC("setnp Eb"); 2092 uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm); 2093 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */ 2094 2095 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in 2096 * any way. AMD says it's "unused", whatever that means. We're 2097 * ignoring for now. */ 2098 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) 2099 { 2100 /* register target */ 2101 IEM_MC_BEGIN(0, 0); 2102 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) { 2103 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0); 2104 } IEM_MC_ELSE() { 2105 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1); 2106 } IEM_MC_ENDIF(); 2107 IEM_MC_ADVANCE_RIP(); 2108 IEM_MC_END(); 2109 } 2110 else 2111 { 2112 /* memory target */ 2113 IEM_MC_BEGIN(0, 1); 2114 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); 2115 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); 2116 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) { 2117 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 0); 2118 } IEM_MC_ELSE() { 2119 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 1); 2120 } IEM_MC_ENDIF(); 2121 IEM_MC_ADVANCE_RIP(); 2122 IEM_MC_END(); 2123 } 2124 return VINF_SUCCESS; 2125 } 2126 2127 1672 2128 /** Opcode 0x0f 0x9c. 
*/ 1673 FNIEMOP_STUB(iemOp_setl_Jv); 2129 FNIEMOP_DEF(iemOp_setl_Eb) 2130 { 2131 IEMOP_MNEMONIC("setl Eb"); 2132 uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm); 2133 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */ 2134 2135 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in 2136 * any way. AMD says it's "unused", whatever that means. We're 2137 * ignoring for now. */ 2138 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) 2139 { 2140 /* register target */ 2141 IEM_MC_BEGIN(0, 0); 2142 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) { 2143 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1); 2144 } IEM_MC_ELSE() { 2145 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0); 2146 } IEM_MC_ENDIF(); 2147 IEM_MC_ADVANCE_RIP(); 2148 IEM_MC_END(); 2149 } 2150 else 2151 { 2152 /* memory target */ 2153 IEM_MC_BEGIN(0, 1); 2154 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); 2155 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); 2156 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) { 2157 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 1); 2158 } IEM_MC_ELSE() { 2159 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 0); 2160 } IEM_MC_ENDIF(); 2161 IEM_MC_ADVANCE_RIP(); 2162 IEM_MC_END(); 2163 } 2164 return VINF_SUCCESS; 2165 } 2166 2167 1674 2168 /** Opcode 0x0f 0x9d. */ 1675 FNIEMOP_STUB(iemOp_setnl_Jv); 2169 FNIEMOP_DEF(iemOp_setnl_Eb) 2170 { 2171 IEMOP_MNEMONIC("setnl Eb"); 2172 uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm); 2173 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */ 2174 2175 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in 2176 * any way. AMD says it's "unused", whatever that means. We're 2177 * ignoring for now. */ 2178 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) 2179 { 2180 /* register target */ 2181 IEM_MC_BEGIN(0, 0); 2182 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) { 2183 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0); 2184 } IEM_MC_ELSE() { 2185 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1); 2186 } IEM_MC_ENDIF(); 2187 IEM_MC_ADVANCE_RIP(); 2188 IEM_MC_END(); 2189 } 2190 else 2191 { 2192 /* memory target */ 2193 IEM_MC_BEGIN(0, 1); 2194 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); 2195 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); 2196 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) { 2197 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 0); 2198 } IEM_MC_ELSE() { 2199 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 1); 2200 } IEM_MC_ENDIF(); 2201 IEM_MC_ADVANCE_RIP(); 2202 IEM_MC_END(); 2203 } 2204 return VINF_SUCCESS; 2205 } 2206 2207 1676 2208 /** Opcode 0x0f 0x9e. */ 1677 FNIEMOP_STUB(iemOp_setle_Jv); 2209 FNIEMOP_DEF(iemOp_setle_Eb) 2210 { 2211 IEMOP_MNEMONIC("setle Eb"); 2212 uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm); 2213 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */ 2214 2215 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in 2216 * any way. AMD says it's "unused", whatever that means. We're 2217 * ignoring for now. 
*/ 2218 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) 2219 { 2220 /* register target */ 2221 IEM_MC_BEGIN(0, 0); 2222 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) { 2223 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1); 2224 } IEM_MC_ELSE() { 2225 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0); 2226 } IEM_MC_ENDIF(); 2227 IEM_MC_ADVANCE_RIP(); 2228 IEM_MC_END(); 2229 } 2230 else 2231 { 2232 /* memory target */ 2233 IEM_MC_BEGIN(0, 1); 2234 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); 2235 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); 2236 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) { 2237 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 1); 2238 } IEM_MC_ELSE() { 2239 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 0); 2240 } IEM_MC_ENDIF(); 2241 IEM_MC_ADVANCE_RIP(); 2242 IEM_MC_END(); 2243 } 2244 return VINF_SUCCESS; 2245 } 2246 2247 1678 2248 /** Opcode 0x0f 0x9f. */ 1679 FNIEMOP_STUB(iemOp_setnle_Jv); 2249 FNIEMOP_DEF(iemOp_setnle_Eb) 2250 { 2251 IEMOP_MNEMONIC("setnle Eb"); 2252 uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm); 2253 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */ 2254 2255 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in 2256 * any way. AMD says it's "unused", whatever that means. We're 2257 * ignoring for now. */ 2258 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) 2259 { 2260 /* register target */ 2261 IEM_MC_BEGIN(0, 0); 2262 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) { 2263 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0); 2264 } IEM_MC_ELSE() { 2265 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1); 2266 } IEM_MC_ENDIF(); 2267 IEM_MC_ADVANCE_RIP(); 2268 IEM_MC_END(); 2269 } 2270 else 2271 { 2272 /* memory target */ 2273 IEM_MC_BEGIN(0, 1); 2274 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); 2275 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); 2276 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) { 2277 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 0); 2278 } IEM_MC_ELSE() { 2279 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 1); 2280 } IEM_MC_ENDIF(); 2281 IEM_MC_ADVANCE_RIP(); 2282 IEM_MC_END(); 2283 } 2284 return VINF_SUCCESS; 2285 } 1680 2286 1681 2287 … … 2270 2876 /* 0x88 */ iemOp_js_Jv, iemOp_jns_Jv, iemOp_jp_Jv, iemOp_jnp_Jv, 2271 2877 /* 0x8c */ iemOp_jl_Jv, iemOp_jnl_Jv, iemOp_jle_Jv, iemOp_jnle_Jv, 2272 /* 0x90 */ iemOp_seto_ Jv, iemOp_setno_Jv, iemOp_setc_Jv, iemOp_setnc_Jv,2273 /* 0x94 */ iemOp_sete_ Jv, iemOp_setne_Jv, iemOp_setbe_Jv, iemOp_setnbe_Jv,2274 /* 0x98 */ iemOp_sets_ Jv, iemOp_setns_Jv, iemOp_setp_Jv, iemOp_setnp_Jv,2275 /* 0x9c */ iemOp_setl_ Jv, iemOp_setnl_Jv, iemOp_setle_Jv, iemOp_setnle_Jv,2878 /* 0x90 */ iemOp_seto_Eb, iemOp_setno_Eb, iemOp_setc_Eb, iemOp_setnc_Eb, 2879 /* 0x94 */ iemOp_sete_Eb, iemOp_setne_Eb, iemOp_setbe_Eb, iemOp_setnbe_Eb, 2880 /* 0x98 */ iemOp_sets_Eb, iemOp_setns_Eb, iemOp_setp_Eb, iemOp_setnp_Eb, 2881 /* 0x9c */ iemOp_setl_Eb, iemOp_setnl_Eb, iemOp_setle_Eb, iemOp_setnle_Eb, 2276 2882 /* 0xa0 */ iemOp_push_fs, iemOp_pop_fs, iemOp_cpuid, iemOp_bt_Ev_Gv, 2277 2883 /* 0xa4 */ iemOp_shld_Ev_Gv_Ib, iemOp_shld_Ev_Gv_CL, iemOp_Invalid, iemOp_Invalid, … … 5804 6410 } IEM_MC_ENDIF(); \ 5805 6411 IEM_MC_ADVANCE_RIP(); \ 5806 IEM_MC_END(); \ 5807 6412 IEM_MC_END(); 5808 6413 5809 6414 /** Opcode 0xa4. 
*/ … … 5813 6418 5814 6419 /* 5815 * Use the C implementation if a repeat eprefix is encountered. 6420 * Use the C implementation if a repeat prefix is encountered. 5816 6421 */ 5817 6422 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)) … … 5848 6453 5849 6454 /* 5850 * Use the C implementation if a repeat eprefix is encountered. 6455 * Use the C implementation if a repeat prefix is encountered. 5851 6456 */ 5852 6457 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)) … … 5927 6532 5928 6533 #undef IEM_MOVS_CASE 6534 /** Macro used by iemOp_cmpsb_Xb_Yb and iemOp_cmpswd_Xv_Yv */ 6535 #define IEM_CMPS_CASE(ValBits, AddrBits) \ 6536 IEM_MC_BEGIN(3, 3); \ 6537 IEM_MC_ARG(uint##ValBits##_t *, puValue1, 0); \ 6538 IEM_MC_ARG(uint##ValBits##_t, uValue2, 1); \ 6539 IEM_MC_ARG(uint32_t *, pEFlags, 2); \ 6540 IEM_MC_LOCAL(uint##ValBits##_t, uValue1); \ 6541 IEM_MC_LOCAL(uint##AddrBits##_t, uAddr); \ 6542 \ 6543 IEM_MC_FETCH_GREG_U##AddrBits(uAddr, X86_GREG_xSI); \ 6544 IEM_MC_FETCH_MEM_U##ValBits(uValue1, pIemCpu->iEffSeg, uAddr); \ 6545 IEM_MC_FETCH_GREG_U##AddrBits(uAddr, X86_GREG_xDI); \ 6546 IEM_MC_FETCH_MEM_U##ValBits(uValue2, X86_SREG_ES, uAddr); \ 6547 IEM_MC_REF_LOCAL(puValue1, uValue1); \ 6548 IEM_MC_REF_EFLAGS(pEFlags); \ 6549 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_cmp_u##ValBits, puValue1, uValue2, pEFlags); \ 6550 \ 6551 IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \ 6552 IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \ 6553 IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \ 6554 } IEM_MC_ELSE() { \ 6555 IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \ 6556 IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \ 6557 } IEM_MC_ENDIF(); \ 6558 IEM_MC_ADVANCE_RIP(); \ 6559 IEM_MC_END(); 6560 5929 6561 /** Opcode 0xa6. */ 5930 FNIEMOP_STUB(iemOp_cmpsb_Xb_Yb); 6562 FNIEMOP_DEF(iemOp_cmpsb_Xb_Yb) 6563 { 6564 IEMOP_HLP_NO_LOCK_PREFIX(); 6565 6566 /* 6567 * Use the C implementation if a repeat prefix is encountered. 6568 */ 6569 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ) 6570 { 6571 IEMOP_MNEMONIC("repe cmps Xb,Yb"); 6572 switch (pIemCpu->enmEffAddrMode) 6573 { 6574 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op8_addr16, pIemCpu->iEffSeg); 6575 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op8_addr32, pIemCpu->iEffSeg); 6576 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op8_addr64, pIemCpu->iEffSeg); 6577 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 6578 } 6579 } 6580 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ) 6581 { 6582 IEMOP_MNEMONIC("repne cmps Xb,Yb"); 6583 switch (pIemCpu->enmEffAddrMode) 6584 { 6585 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op8_addr16, pIemCpu->iEffSeg); 6586 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op8_addr32, pIemCpu->iEffSeg); 6587 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op8_addr64, pIemCpu->iEffSeg); 6588 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 6589 } 6590 } 6591 IEMOP_MNEMONIC("cmps Xb,Yb"); 6592 6593 /* 6594 * Sharing case implementation with cmps[wdq] below. 6595 */ 6596 switch (pIemCpu->enmEffAddrMode) 6597 { 6598 case IEMMODE_16BIT: IEM_CMPS_CASE(8, 16); break; 6599 case IEMMODE_32BIT: IEM_CMPS_CASE(8, 32); break; 6600 case IEMMODE_64BIT: IEM_CMPS_CASE(8, 64); break; 6601 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 6602 } 6603 return VINF_SUCCESS; 6604 6605 } 6606 6607 5931 6608 /** Opcode 0xa7.
*/ 5932 FNIEMOP_STUB(iemOp_cmpswd_Xv_Yv); 5933 6609 FNIEMOP_DEF(iemOp_cmpswd_Xv_Yv) 6610 { 6611 IEMOP_HLP_NO_LOCK_PREFIX(); 6612 6613 /* 6614 * Use the C implementation if a repeat prefix is encountered. 6615 */ 6616 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ) 6617 { 6618 IEMOP_MNEMONIC("repe cmps Xv,Yv"); 6619 switch (pIemCpu->enmEffOpSize) 6620 { 6621 case IEMMODE_16BIT: 6622 switch (pIemCpu->enmEffAddrMode) 6623 { 6624 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op16_addr16, pIemCpu->iEffSeg); 6625 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op16_addr32, pIemCpu->iEffSeg); 6626 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op16_addr64, pIemCpu->iEffSeg); 6627 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 6628 } 6629 break; 6630 case IEMMODE_32BIT: 6631 switch (pIemCpu->enmEffAddrMode) 6632 { 6633 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op32_addr16, pIemCpu->iEffSeg); 6634 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op32_addr32, pIemCpu->iEffSeg); 6635 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op32_addr64, pIemCpu->iEffSeg); 6636 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 6637 } 6638 case IEMMODE_64BIT: 6639 switch (pIemCpu->enmEffAddrMode) 6640 { 6641 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3); 6642 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op64_addr32, pIemCpu->iEffSeg); 6643 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op64_addr64, pIemCpu->iEffSeg); 6644 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 6645 } 6646 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 6647 } 6648 } 6649 6650 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ) 6651 { 6652 IEMOP_MNEMONIC("repne cmps Xv,Yv"); 6653 switch (pIemCpu->enmEffOpSize) 6654 { 6655 case IEMMODE_16BIT: 6656 switch (pIemCpu->enmEffAddrMode) 6657 { 6658 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op16_addr16, pIemCpu->iEffSeg); 6659 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op16_addr32, pIemCpu->iEffSeg); 6660 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op16_addr64, pIemCpu->iEffSeg); 6661 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 6662 } 6663 break; 6664 case IEMMODE_32BIT: 6665 switch (pIemCpu->enmEffAddrMode) 6666 { 6667 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op32_addr16, pIemCpu->iEffSeg); 6668 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op32_addr32, pIemCpu->iEffSeg); 6669 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op32_addr64, pIemCpu->iEffSeg); 6670 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 6671 } 6672 case IEMMODE_64BIT: 6673 switch (pIemCpu->enmEffAddrMode) 6674 { 6675 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3); 6676 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op64_addr32, pIemCpu->iEffSeg); 6677 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op64_addr64, pIemCpu->iEffSeg); 6678 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 6679 } 6680 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 6681 } 6682 } 6683 6684 IEMOP_MNEMONIC("cmps Xv,Yv"); 6685 6686 /* 6687 * Annoying double switch here. 6688 * Using ugly macro for implementing the cases, sharing it with cmpsb. 
6689 */ 6690 switch (pIemCpu->enmEffOpSize) 6691 { 6692 case IEMMODE_16BIT: 6693 switch (pIemCpu->enmEffAddrMode) 6694 { 6695 case IEMMODE_16BIT: IEM_CMPS_CASE(16, 16); break; 6696 case IEMMODE_32BIT: IEM_CMPS_CASE(16, 32); break; 6697 case IEMMODE_64BIT: IEM_CMPS_CASE(16, 64); break; 6698 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 6699 } 6700 break; 6701 6702 case IEMMODE_32BIT: 6703 switch (pIemCpu->enmEffAddrMode) 6704 { 6705 case IEMMODE_16BIT: IEM_CMPS_CASE(32, 16); break; 6706 case IEMMODE_32BIT: IEM_CMPS_CASE(32, 32); break; 6707 case IEMMODE_64BIT: IEM_CMPS_CASE(32, 64); break; 6708 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 6709 } 6710 break; 6711 6712 case IEMMODE_64BIT: 6713 switch (pIemCpu->enmEffAddrMode) 6714 { 6715 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break; 6716 case IEMMODE_32BIT: IEM_CMPS_CASE(64, 32); break; 6717 case IEMMODE_64BIT: IEM_CMPS_CASE(64, 64); break; 6718 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 6719 } 6720 break; 6721 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 6722 } 6723 return VINF_SUCCESS; 6724 6725 } 6726 6727 #undef IEM_CMPS_CASE 5934 6728 5935 6729 /** Opcode 0xa8. */ … … 5971 6765 5972 6766 /* 5973 * Use the C implementation if a repeat eprefix is encountered. 6767 * Use the C implementation if a repeat prefix is encountered. 5974 6768 */ 5975 6769 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)) … … 6006 6800 6007 6801 /* 6008 * Use the C implementation if a repeat eprefix is encountered. 6802 * Use the C implementation if a repeat prefix is encountered. 6009 6803 */ 6010 6804 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)) … … 6107 6901 6108 6902 /* 6109 * Use the C implementation if a repeat eprefix is encountered. 6903 * Use the C implementation if a repeat prefix is encountered. 6110 6904 */ 6111 6905 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)) … … 6142 6936 6143 6937 /* 6144 * Use the C implementation if a repeat eprefix is encountered. 6938 * Use the C implementation if a repeat prefix is encountered. 6145 6939 */ 6146 6940 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)) … … 6221 7015 #undef IEM_LODS_CASE 7016 7017 /** Macro used by iemOp_scasb_AL_Xb and iemOp_scaswd_eAX_Xv */ 7018 #define IEM_SCAS_CASE(ValBits, AddrBits) \ 7019 IEM_MC_BEGIN(3, 1); \ 7020 IEM_MC_ARG(uint##ValBits##_t *, puRax, 0); \ 7021 IEM_MC_ARG(uint##ValBits##_t, uValue, 1); \ 7022 IEM_MC_ARG(uint32_t *, pEFlags, 2); \ 7023 IEM_MC_LOCAL(uint##AddrBits##_t, uAddr); \ 7024 \ 7025 IEM_MC_FETCH_GREG_U##AddrBits(uAddr, X86_GREG_xDI); \ 7026 IEM_MC_FETCH_MEM_U##ValBits(uValue, X86_SREG_ES, uAddr); \ 7027 IEM_MC_REF_GREG_U##ValBits(puRax, X86_GREG_xAX); \ 7028 IEM_MC_REF_EFLAGS(pEFlags); \ 7029 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_cmp_u##ValBits, puRax, uValue, pEFlags); \ 7030 \ 7031 IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \ 7032 IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \ 7033 } IEM_MC_ELSE() { \ 7034 IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \ 7035 } IEM_MC_ENDIF(); \ 7036 IEM_MC_ADVANCE_RIP(); \ 7037 IEM_MC_END(); 7038 6223 7039 /** Opcode 0xae. */ 6224 FNIEMOP_STUB(iemOp_scasb_AL_Xb); 7040 FNIEMOP_DEF(iemOp_scasb_AL_Xb) 7041 { 7042 IEMOP_HLP_NO_LOCK_PREFIX(); 7043 7044 /* 7045 * Use the C implementation if a repeat prefix is encountered.
*/ 7047 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ) 7048 { 7049 IEMOP_MNEMONIC("repe scasb al,Xb"); 7050 switch (pIemCpu->enmEffAddrMode) 7051 { 7052 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_al_m16); 7053 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_al_m32); 7054 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_al_m64); 7055 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 7056 } 7057 } 7058 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ) 7059 { 7060 IEMOP_MNEMONIC("repne scasb al,Xb"); 7061 switch (pIemCpu->enmEffAddrMode) 7062 { 7063 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_al_m16); 7064 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_al_m32); 7065 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_al_m64); 7066 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 7067 } 7068 } 7069 IEMOP_MNEMONIC("scasb al,Xb"); 7070 7071 /* 7072 * Sharing case implementation with scas[wdq] below. 7073 */ 7074 switch (pIemCpu->enmEffAddrMode) 7075 { 7076 case IEMMODE_16BIT: IEM_SCAS_CASE(8, 16); break; 7077 case IEMMODE_32BIT: IEM_SCAS_CASE(8, 32); break; 7078 case IEMMODE_64BIT: IEM_SCAS_CASE(8, 64); break; 7079 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 7080 } 7081 return VINF_SUCCESS; 7082 } 7083 7084 6225 7085 /** Opcode 0xaf. */ 6226 FNIEMOP_STUB(iemOp_scaswd_eAX_Xv); 7086 FNIEMOP_DEF(iemOp_scaswd_eAX_Xv) 7087 { 7088 IEMOP_HLP_NO_LOCK_PREFIX(); 7089 7090 /* 7091 * Use the C implementation if a repeat prefix is encountered. 7092 */ 7093 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ) 7094 { 7095 IEMOP_MNEMONIC("repe scas rAX,Xv"); 7096 switch (pIemCpu->enmEffOpSize) 7097 { 7098 case IEMMODE_16BIT: 7099 switch (pIemCpu->enmEffAddrMode) 7100 { 7101 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_ax_m16); 7102 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_ax_m32); 7103 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_ax_m64); 7104 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 7105 } 7106 break; 7107 case IEMMODE_32BIT: 7108 switch (pIemCpu->enmEffAddrMode) 7109 { 7110 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_eax_m16); 7111 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_eax_m32); 7112 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_eax_m64); 7113 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 7114 } 7115 case IEMMODE_64BIT: 7116 switch (pIemCpu->enmEffAddrMode) 7117 { 7118 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3); /** @todo Is this wrong? We can do 16-bit addressing in 64-bit mode, but not 32-bit, right?
*/ 7119 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_rax_m32); 7120 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_rax_m64); 7121 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 7122 } 7123 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 7124 } 7125 } 7126 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ) 7127 { 7128 IEMOP_MNEMONIC("repne scas rAX,Xv"); 7129 switch (pIemCpu->enmEffOpSize) 7130 { 7131 case IEMMODE_16BIT: 7132 switch (pIemCpu->enmEffAddrMode) 7133 { 7134 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_ax_m16); 7135 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_ax_m32); 7136 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_ax_m64); 7137 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 7138 } 7139 break; 7140 case IEMMODE_32BIT: 7141 switch (pIemCpu->enmEffAddrMode) 7142 { 7143 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_eax_m16); 7144 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_eax_m32); 7145 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_eax_m64); 7146 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 7147 } 7148 case IEMMODE_64BIT: 7149 switch (pIemCpu->enmEffAddrMode) 7150 { 7151 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3); 7152 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_rax_m32); 7153 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_rax_m64); 7154 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 7155 } 7156 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 7157 } 7158 } 7159 IEMOP_MNEMONIC("scas rAX,Xv"); 7160 7161 /* 7162 * Annoying double switch here. 7163 * Using ugly macro for implementing the cases, sharing it with scasb. 7164 */ 7165 switch (pIemCpu->enmEffOpSize) 7166 { 7167 case IEMMODE_16BIT: 7168 switch (pIemCpu->enmEffAddrMode) 7169 { 7170 case IEMMODE_16BIT: IEM_SCAS_CASE(16, 16); break; 7171 case IEMMODE_32BIT: IEM_SCAS_CASE(16, 32); break; 7172 case IEMMODE_64BIT: IEM_SCAS_CASE(16, 64); break; 7173 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 7174 } 7175 break; 7176 7177 case IEMMODE_32BIT: 7178 switch (pIemCpu->enmEffAddrMode) 7179 { 7180 case IEMMODE_16BIT: IEM_SCAS_CASE(32, 16); break; 7181 case IEMMODE_32BIT: IEM_SCAS_CASE(32, 32); break; 7182 case IEMMODE_64BIT: IEM_SCAS_CASE(32, 64); break; 7183 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 7184 } 7185 break; 7186 7187 case IEMMODE_64BIT: 7188 switch (pIemCpu->enmEffAddrMode) 7189 { 7190 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break; 7191 case IEMMODE_32BIT: IEM_SCAS_CASE(64, 32); break; 7192 case IEMMODE_64BIT: IEM_SCAS_CASE(64, 64); break; 7193 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 7194 } 7195 break; 7196 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 7197 } 7198 return VINF_SUCCESS; 7199 } 7200 7201 #undef IEM_SCAS_CASE 6227 7202 6228 7203 /** … … 8678 9653 * @param bRm The RM byte. 8679 9654 */ 8680 FNIEMOP_DEF_1(iemOp_Grp5_jmpf_Ep, uint8_t, bRm) 8681 { 8682 /* decode and use a C worker. */ 8683 AssertFailed(); // FNIEMOP_STUB 8684 return VERR_NOT_IMPLEMENTED; 8685 } 9655 FNIEMOP_STUB_1(iemOp_Grp5_jmpf_Ep, uint8_t, bRm); 9656 //{ 9657 // /* decode and use a C worker. */ 9658 // AssertFailed(); // FNIEMOP_STUB 9659 // return VERR_NOT_IMPLEMENTED; 9660 //} 8686 9661 8687 9662 -
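The semantics behind the new setcc, cmps and scas decoders above are easier to see outside the IEM_MC macro layer. The stand-alone C sketch below is not VBox code; all MY_*/My* names are invented for illustration. It models what the decoders do: SETcc materializes an EFLAGS condition as a 0/1 byte (the setle condition shown is the one IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) tests), and each non-repeated CMPSB step compares one byte pair and then advances or retreats both pointers according to EFLAGS.DF, which is what IEM_CMPS_CASE does with IEM_MC_ADD_GREG_U*/IEM_MC_SUB_GREG_U*. Only CF, ZF and SF are computed here; the real iemAImpl_cmp_u* helpers also update PF, AF and OF.

#include <stdint.h>
#include <stdio.h>

/* Illustrative flag masks; values mirror the X86_EFL_* bit positions. */
#define MY_EFL_CF 0x0001u
#define MY_EFL_ZF 0x0040u
#define MY_EFL_SF 0x0080u
#define MY_EFL_DF 0x0400u
#define MY_EFL_OF 0x0800u

/* setle condition: ZF set, or SF != OF. */
static uint8_t MySetLe(uint32_t fEfl)
{
    return ((fEfl & MY_EFL_ZF) || (!(fEfl & MY_EFL_SF) != !(fEfl & MY_EFL_OF))) ? 1 : 0;
}

/* One non-repeated CMPSB iteration: compare *src with *dst, update a
   subset of the flags, then step both pointers by +1 or -1 per DF. */
static void MyCmpsbStep(const uint8_t **ppbSrc, const uint8_t **ppbDst, uint32_t *pfEfl)
{
    uint8_t const u1   = **ppbSrc;
    uint8_t const u2   = **ppbDst;
    uint8_t const uRes = (uint8_t)(u1 - u2);
    *pfEfl &= ~(MY_EFL_CF | MY_EFL_ZF | MY_EFL_SF);
    if (!uRes)
        *pfEfl |= MY_EFL_ZF;            /* operands were equal */
    if (uRes & 0x80)
        *pfEfl |= MY_EFL_SF;            /* result is negative */
    if (u1 < u2)
        *pfEfl |= MY_EFL_CF;            /* unsigned borrow */
    int const iStep = (*pfEfl & MY_EFL_DF) ? -1 : +1;   /* direction flag */
    *ppbSrc += iStep;
    *ppbDst += iStep;
}

int main(void)
{
    const uint8_t  abSrc[] = "abc";
    const uint8_t  abDst[] = "abd";
    const uint8_t *pbSrc   = abSrc;
    const uint8_t *pbDst   = abDst;
    uint32_t       fEfl    = 0;
    for (int i = 0; i < 3; i++)
        MyCmpsbStep(&pbSrc, &pbDst, &fEfl);   /* last step compares 'c' with 'd' */
    printf("ZF=%d CF=%d setle=%d\n", !!(fEfl & MY_EFL_ZF), !!(fEfl & MY_EFL_CF), MySetLe(fEfl));
    return 0;
}

Compiled and run, the sketch prints "ZF=0 CF=1 setle=1" for the "abc"/"abd" inputs: the final byte pair compares as less-than, so a following setle would store 1.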
trunk/src/VBox/VMM/include/IEMInternal.h
r36821 r36829 171 171 * than what AMD and REM does. */ 172 172 bool fShiftOfHack; 173 bool afAlignment1[6]; 173 /** Set if no comparison to REM is currently performed. 174 * This is used to skip past really slow bits. */ 175 bool fNoRem; 176 bool afAlignment1[5]; 174 177 /** The physical address corresponding to abOpcodes[0]. */ 175 178 RTGCPHYS GCPhysOpcodes; … … 338 341 /** @} */ 339 342 340 343 /** 344 * Tests if verification mode is enabled. 345 * 346 * This expands to @c false when IEM_VERIFICATION_MODE is not defined and 347 * should therefore cause the compiler to eliminate the verification branch 348 * of an if statement. */ 349 #ifdef IEM_VERIFICATION_MODE 350 # define IEM_VERIFICATION_ENABLED(a_pIemCpu) ((a_pIemCpu)->fNoRem) 351 #else 352 # define IEM_VERIFICATION_ENABLED(a_pIemCpu) (false) 353 #endif 341 354 342 355
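The fNoRem / IEM_VERIFICATION_ENABLED pair added to IEMInternal.h follows a common pattern: keep the runtime switch behind a macro that collapses to a compile-time constant when IEM_VERIFICATION_MODE is not defined, so the verification branch is dead code in normal builds, while verification builds can flip the behaviour per VCPU at runtime. The sketch below is not VBox code; the MY_* names are invented, and the macro polarity is an illustrative assumption rather than a statement about how the real macro relates to fNoRem.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct MYCPU
{
    bool fNoRem;    /* runtime switch, in the spirit of PIEMCPU::fNoRem */
} MYCPU;

#ifdef MY_VERIFICATION_MODE
# define MY_VERIFICATION_ENABLED(a_pCpu)  (!(a_pCpu)->fNoRem)   /* runtime test */
#else
# define MY_VERIFICATION_ENABLED(a_pCpu)  (false)               /* compile-time constant */
#endif

static void MyCommitWrite(MYCPU *pCpu, const void *pvData, size_t cbData)
{
    (void)pvData;
    if (MY_VERIFICATION_ENABLED(pCpu))
        printf("recording a %zu byte write for comparison against REM\n", cbData);
    else
        printf("performing the real %zu byte write\n", cbData);
}

int main(void)
{
    MYCPU Cpu = { /* .fNoRem = */ true };
    MyCommitWrite(&Cpu, "xy", 2);
    return 0;
}

Because the disabled form is the constant false, everything reachable only through the verification branch can be discarded at compile time, which is exactly the property the doc comment in the hunk above calls out.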