Changeset 100266 in vbox for trunk/src/VBox/VMM/VMMAll/IEMAllThreadedRecompiler.cpp
Timestamp: Jun 23, 2023, 2:15:10 PM (18 months ago)
File: 1 edited
Legend: lines prefixed with '+' were added in r100266, lines prefixed with '-' were removed, unprefixed lines are unmodified context; a line of '...' marks unchanged code omitted between hunks.
trunk/src/VBox/VMM/VMMAll/IEMAllThreadedRecompiler.cpp (r100231 → r100266)
 # define LOG_GROUP LOG_GROUP_IEM_RE_THREADED
 #endif
+#define IEM_WITH_CODE_TLB_AND_OPCODE_BUF /* A bit hackish, but its all in IEMInline.h. */
 #define VMCPU_INCL_CPUM_GST_CTX
 #include <VBox/vmm/iem.h>
...
     struct
     {
-        /** @todo we actually need BASE, LIM and CS? If we don't tie a TB to a RIP
-         * range, because that's bad for PIC/PIE code on unix with address space
-         * randomization enabled, the assumption is that anything involving PC
-         * (RIP/EIP/IP, maybe + CS.BASE) will be done by reading current register
-         * values and not embedding presumed values into the code. Thus the uCsBase
-         * member here shouldn't be needed. For the same reason, uCsLimit isn't helpful
-         * either as RIP/EIP/IP may differ between address spaces. So, before TB
-         * execution we'd need to check CS.LIM against RIP+cbPC (ditto for 64-bit
-         * canonicallity).
-         *
-         * We could bake instruction limit / canonicallity checks into the generated
-         * code if we find ourselves close to the limit and should expect to run into
-         * it by the end of the translation block. That would just be using a very
-         * simple threshold distance and be a special IEMTB_F_XXX flag so we figure out
-         * it out when picking the TB.
-         *
-         * The CS value is likewise useless as we'll always be using the actual CS
-         * register value whenever it is relevant (mainly pushing to the stack in a
-         * call, trap, whatever).
-         *
-         * The segment attributes should be handled via the IEM_F_MODE_XXX and
-         * IEM_F_X86_CPL_MASK portions of fFlags, so we could skip those too, I think.
-         * All the places where they matter, we would be in CIMPL code which would
-         * consult the actual CS.ATTR and not depend on the recompiled code block.
-         */
-        /** The CS base. */
-        uint32_t uCsBase;
-        /** The CS limit (UINT32_MAX for 64-bit code). */
-        uint32_t uCsLimit;
-        /** The CS selector value. */
-        uint16_t CS;
         /**< Relevant CS X86DESCATTR_XXX bits. */
-        uint16_t fAttr;
+        uint16_t fAttr;
     } x86;
 };
 /** @} */
-
-    /** Number of bytes of opcodes covered by this block.
-     * @todo Support discontiguous chunks of opcodes in same block, though maybe
-     *       restrict to the initial page or smth. */
-    uint32_t cbPC;

     union
...
         } Thrd;
     };
+
+
+    /** Number of bytes of opcodes stored in pabOpcodes. */
+    uint16_t cbOpcodes;
+    /** The max storage available in the pabOpcodes block. */
+    uint16_t cbOpcodesAllocated;
+    /** Pointer to the opcode bytes this block was recompiled from. */
+    uint8_t *pabOpcodes;
 } IEMTB;

...
 #endif

+#define IEM_MC2_PRE_EMIT_CALLS() do { \
+        AssertMsg(pVCpu->iem.s.offOpcode == IEM_GET_INSTR_LEN(pVCpu), \
+                  ("%u vs %u (%04x:%08RX64)\n", pVCpu->iem.s.offOpcode, IEM_GET_INSTR_LEN(pVCpu), \
+                   pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip)); \
+    } while (0)
 #define IEM_MC2_EMIT_CALL_0(a_enmFunction) do { \
         IEMTHREADEDFUNCS const enmFunctionCheck = a_enmFunction; RT_NOREF(enmFunctionCheck); \
...
 }

+/**
+ * Calculates the effective address of a ModR/M memory operand, extended version
+ * for use in the recompilers.
+ *
+ * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
+ *
+ * May longjmp on internal error.
+ *
+ * @return  The effective address.
+ * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
+ * @param   bRm                 The ModRM byte.
+ * @param   cbImmAndRspOffset   - First byte: The size of any immediate
+ *                                following the effective address opcode bytes
+ *                                (only for RIP relative addressing).
+ *                              - Second byte: RSP displacement (for POP [ESP]).
+ * @param   puInfo              Extra info: 32-bit displacement (bits 31:0) and
+ *                              SIB byte (bits 39:32).
+ *
+ * @note This must be defined in a source file with matching
+ *       IEM_WITH_CODE_TLB_AND_OPCODE_BUF define till the define is made default
+ *       or implemented differently...
+ */
+RTGCPTR iemOpHlpCalcRmEffAddrJmpEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, uint64_t *puInfo) IEM_NOEXCEPT_MAY_LONGJMP
+{
+    Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
+# define SET_SS_DEF() \
+    do \
+    { \
+        if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
+            pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
+    } while (0)
+
+    if (!IEM_IS_64BIT_CODE(pVCpu))
+    {
+        /** @todo Check the effective address size crap! */
+        if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
+        {
+            uint16_t u16EffAddr;
+
+            /* Handle the disp16 form with no registers first. */
+            if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
+            {
+                IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
+                *puInfo = u16EffAddr;
+            }
+            else
+            {
+                /* Get the displacment. */
+                switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
+                {
+                    case 0:  u16EffAddr = 0;                             break;
+                    case 1:  IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
+                    case 2:  IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);       break;
+                    default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
+                }
+                *puInfo = u16EffAddr;
+
+                /* Add the base and index registers to the disp. */
+                switch (bRm & X86_MODRM_RM_MASK)
+                {
+                    case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
+                    case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
+                    case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
+                    case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
+                    case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
+                    case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
+                    case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
+                    case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
+                }
+            }
+
+            Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16 uInfo=%#RX64\n", u16EffAddr, *puInfo));
+            return u16EffAddr;
+        }
+
+        Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
+        uint32_t u32EffAddr;
+        uint64_t uInfo;
+
+        /* Handle the disp32 form with no registers first. */
+        if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
+        {
+            IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
+            uInfo = u32EffAddr;
+        }
+        else
+        {
+            /* Get the register (or SIB) value. */
+            uInfo = 0;
+            switch ((bRm & X86_MODRM_RM_MASK))
+            {
+                case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
+                case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
+                case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
+                case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
+                case 4: /* SIB */
+                {
+                    uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
+                    uInfo = (uint64_t)bSib << 32;
+
+                    /* Get the index and scale it. */
+                    switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
+                    {
+                        case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
+                        case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
+                        case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
+                        case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
+                        case 4: u32EffAddr = 0; /*none */ break;
+                        case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
+                        case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
+                        case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
+                        IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
+                    }
+                    u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
+
+                    /* add base */
+                    switch (bSib & X86_SIB_BASE_MASK)
+                    {
+                        case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
+                        case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
+                        case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
+                        case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
+                        case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
+                        case 5:
+                            if ((bRm & X86_MODRM_MOD_MASK) != 0)
+                            {
+                                u32EffAddr += pVCpu->cpum.GstCtx.ebp;
+                                SET_SS_DEF();
+                            }
+                            else
+                            {
+                                uint32_t u32Disp;
+                                IEM_OPCODE_GET_NEXT_U32(&u32Disp);
+                                u32EffAddr += u32Disp;
+                                uInfo |= u32Disp;
+                            }
+                            break;
+                        case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
+                        case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
+                        IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
+                    }
+                    break;
+                }
+                case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
+                case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
+                case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
+                IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
+            }
+
+            /* Get and add the displacement. */
+            switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
+            {
+                case 0:
+                    break;
+                case 1:
+                {
+                    int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
+                    u32EffAddr += i8Disp;
+                    uInfo |= (uint32_t)(int32_t)i8Disp;
+                    break;
+                }
+                case 2:
+                {
+                    uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
+                    u32EffAddr += u32Disp;
+                    uInfo |= u32Disp;
+                    break;
+                }
+                default:
+                    AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
+            }
+        }
+
+        *puInfo = uInfo;
+        Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32 uInfo=%#RX64\n", u32EffAddr, uInfo));
+        return u32EffAddr;
+    }
+
+    uint64_t u64EffAddr;
+    uint64_t uInfo;
+
+    /* Handle the rip+disp32 form with no registers first. */
+    if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
+    {
+        IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
+        uInfo = (uint32_t)u64EffAddr;
+        u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
+    }
+    else
+    {
+        /* Get the register (or SIB) value. */
+        uInfo = 0;
+        switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
+        {
+            case  0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
+            case  1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
+            case  2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
+            case  3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
+            case  5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
+            case  6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
+            case  7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
+            case  8: u64EffAddr = pVCpu->cpum.GstCtx.r8;  break;
+            case  9: u64EffAddr = pVCpu->cpum.GstCtx.r9;  break;
+            case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
+            case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
+            case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
+            case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
+            case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
+            /* SIB */
+            case 4:
+            case 12:
+            {
+                uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
+                uInfo = (uint64_t)bSib << 32;
+
+                /* Get the index and scale it. */
+                switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
+                {
+                    case  0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
+                    case  1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
+                    case  2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
+                    case  3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
+                    case  4: u64EffAddr = 0; /*none */ break;
+                    case  5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
+                    case  6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
+                    case  7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
+                    case  8: u64EffAddr = pVCpu->cpum.GstCtx.r8;  break;
+                    case  9: u64EffAddr = pVCpu->cpum.GstCtx.r9;  break;
+                    case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
+                    case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
+                    case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
+                    case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
+                    case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
+                    case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
+                    IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
+                }
+                u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
+
+                /* add base */
+                switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
+                {
+                    case  0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
+                    case  1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
+                    case  2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
+                    case  3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
+                    case  4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
+                    case  6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
+                    case  7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
+                    case  8: u64EffAddr += pVCpu->cpum.GstCtx.r8;  break;
+                    case  9: u64EffAddr += pVCpu->cpum.GstCtx.r9;  break;
+                    case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
+                    case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
+                    case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
+                    case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
+                    case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
+                    /* complicated encodings */
+                    case 5:
+                    case 13:
+                        if ((bRm & X86_MODRM_MOD_MASK) != 0)
+                        {
+                            if (!pVCpu->iem.s.uRexB)
+                            {
+                                u64EffAddr += pVCpu->cpum.GstCtx.rbp;
+                                SET_SS_DEF();
+                            }
+                            else
+                                u64EffAddr += pVCpu->cpum.GstCtx.r13;
+                        }
+                        else
+                        {
+                            uint32_t u32Disp;
+                            IEM_OPCODE_GET_NEXT_U32(&u32Disp);
+                            u64EffAddr += (int32_t)u32Disp;
+                            uInfo |= u32Disp;
+                        }
+                        break;
+                    IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
+                }
+                break;
+            }
+            IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
+        }
+
+        /* Get and add the displacement. */
+        switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
+        {
+            case 0:
+                break;
+            case 1:
+            {
+                int8_t i8Disp;
+                IEM_OPCODE_GET_NEXT_S8(&i8Disp);
+                u64EffAddr += i8Disp;
+                uInfo |= (uint32_t)(int32_t)i8Disp;
+                break;
+            }
+            case 2:
+            {
+                uint32_t u32Disp;
+                IEM_OPCODE_GET_NEXT_U32(&u32Disp);
+                u64EffAddr += (int32_t)u32Disp;
+                uInfo |= u32Disp;
+                break;
+            }
+            IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
+        }
+
+    }
+
+    *puInfo = uInfo;
+    if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
+    {
+        Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv uInfo=%#RX64\n", u64EffAddr, uInfo));
+        return u64EffAddr;
+    }
+    Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
+    Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv uInfo=%#RX64\n", u64EffAddr & UINT32_MAX, uInfo));
+    return u64EffAddr & UINT32_MAX;
+}
+

 /*
...
     if (pTb)
     {
-        pTb->Thrd.paCalls = (PIEMTHRDEDCALLENTRY)RTMemAlloc(sizeof(IEMTHRDEDCALLENTRY) * 128);
+        unsigned const cCalls = 128;
+        pTb->Thrd.paCalls = (PIEMTHRDEDCALLENTRY)RTMemAlloc(sizeof(IEMTHRDEDCALLENTRY) * cCalls);
         if (pTb->Thrd.paCalls)
         {
-            pTb->Thrd.cAllocated = 128;
-            pTb->Thrd.cCalls = 0;
-            pTb->pNext = NULL;
-            RTListInit(&pTb->LocalList);
-            pTb->cbPC = 0;
-            pTb->GCPhysPc = GCPhysPc;
-            pTb->x86.uCsBase = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
-            pTb->x86.uCsLimit = (uint32_t)pVCpu->cpum.GstCtx.cs.u32Limit;
-            pTb->x86.CS = (uint32_t)pVCpu->cpum.GstCtx.cs.Sel;
-            pTb->x86.fAttr = (uint16_t)pVCpu->cpum.GstCtx.cs.Attr.u;
-            pTb->fFlags = (pVCpu->iem.s.fExec & IEMTB_F_IEM_F_MASK) | fExtraFlags;
-            pVCpu->iem.s.cTbAllocs++;
-            return pTb;
+            pTb->pabOpcodes = (uint8_t *)RTMemAlloc(cCalls * 16); /* This will be reallocated later. */
+            if (pTb->pabOpcodes)
+            {
+                pTb->Thrd.cAllocated = cCalls;
+                pTb->cbOpcodesAllocated = cCalls * 16;
+                pTb->Thrd.cCalls = 0;
+                pTb->cbOpcodes = 0;
+                pTb->pNext = NULL;
+                RTListInit(&pTb->LocalList);
+                pTb->GCPhysPc = GCPhysPc;
+                pTb->x86.fAttr = (uint16_t)pVCpu->cpum.GstCtx.cs.Attr.u;
+                pTb->fFlags = (pVCpu->iem.s.fExec & IEMTB_F_IEM_F_MASK) | fExtraFlags;
+                pVCpu->iem.s.cTbAllocs++;
+                return pTb;
+            }
+            RTMemFree(pTb->Thrd.paCalls);
         }
         RTMemFree(pTb);
...
     AssertPtr(pTb);

-    AssertCompile((IEMTB_F_STATE_OBSOLETE >> IEMTB_F_STATE_SHIFT) == (IEMTB_F_STATE_MASK >> IEMTB_F_STATE_SHIFT));
+    AssertCompile(IEMTB_F_STATE_OBSOLETE == IEMTB_F_STATE_MASK);
     pTb->fFlags |= IEMTB_F_STATE_OBSOLETE; /* works, both bits set */

+    /* Unlink it from the hash table: */
+    uint32_t const idxHash = IEMTBCACHE_HASH(&g_TbCache, pTb->fFlags, pTb->GCPhysPc);
+    PIEMTB pTbCur = g_TbCache.apHash[idxHash];
+    if (pTbCur == pTb)
+        g_TbCache.apHash[idxHash] = pTb->pNext;
+    else
+        while (pTbCur)
+        {
+            PIEMTB const pNextTb = pTbCur->pNext;
+            if (pNextTb == pTb)
+            {
+                pTbCur->pNext = pTb->pNext;
+                break;
+            }
+            pTbCur = pNextTb;
+        }
+
+    /* Free it. */
     RTMemFree(pTb->Thrd.paCalls);
     pTb->Thrd.paCalls = NULL;
+
+    RTMemFree(pTb->pabOpcodes);
+    pTb->pabOpcodes = NULL;

     RTMemFree(pTb);
...
 #ifdef VBOX_WITH_STATISTICS
             pVCpu->iem.s.cTbLookupHits++;
+#endif
             return pTb;
-#endif
         }
         Log11(("TB miss: CS: %#x, wanted %#x\n", pTb->x86.fAttr, (uint16_t)pVCpu->cpum.GstCtx.cs.Attr.u));
...
     pTb->pNext = g_TbCache.apHash[idxHash];
     g_TbCache.apHash[idxHash] = pTb;
-    Log12(("TB added: %p %RGp LB %#x fl=%#x idxHash=%#x\n", pTb, pTb->GCPhysPc, pTb->cbPC, pTb->fFlags, idxHash));
+    Log12(("TB added: %p %RGp LB %#x fl=%#x idxHash=%#x\n", pTb, pTb->GCPhysPc, pTb->cbOpcodes, pTb->fFlags, idxHash));
     RT_NOREF(pVM, pVCpu);
 }
...
     pVCpu->iem.s.offInstrNextByte = 0;
     pVCpu->iem.s.offCurInstrStart = 0;
+#ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
+    pVCpu->iem.s.offOpcode = 0;
+#endif
 #ifdef VBOX_STRICT
     pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
...
 #endif
     }
+#ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
+    pVCpu->iem.s.offOpcode = 0;
+#endif
 }
...
             uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
             uint16_t const cCallsPrev = pTb->Thrd.cCalls;
+
             rcStrict = FNIEMOP_CALL(g_apfnIemThreadedRecompilerOneByteMap[b]);
             if (   rcStrict == VINF_SUCCESS
...
                 Assert(pTb->Thrd.cCalls > cCallsPrev);
                 Assert(cCallsPrev - pTb->Thrd.cCalls < 5);
+
+                memcpy(&pTb->pabOpcodes[pTb->cbOpcodes], pVCpu->iem.s.abOpcode, pVCpu->iem.s.offOpcode);
+                pTb->cbOpcodes += pVCpu->iem.s.offOpcode;
+                Assert(pTb->cbOpcodes <= pTb->cbOpcodesAllocated);
             }
             else if (pTb->Thrd.cCalls > 0)
             {
                 Log8(("%04x:%08RX64: End TB - %u calls, rc=%d\n", uCsLog, uRipLog, pTb->Thrd.cCalls, VBOXSTRICTRC_VAL(rcStrict)));
+
+                if (cCallsPrev != pTb->Thrd.cCalls)
+                {
+                    memcpy(&pTb->pabOpcodes[pTb->cbOpcodes], pVCpu->iem.s.abOpcode, pVCpu->iem.s.offOpcode);
+                    pTb->cbOpcodes += pVCpu->iem.s.offOpcode;
+                    Assert(pTb->cbOpcodes <= pTb->cbOpcodesAllocated);
+                }
                 break;
             }
...
 static VBOXSTRICTRC iemThreadedTbExec(PVMCPUCC pVCpu, PIEMTB pTb)
 {
+    if (memcmp(pTb->pabOpcodes, &pVCpu->iem.s.pbInstrBuf[pVCpu->iem.s.offInstrNextByte],
+               RT_MIN(pTb->cbOpcodes, pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte)) == 0)
+    { /* likely */ }
+    else
+    {
+        Log11(("TB obsolete: %p GCPhys=%RGp\n", pTb, pTb->GCPhysPc));
+        iemThreadedTbFree(pVCpu->pVMR3, pVCpu, pTb);
+        return VINF_SUCCESS;
+    }
+
     /* Set the current TB so CIMPL function may get at it. */
     pVCpu->iem.s.pCurTbR3 = pTb;
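Aside (not part of the changeset): the doc comment of the new iemOpHlpCalcRmEffAddrJmpEx describes two packed values, the cbImmAndRspOffset argument (byte 0 = size of a trailing immediate for RIP-relative addressing, byte 1 = RSP displacement) and the *puInfo output (displacement in bits 31:0, SIB byte in bits 39:32). The standalone C sketch below only illustrates that bit layout; the variable names and numeric values in it are made-up examples, not taken from the VirtualBox sources.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Packing cbImmAndRspOffset as described in the doc comment:
       byte 0 = size of any immediate following the effective-address bytes,
       byte 1 = RSP displacement (POP [ESP] style). Values are examples only. */
    uint32_t const cbImm = 4;            /* hypothetical: a disp32/imm32 follows */
    uint32_t const offRspAdjust = 8;     /* hypothetical RSP displacement */
    uint32_t const cbImmAndRspOffset = cbImm | (offRspAdjust << 8);

    /* Unpacking a *puInfo value: 32-bit displacement in bits 31:0,
       SIB byte (when one was consumed) in bits 39:32. Example value only. */
    uint64_t const uInfo = UINT64_C(0x2400000010);
    uint32_t const u32Disp = (uint32_t)uInfo;
    uint8_t  const bSib = (uint8_t)(uInfo >> 32);

    printf("cbImmAndRspOffset=%#x disp=%#x sib=%#x\n",
           (unsigned)cbImmAndRspOffset, (unsigned)u32Disp, (unsigned)bSib);
    return 0;
}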