Changeset 101640 in vbox for trunk/src/VBox/VMM/include
- Timestamp: Oct 28, 2023 1:01:28 AM (15 months ago)
- Location: trunk/src/VBox/VMM/include
- Files: 2 edited
trunk/src/VBox/VMM/include/IEMInternal.h
(r101547 → r101640)

@@ -929 +929 @@
     {
         /* kIemTbDbgEntryType_ThreadedCall. */
-        uint32_t uType       : 4;
-        uint32_t uUnused     : 12;
+        uint32_t uType       : 4;
+        /** Set if the call was recompiled to native code, clear if just calling
+         *  threaded function. */
+        uint32_t fRecompiled : 1;
+        uint32_t uUnused     : 11;
         /** The threaded call number (IEMTHREADEDFUNCS). */
         uint32_t enmCall     : 16;
     } ThreadedCall;
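The reworked debug-info entry still packs into a single 32-bit word (4 + 1 + 11 + 16 bits). Below is a minimal standalone sketch of the new layout with a hypothetical consumer that checks the recompiled flag; the concrete type and call values used here are illustrative only, not taken from the changeset:

    #include <cstdint>
    #include <cstdio>

    /* Standalone copy of the reshaped ThreadedCall debug entry layout. */
    struct ThreadedCallDbgEntry
    {
        uint32_t uType       : 4;   /* kIemTbDbgEntryType_ThreadedCall */
        uint32_t fRecompiled : 1;   /* set when the call was recompiled to native code */
        uint32_t uUnused     : 11;
        uint32_t enmCall     : 16;  /* the threaded call number (IEMTHREADEDFUNCS) */
    };
    static_assert(sizeof(ThreadedCallDbgEntry) == sizeof(uint32_t), "entry must stay one dword");

    int main()
    {
        ThreadedCallDbgEntry Entry = {};
        Entry.fRecompiled = 1;      /* illustrative values, not real enum members */
        Entry.enmCall     = 42;
        std::printf("call %u was %s\n", (unsigned)Entry.enmCall, Entry.fRecompiled ? "recompiled" : "threaded");
        return 0;
    }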
trunk/src/VBox/VMM/include/IEMN8veRecompiler.h
(r101626 → r101640)

@@ -263 +263 @@
 {
     kIemNativeLabelType_Invalid = 0,
+    /* Labels w/o data, only once instance per TB: */
     kIemNativeLabelType_Return,
     kIemNativeLabelType_ReturnBreak,
+    kIemNativeLabelType_NonZeroRetOrPassUp,
+    kIemNativeLabelType_RaiseGp0,
+    /* Labels with data, potentially multiple instances per TB: */
     kIemNativeLabelType_If,
     kIemNativeLabelType_Else,
     kIemNativeLabelType_Endif,
-    kIemNativeLabelType_NonZeroRetOrPassUp,
-    kIemNativeLabelType_RaiseGp0,
+    kIemNativeLabelType_CheckIrq,
     kIemNativeLabelType_End
 } IEMNATIVELABELTYPE;

@@ -338 +341 @@
     kIemNativeGstReg_End
 } IEMNATIVEGSTREG;
+
+/**
+ * Intended use statement for iemNativeRegAllocTmpForGuestReg().
+ */
+typedef enum IEMNATIVEGSTREGUSE
+{
+    /** The usage is read-only, the register holding the guest register
+     * shadow copy will not be modified by the caller. */
+    kIemNativeGstRegUse_ReadOnly = 0,
+    /** The caller will update the guest register (think: PC += cbInstr).
+     * The guest shadow copy will follow the returned register. */
+    kIemNativeGstRegUse_ForUpdate,
+    /** The caller will use the guest register value as input in a calculation
+     * and the host register will be modified.
+     * This means that the returned host register will not be marked as a shadow
+     * copy of the guest register. */
+    kIemNativeGstRegUse_Calculation
+} IEMNATIVEGSTREGUSE;

 /**

@@ -591 +612 @@
     /** Condition sequence number (for generating unique labels). */
     uint16_t                    uCondSeqNo;
-    uint32_t                    uPadding3;
+    /** Check IRQ seqeunce number (for generating unique lables). */
+    uint16_t                    uCheckIrqSeqNo;
+    uint16_t                    uPadding3;

     /** Core state requiring care with branches. */

@@ -624 +647 @@
 #define IEM_DECL_IEMNATIVERECOMPFUNC_PROTO(a_Name) FNIEMNATIVERECOMPFUNC a_Name

-
 DECLHIDDEN(uint32_t) iemNativeLabelCreate(PIEMRECOMPILERSTATE pReNative, IEMNATIVELABELTYPE enmType,
                                           uint32_t offWhere = UINT32_MAX, uint16_t uData = 0) RT_NOEXCEPT;

@@ -637 +659 @@
 DECLHIDDEN(uint8_t)  iemNativeRegAllocTmpImm(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, uint64_t uImm,
                                              bool fPreferVolatile = true) RT_NOEXCEPT;
-DECLHIDDEN(uint8_t)  iemNativeRegAllocTmpForGuest(PIEMRECOMPILERSTATE pReNative, uint32_t *poff,
-                                                  IEMNATIVEGSTREG enmGstReg) RT_NOEXCEPT;
+DECLHIDDEN(uint8_t)  iemNativeRegAllocTmpForGuestReg(PIEMRECOMPILERSTATE pReNative, uint32_t *poff,
+                                                     IEMNATIVEGSTREG enmGstReg,
+                                                     IEMNATIVEGSTREGUSE enmIntendedUse) RT_NOEXCEPT;
+DECLHIDDEN(uint8_t)  iemNativeRegAllocTmpForGuestRegIfAlreadyPresent(PIEMRECOMPILERSTATE pReNative, uint32_t *poff,
+                                                                     IEMNATIVEGSTREG enmGstReg) RT_NOEXCEPT;
+
 DECLHIDDEN(uint8_t)  iemNativeRegAllocVar(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, uint8_t idxVar) RT_NOEXCEPT;
 DECLHIDDEN(uint32_t) iemNativeRegAllocArgs(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cArgs) RT_NOEXCEPT;
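The new IEMNATIVEGSTREGUSE argument to iemNativeRegAllocTmpForGuestReg() tells the allocator whether the host register it hands back should keep being a shadow copy of the guest register. A much simplified, self-contained sketch of that decision follows; the allocator, its state and the register numbers are stand-ins for illustration, not the real recompiler API:

    #include <cstdint>
    #include <cstdio>

    /* Mirrors the intended-use enum added by the changeset. */
    enum GstRegUse { GstRegUse_ReadOnly, GstRegUse_ForUpdate, GstRegUse_Calculation };

    struct TinyAllocState { bool afIsGstShadow[16]; };   /* simplified shadow-copy bookkeeping */

    /* Hypothetical allocator: after a Calculation use the host register no longer shadows
       the guest register, because the caller is about to clobber its value. */
    static uint8_t allocTmpForGuestReg(TinyAllocState *pState, uint8_t idxHstReg, GstRegUse enmUse)
    {
        pState->afIsGstShadow[idxHstReg] = enmUse != GstRegUse_Calculation;
        return idxHstReg;
    }

    int main()
    {
        TinyAllocState State = {};
        uint8_t idxPc = allocTmpForGuestReg(&State, 3 /*arbitrary host register*/, GstRegUse_ForUpdate);
        std::printf("host reg %u still shadows the guest register: %s\n",
                    idxPc, State.afIsGstShadow[idxPc] ? "yes" : "no");
        return 0;
    }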
@@ -970 +996 @@
     pbCodeBuf[off++] = X86_OP_REX_W | X86_OP_REX_R;
     pbCodeBuf[off++] = 0x8b;
-    off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off,iGpr, offVCpu);
+    off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, iGpr, offVCpu);
     IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);

@@ -1263 +1289 @@
 /**
  * Emits a load effective address to a GRP with an BP relative source address.
  */
-DECLINLINE(uint32_t) iemNativeEmitLeaGrpByBp(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, int32_t offDisp)
+DECLINLINE(uint32_t) iemNativeEmitLeaGprByBp(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, int32_t offDisp)
 {
     /* lea gprdst, [rbp + offDisp] */

@@ -1374 +1400 @@
 
 
+#ifdef RT_ARCH_AMD64
+/**
+ * Common bit of iemNativeEmitLoadGprByGpr and friends.
+ */
+DECL_FORCE_INLINE(uint32_t) iemNativeEmitGprByGprDisp(uint8_t *pbCodeBuf, uint32_t off,
+                                                      uint8_t iGprReg, uint8_t iGprBase, int32_t offDisp)
+{
+    if (offDisp == 0 && (iGprBase & 7) != X86_GREG_xBP) /* Can use encoding w/o displacement field. */
+    {
+        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM0, iGprReg & 7, iGprBase & 7);
+        if ((iGprBase & 7) == X86_GREG_xSP) /* for RSP/R12 relative addressing we have to use a SIB byte. */
+            pbCodeBuf[off++] = X86_SIB_MAKE(X86_GREG_xSP, X86_GREG_xSP, 0); /* -> [RSP/R12] */
+    }
+    else if (offDisp == (int8_t)offDisp)
+    {
+        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM1, iGprReg & 7, iGprBase & 7);
+        if ((iGprBase & 7) == X86_GREG_xSP) /* for RSP/R12 relative addressing we have to use a SIB byte. */
+            pbCodeBuf[off++] = X86_SIB_MAKE(X86_GREG_xSP, X86_GREG_xSP, 0); /* -> [RSP/R12] */
+        pbCodeBuf[off++] = (uint8_t)offDisp;
+    }
+    else
+    {
+        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM1, iGprReg & 7, iGprBase & 7);
+        if ((iGprBase & 7) == X86_GREG_xSP) /* for RSP/R12 relative addressing we have to use a SIB byte. */
+            pbCodeBuf[off++] = X86_SIB_MAKE(X86_GREG_xSP, X86_GREG_xSP, 0); /* -> [RSP/R12] */
+        pbCodeBuf[off++] = RT_BYTE1((uint32_t)offDisp);
+        pbCodeBuf[off++] = RT_BYTE2((uint32_t)offDisp);
+        pbCodeBuf[off++] = RT_BYTE3((uint32_t)offDisp);
+        pbCodeBuf[off++] = RT_BYTE4((uint32_t)offDisp);
+    }
+    return off;
+}
+#elif RT_ARCH_ARM64
+/**
+ * Common bit of iemNativeEmitLoadGprFromVCpuU64 and friends.
+ */
+DECL_FORCE_INLINE(uint32_t) iemNativeEmitGprByGprLdSt(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprReg,
+                                                      uint8_t iGprBase, int32_t offDisp,
+                                                      ARMV8A64INSTRLDSTTYPE enmOperation, unsigned cbData)
+{
+    /*
+     * There are a couple of ldr variants that takes an immediate offset, so
+     * try use those if we can, otherwise we have to use the temporary register
+     * help with the addressing.
+     */
+    if ((uint32_t)offDisp < _4K * cbData && !((uint32_t)offDisp & (cbData - 1)))
+    {
+        /* Use the unsigned variant of ldr Wt, [<Xn|SP>, #off]. */
+        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
+        AssertReturn(pu32CodeBuf, UINT32_MAX);
+        pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(enmOperation, iGprReg, iGprBase, (uint32_t)offDisp / cbData);
+    }
+    else
+    {
+        /* The offset is too large, so we must load it into a register and use
+           ldr Wt, [<Xn|SP>, (<Wm>|<Xm>)]. */
+        /** @todo reduce by offVCpu by >> 3 or >> 2? if it saves instructions? */
+        uint8_t const idxTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, (uint64_t)offDisp);
+        AssertReturn(idxTmpReg < RT_ELEMENTS(pReNative->Core.aHstRegs), UINT32_MAX);
+
+        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
+        AssertReturn(pu32CodeBuf, UINT32_MAX);
+        pu32CodeBuf[off++] = Armv8A64MkInstrStLdRegIdx(enmOperation, iGprReg, iGprBase, idxTmpReg);
+
+        iemNativeRegFreeTmpImm(pReNative, idxTmpReg);
+    }
+    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
+    return off;
+}
+#endif
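The AMD64 helper above picks the shortest ModRM/SIB form it can: no displacement byte when the offset is zero (except for an RBP/R13 base), a single disp8 byte when the offset fits in a signed byte, and a 4-byte displacement otherwise, plus a SIB byte whenever the base is RSP/R12. A standalone sketch of just that size decision, assuming the usual x86-64 register numbering (it only reports the choice, it does not emit anything):

    #include <cstdint>
    #include <cstdio>

    /* Returns how many bytes follow the ModRM byte (SIB + displacement) for [base + disp]. */
    static unsigned modrmExtraBytes(uint8_t iGprBase, int32_t offDisp)
    {
        unsigned cbExtra = (iGprBase & 7) == 4 ? 1 : 0;   /* RSP/R12 base always needs a SIB byte */
        if (offDisp == 0 && (iGprBase & 7) != 5)          /* RBP/R13 cannot use the no-displacement form */
            return cbExtra;
        if (offDisp == (int8_t)offDisp)
            return cbExtra + 1;                           /* disp8 */
        return cbExtra + 4;                               /* disp32 */
    }

    int main()
    {
        std::printf("[rbx + 0]    needs %u extra byte(s)\n", modrmExtraBytes(3 /*RBX*/, 0));
        std::printf("[rsp + 8]    needs %u extra byte(s)\n", modrmExtraBytes(4 /*RSP*/, 8));
        std::printf("[rsi + 4096] needs %u extra byte(s)\n", modrmExtraBytes(6 /*RSI*/, 4096));
        return 0;
    }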
+
+
+/**
+ * Emits a 64-bit GPR load via a GPR base address with a displacement.
+ */
+DECLINLINE(uint32_t) iemNativeEmitLoadGprByGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off,
+                                               uint8_t iGprDst, uint8_t iGprBase, int32_t offDisp)
+{
+#ifdef RT_ARCH_AMD64
+    /* mov reg64, mem64 */
+    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
+    AssertReturn(pbCodeBuf, UINT32_MAX);
+    pbCodeBuf[off++] = X86_OP_REX_W | (iGprDst < 8 ? 0 : X86_OP_REX_R) | (iGprBase < 8 ? 0 : X86_OP_REX_B);
+    pbCodeBuf[off++] = 0x8b;
+    off = iemNativeEmitGprByGprDisp(pbCodeBuf, off, iGprDst, iGprBase, offDisp);
+    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
+
+#elif RT_ARCH_ARM64
+    off = iemNativeEmitGprByGprLdSt(pReNative, off, iGprDst, iGprBase, offDisp, kArmv8A64InstrLdStType_Ld_Dword, sizeof(uint64_t));
+
+#else
+# error "port me"
+#endif
+    return off;
+}
+
+
+/**
+ * Emits a 32-bit GPR load via a GPR base address with a displacement.
+ * @note Bits 63 thru 32 in @a iGprDst will be cleared.
+ */
+DECLINLINE(uint32_t) iemNativeEmitLoadGpr32ByGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off,
+                                                 uint8_t iGprDst, uint8_t iGprBase, int32_t offDisp)
+{
+#ifdef RT_ARCH_AMD64
+    /* mov reg32, mem32 */
+    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
+    AssertReturn(pbCodeBuf, UINT32_MAX);
+    if (iGprDst >= 8 || iGprBase >= 8)
+        pbCodeBuf[off++] = (iGprDst < 8 ? 0 : X86_OP_REX_R) | (iGprBase < 8 ? 0 : X86_OP_REX_B);
+    pbCodeBuf[off++] = 0x8b;
+    off = iemNativeEmitGprByGprDisp(pbCodeBuf, off, iGprDst, iGprBase, offDisp);
+    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
+
+#elif RT_ARCH_ARM64
+    off = iemNativeEmitGprByGprLdSt(pReNative, off, iGprDst, iGprBase, offDisp, kArmv8A64InstrLdStType_Ld_Word, sizeof(uint32_t));
+
+#else
+# error "port me"
+#endif
+    return off;
+}
+
+
 /*********************************************************************************************************************************
 *   Subtraction and Additions                                                                                                    *

@@ -1683 +1833 @@
 /**
  * Emits code for AND'ing two 64-bit GPRs.
+ *
+ * @note When fSetFlags=true, JZ/JNZ jumps can be used afterwards on both AMD64
+ *       and ARM64 hosts.
  */
-DECLINLINE(uint32_t ) iemNativeEmitAndGprByGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc)
+DECLINLINE(uint32_t ) iemNativeEmitAndGprByGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc,
+                                               bool fSetFlags = false)
 {
 #if defined(RT_ARCH_AMD64)

@@ -1693 +1847 @@
     pbCodeBuf[off++] = 0x23;
     pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprDst & 7, iGprSrc & 7);
+    RT_NOREF(fSetFlags);
 
 #elif defined(RT_ARCH_ARM64)
     uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
     AssertReturn(pu32CodeBuf, UINT32_MAX);
-    pu32CodeBuf[off++] = Armv8A64MkInstrAnd(iGprDst, iGprDst, iGprSrc);
+    if (!fSetFlags)
+        pu32CodeBuf[off++] = Armv8A64MkInstrAnd(iGprDst, iGprDst, iGprSrc);
+    else
+        pu32CodeBuf[off++] = Armv8A64MkInstrAnds(iGprDst, iGprDst, iGprSrc);
 
 #else
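The point of the new fSetFlags parameter is that the AND result can feed a following conditional branch: on AMD64 the and instruction updates ZF anyway, while on ARM64 the emitter has to switch from AND to the flag-setting ANDS. A small sketch of the ARM64 side of that choice; the base opcodes are the architectural ones for the 64-bit shifted-register forms with a zero shift, everything else is simplified:

    #include <cstdint>
    #include <cstdio>

    /* Encodes AND/ANDS Xd, Xn, Xm (64-bit, shifted-register form, shift amount 0).
       ANDS is the flag-setting variant picked when the caller wants a JZ/JNZ-style branch next. */
    static uint32_t mkAndReg(uint8_t iRegD, uint8_t iRegN, uint8_t iRegM, bool fSetFlags)
    {
        uint32_t const uBase = fSetFlags ? UINT32_C(0xEA000000) /* ANDS */ : UINT32_C(0x8A000000) /* AND */;
        return uBase | ((uint32_t)iRegM << 16) | ((uint32_t)iRegN << 5) | iRegD;
    }

    int main()
    {
        /* and x0, x0, x1  vs  ands x0, x0, x1 (the latter can be followed by b.eq / b.ne). */
        std::printf("and : %#010x\n", (unsigned)mkAndReg(0, 0, 1, false));
        std::printf("ands: %#010x\n", (unsigned)mkAndReg(0, 0, 1, true));
        return 0;
    }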
@@ -1735 +1893 @@
 
 
 /**
- * Emits code for AND'ing an 32-bit GPRs with a constant.
- */
-DECLINLINE(uint32_t ) iemNativeEmitAndGpr32ByImm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint32_t uImm)
+ * Emits code for AND'ing a 64-bit GPRs with a constant.
+ *
+ * @note When fSetFlags=true, JZ/JNZ jumps can be used afterwards on both AMD64
+ *       and ARM64 hosts.
+ */
+DECLINLINE(uint32_t ) iemNativeEmitAndGprByImm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint64_t uImm,
+                                               bool fSetFlags = false)
 {
 #if defined(RT_ARCH_AMD64)
-    /* and Ev, imm */
-    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
-    AssertReturn(pbCodeBuf, UINT32_MAX);
-    if (iGprDst >= 8)
-        pbCodeBuf[off++] = X86_OP_REX_R;
-    if (uImm < 128)
-    {
+    if ((int64_t)uImm == (int8_t)uImm)
+    {
+        /* and Ev, imm8 */
+        uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 4);
+        AssertReturn(pbCodeBuf, UINT32_MAX);
+        pbCodeBuf[off++] = X86_OP_REX_W | (iGprDst < 8 ? 0 : X86_OP_REX_R);
         pbCodeBuf[off++] = 0x83;
         pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 4, iGprDst & 7);
         pbCodeBuf[off++] = (uint8_t)uImm;
     }
-    else
-    {
+    else if ((int64_t)uImm == (int32_t)uImm)
+    {
+        /* and Ev, imm32 */
+        uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
+        AssertReturn(pbCodeBuf, UINT32_MAX);
+        pbCodeBuf[off++] = X86_OP_REX_W | (iGprDst < 8 ? 0 : X86_OP_REX_R);
         pbCodeBuf[off++] = 0x81;
         pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 4, iGprDst & 7);

@@ -1760 +1925 @@
         pbCodeBuf[off++] = RT_BYTE4(uImm);
     }
+    else
+    {
+        /* Use temporary register for the 64-bit immediate. */
+        uint8_t iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, uImm);
+        AssertReturn(iTmpReg < RT_ELEMENTS(pReNative->Core.aHstRegs), UINT32_MAX);
+        off = iemNativeEmitAndGprByGpr(pReNative, off, iGprDst, iTmpReg);
+        iemNativeRegFreeTmpImm(pReNative, iTmpReg);
+    }
+    RT_NOREF(fSetFlags);
 
 #elif defined(RT_ARCH_ARM64)
     uint32_t uImmR     = 0;
     uint32_t uImmNandS = 0;
     if (Armv8A64ConvertMaskToImmRImmS(uImm, &uImmNandS, &uImmR))
     {
         uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
         AssertReturn(pu32CodeBuf, UINT32_MAX);
-        pu32CodeBuf[off++] = Armv8A64MkInstrAndImm(iGprDst, iGprDst, uImmNandS, uImmR, false /*f64Bit*/);
-    }
-    else
-    {
-        /* Use temporary register for the immediate. */
+        if (!fSetFlags)
+            pu32CodeBuf[off++] = Armv8A64MkInstrAndImm(iGprDst, iGprDst, uImmNandS, uImmR);
+        else
+            pu32CodeBuf[off++] = Armv8A64MkInstrAndsImm(iGprDst, iGprDst, uImmNandS, uImmR);
+    }
+    else
+    {
+        /* Use temporary register for the 64-bit immediate. */
         uint8_t iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, uImm);
         AssertReturn(iTmpReg < RT_ELEMENTS(pReNative->Core.aHstRegs), UINT32_MAX);
-
-        /* and gprdst, gprdst, tmpreg */
-        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
-        AssertReturn(pu32CodeBuf, UINT32_MAX);
-        pu32CodeBuf[off++] = Armv8A64MkInstrAnd(iGprDst, iGprDst, iTmpReg, false /*f64Bit*/);
-
+        off = iemNativeEmitAndGprByGpr(pReNative, off, iGprDst, iTmpReg, fSetFlags);
         iemNativeRegFreeTmpImm(pReNative, iTmpReg);
     }
+
+#else
+# error "Port me"
+#endif
+    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
+    return off;
+}
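The 64-bit immediate variant classifies the constant by how it sign-extends: an imm8 form (opcode 0x83) when it fits a signed byte, an imm32 form (opcode 0x81, sign-extended to 64 bits) when it fits a signed dword, and a temporary register otherwise. A self-contained sketch of that classification; the helper name and output are purely illustrative:

    #include <cstdint>
    #include <cstdio>

    enum AndImmForm { AndImmForm_Imm8, AndImmForm_Imm32, AndImmForm_TmpReg };

    /* Mirrors the sign-extension tests used above to pick an encoding. */
    static AndImmForm classifyAndImm(uint64_t uImm)
    {
        if ((int64_t)uImm == (int8_t)uImm)
            return AndImmForm_Imm8;     /* and reg, imm8  (0x83 /4) */
        if ((int64_t)uImm == (int32_t)uImm)
            return AndImmForm_Imm32;    /* and reg, imm32 (0x81 /4), sign-extended */
        return AndImmForm_TmpReg;       /* load into a temp register, then and reg, reg */
    }

    int main()
    {
        static const char * const s_apszForm[] = { "imm8", "imm32", "temporary register" };
        std::printf("0x000000000000007f -> %s\n", s_apszForm[classifyAndImm(UINT64_C(0x7f))]);
        std::printf("0xffffffffffffff00 -> %s\n", s_apszForm[classifyAndImm(UINT64_C(0xffffffffffffff00))]);
        std::printf("0x00000000ffffff00 -> %s\n", s_apszForm[classifyAndImm(UINT64_C(0x00000000ffffff00))]);
        return 0;
    }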
+
+
+/**
+ * Emits code for AND'ing an 32-bit GPRs with a constant.
+ */
+DECLINLINE(uint32_t ) iemNativeEmitAndGpr32ByImm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint32_t uImm,
+                                                 bool fSetFlags = false)
+{
+#if defined(RT_ARCH_AMD64)
+    /* and Ev, imm */
+    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
+    AssertReturn(pbCodeBuf, UINT32_MAX);
+    if (iGprDst >= 8)
+        pbCodeBuf[off++] = X86_OP_REX_R;
+    if ((int32_t)uImm == (int8_t)uImm)
+    {
+        pbCodeBuf[off++] = 0x83;
+        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 4, iGprDst & 7);
+        pbCodeBuf[off++] = (uint8_t)uImm;
+    }
+    else
+    {
+        pbCodeBuf[off++] = 0x81;
+        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 4, iGprDst & 7);
+        pbCodeBuf[off++] = RT_BYTE1(uImm);
+        pbCodeBuf[off++] = RT_BYTE2(uImm);
+        pbCodeBuf[off++] = RT_BYTE3(uImm);
+        pbCodeBuf[off++] = RT_BYTE4(uImm);
+    }
+    RT_NOREF(fSetFlags);
+
+#elif defined(RT_ARCH_ARM64)
+    uint32_t uImmR     = 0;
+    uint32_t uImmNandS = 0;
+    if (Armv8A64ConvertMaskToImmRImmS(uImm, &uImmNandS, &uImmR))
+    {
+        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
+        AssertReturn(pu32CodeBuf, UINT32_MAX);
+        if (!fSetFlags)
+            pu32CodeBuf[off++] = Armv8A64MkInstrAndImm(iGprDst, iGprDst, uImmNandS, uImmR, false /*f64Bit*/);
+        else
+            pu32CodeBuf[off++] = Armv8A64MkInstrAndsImm(iGprDst, iGprDst, uImmNandS, uImmR, false /*f64Bit*/);
+    }
+    else
+    {
+        /* Use temporary register for the 64-bit immediate. */
+        uint8_t iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, uImm);
+        AssertReturn(iTmpReg < RT_ELEMENTS(pReNative->Core.aHstRegs), UINT32_MAX);
+        if (!fSetFlags)
+            off = iemNativeEmitAndGpr32ByGpr32(pReNative, off, iGprDst, iTmpReg);
+        else
+            off = iemNativeEmitAndsGpr32ByGpr32(pReNative, off, iGprDst, iTmpReg);
+        iemNativeRegFreeTmpImm(pReNative, iTmpReg);
+    }

@@ -2724 +2957 @@
         iemNativeRegFreeTmpImm(pReNative, iTmpReg);
     }
-    else
+    else if (fBits <= UINT32_MAX)
     {
         /* test Eb, imm8 or test Ev, imm32 */

@@ -2747 +2980 @@
         }
     }
+    /** @todo implement me. */
+    else
+        AssertFailedReturn(UINT32_MAX);
 
 #elif defined(RT_ARCH_ARM64)
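On the ARM64 side, both AND emitters first try Armv8A64ConvertMaskToImmRImmS(), i.e. they check whether the constant can be expressed as an ARM64 logical ("bitmask") immediate, and only fall back to a temporary register when it cannot. The real check accepts any rotated, repeating bit pattern; the deliberately reduced stand-in below only recognises a single contiguous run of set bits, which is enough to show why a mask like 0x0000fff0 can take the immediate path while 0x00f00f00 cannot:

    #include <cstdint>
    #include <cstdio>

    /* Very reduced stand-in for the logical-immediate check: a single contiguous run of ones,
       no rotation wrap-around.  All-zero and all-ones values can never be encoded. */
    static bool isContiguousMask(uint32_t uImm)
    {
        if (uImm == 0 || uImm == UINT32_MAX)
            return false;
        while (!(uImm & 1))                 /* strip trailing zero bits */
            uImm >>= 1;
        return (uImm & (uImm + 1)) == 0;    /* what remains must be 2^n - 1 */
    }

    int main()
    {
        std::printf("0x0000fff0: %s\n", isContiguousMask(0x0000fff0) ? "immediate form" : "temporary register");
        std::printf("0x00f00f00: %s\n", isContiguousMask(0x00f00f00) ? "immediate form" : "temporary register");
        return 0;
    }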