Changeset 76903 in vbox for trunk/src/VBox/ValidationKit/utils
- Timestamp: Jan 20, 2019 5:14:01 AM
- File: 1 edited
trunk/src/VBox/ValidationKit/utils/fs/FsPerf.cpp
--- trunk/src/VBox/ValidationKit/utils/fs/FsPerf.cpp	(r76899)
+++ trunk/src/VBox/ValidationKit/utils/fs/FsPerf.cpp	(r76903)
@@ -40,4 +40,5 @@
 #include <iprt/mem.h>
 #include <iprt/message.h>
+#include <iprt/param.h>
 #include <iprt/path.h>
 #include <iprt/process.h>
@@ -49,4 +50,8 @@
 #include <iprt/thread.h>
 #include <iprt/zero.h>
+
+#ifdef RT_OS_WINDOWS
+# include <iprt/nt/nt-and-windows.h>
+#endif
 
 
@@ -1384,4 +1389,107 @@
 
 
+/**
+ * Checks the content read from the file fsPerfIoPrepFile() prepared.
+ */
+bool fsPrefCheckReadBuf(unsigned uLineNo, uint64_t off, uint8_t const *pbBuf, size_t cbBuf, uint8_t bFiller = 0xf6)
+{
+    uint32_t cMismatches = 0;
+    size_t   offBuf      = 0;
+    uint32_t offBlock    = (uint32_t)(off & (_1K - 1));
+    while (offBuf < cbBuf)
+    {
+        /*
+         * Check the offset marker:
+         */
+        if (offBlock < sizeof(uint64_t))
+        {
+            RTUINT64U uMarker;
+            uMarker.u = off + offBuf - offBlock;
+            unsigned offMarker = offBlock & (sizeof(uint64_t) - 1);
+            while (offMarker < sizeof(uint64_t) && offBuf < cbBuf)
+            {
+                if (uMarker.au8[offMarker] != pbBuf[offBuf])
+                {
+                    RTTestIFailed("%u: Mismatch at buffer/file offset %#zx/%#RX64: %#x, expected %#x",
+                                  uLineNo, offBuf, off + offBuf, pbBuf[offBuf], uMarker.au8[offMarker]);
+                    if (cMismatches++ > 32)
+                        return false;
+                }
+                offMarker++;
+                offBuf++;
+            }
+            offBlock = sizeof(uint64_t);
+        }
+
+        /*
+         * Check the filling:
+         */
+        size_t cbFilling = RT_MIN(_1K - offBlock, cbBuf - offBuf);
+        if (   cbFilling == 0
+            || ASMMemIsAllU8(&pbBuf[offBuf], cbFilling, bFiller))
+            offBuf += cbFilling;
+        else
+        {
+            /* Some mismatch, locate it/them: */
+            while (cbFilling > 0 && offBuf < cbBuf)
+            {
+                if (pbBuf[offBuf] != bFiller)
+                {
+                    RTTestIFailed("%u: Mismatch at buffer/file offset %#zx/%#RX64: %#x, expected %#04x",
+                                  uLineNo, offBuf, off + offBuf, pbBuf[offBuf], bFiller);
+                    if (cMismatches++ > 32)
+                        return false;
+                }
+                offBuf++;
+                cbFilling--;
+            }
+        }
+        offBlock = 0;
+    }
+    return cMismatches == 0;
+}
+
+
+/**
+ * Sets up write buffer with offset markers and fillers.
+ */
+void fsPrefFillWriteBuf(uint64_t off, uint8_t *pbBuf, size_t cbBuf, uint8_t bFiller = 0xf6)
+{
+    uint32_t offBlock = (uint32_t)(off & (_1K - 1));
+    while (cbBuf > 0)
+    {
+        /* The marker. */
+        if (offBlock < sizeof(uint64_t))
+        {
+            RTUINT64U uMarker;
+            uMarker.u = off + offBlock;
+            if (cbBuf > sizeof(uMarker) - offBlock)
+            {
+                memcpy(pbBuf, &uMarker.au8[offBlock], sizeof(uMarker) - offBlock);
+                pbBuf += sizeof(uMarker) - offBlock;
+                cbBuf -= sizeof(uMarker) - offBlock;
+                off   += sizeof(uMarker) - offBlock;
+            }
+            else
+            {
+                memcpy(pbBuf, &uMarker.au8[offBlock], cbBuf);
+                return;
+            }
+            offBlock = sizeof(uint64_t);
+        }
+
+        /* Do the filling. */
+        size_t cbFilling = RT_MIN(_1K - offBlock, cbBuf);
+        memset(pbBuf, bFiller, cbFilling);
+        pbBuf += cbFilling;
+        cbBuf -= cbFilling;
+        off   += cbFilling;
+
+        offBlock = 0;
+    }
+}
+
+
+
 void fsPerfIoSeek(RTFILE hFile1, uint64_t cbFile)
 {
@@ -1551,4 +1659,7 @@
 
 
+/**
+ * One RTFileRead profiling iteration.
+ */
 DECL_FORCE_INLINE(int) fsPerfIoReadWorker(RTFILE hFile1, uint64_t cbFile, uint32_t cbBlock, uint8_t *pbBlock,
                                           uint64_t *poffActual, uint32_t *pcSeeks)
@@ -1579,5 +1690,5 @@
 void fsPerfIoReadBlockSize(RTFILE hFile1, uint64_t cbFile, uint32_t cbBlock)
 {
-    RTTestISubF("Sequential read %RU32", cbBlock);
+    RTTestISubF("IO - Sequential read %RU32", cbBlock);
 
     uint8_t *pbBuf = (uint8_t *)RTMemPageAlloc(cbBlock);
@@ -1593,4 +1704,182 @@
 
 
+void fsPerfRead(RTFILE hFile1, RTFILE hFileNoCache, uint64_t cbFile)
+{
+    RTTestISubF("IO - RTFileRead");
+
+    /*
+     * Allocate a big buffer we can play around with.  Min size is 1MB.
+     */
+    size_t   cbBuf = cbFile < _64M ? (size_t)cbFile : _64M;
+    uint8_t *pbBuf = (uint8_t *)RTMemPageAlloc(cbBuf);
+    while (!pbBuf)
+    {
+        cbBuf /= 2;
+        RTTESTI_CHECK_RETV(cbBuf >= _1M);
+        pbBuf = (uint8_t *)RTMemPageAlloc(_32M);
+    }
+
+#if 1
+    /*
+     * Start at the beginning and read the full buffer in random small chunks, thereby
+     * checking that unaligned buffer addresses, size and file offsets work fine.
+     */
+    struct
+    {
+        uint64_t offFile;
+        uint32_t cbMax;
+    } aRuns[] = { { 0, 127 }, { cbFile - cbBuf, UINT32_MAX }, { 0, UINT32_MAX - 1 } };
+    for (uint32_t i = 0; i < RT_ELEMENTS(aRuns); i++)
+    {
+        memset(pbBuf, 0x55, cbBuf);
+        RTTESTI_CHECK_RC(RTFileSeek(hFile1, aRuns[i].offFile, RTFILE_SEEK_BEGIN, NULL), VINF_SUCCESS);
+        for (size_t offBuf = 0; offBuf < cbBuf; )
+        {
+            uint32_t const cbLeft   = (uint32_t)(cbBuf - offBuf);
+            uint32_t const cbToRead = aRuns[i].cbMax < UINT32_MAX / 2 ? RTRandU32Ex(1, aRuns[i].cbMax)
+                                    : aRuns[i].cbMax == UINT32_MAX    ? RTRandU32Ex(RT_MAX(cbLeft / 4, 1), cbLeft)
+                                    : RTRandU32Ex(cbLeft >= _8K ? _8K : 1, RT_MIN(_1M, cbLeft));
+            size_t cbActual = 0;
+            RTTESTI_CHECK_RC(RTFileRead(hFile1, &pbBuf[offBuf], cbToRead, &cbActual), VINF_SUCCESS);
+            if (cbActual == cbToRead)
+                offBuf += cbActual;
+            else
+            {
+                RTTestIFailed("Attempting to read %#x bytes at %#zx, only got %#x bytes back!\n", cbToRead, offBuf, cbActual);
+                if (cbActual)
+                    offBuf += cbActual;
+                else
+                    pbBuf[offBuf++] = 0x11;
+            }
+        }
+        fsPrefCheckReadBuf(__LINE__, aRuns[i].offFile, pbBuf, cbBuf);
+    }
+#endif
+
+    /*
+     * Test reading beyond the end of the file.
+     */
+    size_t const   acbMax[] = { cbBuf, _64K, _16K, _4K, 256 };
+    uint32_t const aoffFromEos[] =
+    {
+        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 32, 63, 64, 127, 128, 255, 254, 256, 1023, 1024, 2048,
+        4092, 4093, 4094, 4095, 4096, 4097, 4098, 4099, 4100, 8192, 16384, 32767, 32768, 32769, 65535, 65536, _1M - 1
+    };
+    for (unsigned iMax = 0; iMax < RT_ELEMENTS(acbMax); iMax++)
+    {
+        size_t const cbMaxRead = acbMax[iMax];
+        for (uint32_t iOffFromEos = 0; iOffFromEos < RT_ELEMENTS(aoffFromEos); iOffFromEos++)
+        {
+            uint32_t off = aoffFromEos[iOffFromEos];
+            if (off >= cbMaxRead)
+                continue;
+            RTTESTI_CHECK_RC(RTFileSeek(hFile1, cbFile - off, RTFILE_SEEK_BEGIN, NULL), VINF_SUCCESS);
+            size_t cbActual = ~(size_t)0;
+            RTTESTI_CHECK_RC(RTFileRead(hFile1, pbBuf, cbMaxRead, &cbActual), VINF_SUCCESS);
+            RTTESTI_CHECK(cbActual == off);
+
+            RTTESTI_CHECK_RC(RTFileSeek(hFile1, cbFile - off, RTFILE_SEEK_BEGIN, NULL), VINF_SUCCESS);
+            cbActual = ~(size_t)0;
+            RTTESTI_CHECK_RC(RTFileRead(hFile1, pbBuf, off, &cbActual), VINF_SUCCESS);
+            RTTESTI_CHECK_MSG(cbActual == off, ("%#zx vs %#zx", cbActual, off));
+
+            cbActual = ~(size_t)0;
+            RTTESTI_CHECK_RC(RTFileRead(hFile1, pbBuf, 1, &cbActual), VINF_SUCCESS);
+            RTTESTI_CHECK(cbActual == 0);
+
+            RTTESTI_CHECK_RC(RTFileRead(hFile1, pbBuf, cbMaxRead, NULL), VERR_EOF);
+        }
+    }
+
+    /*
+     * Test reading beyond end of the file.
+     */
+    for (unsigned iMax = 0; iMax < RT_ELEMENTS(acbMax); iMax++)
+    {
+        size_t const cbMaxRead = acbMax[iMax];
+        for (uint32_t off = 0; off < 256; off++)
+        {
+            RTTESTI_CHECK_RC(RTFileSeek(hFile1, cbFile + off, RTFILE_SEEK_BEGIN, NULL), VINF_SUCCESS);
+            size_t cbActual = ~(size_t)0;
+            RTTESTI_CHECK_RC(RTFileRead(hFile1, pbBuf, cbMaxRead, &cbActual), VINF_SUCCESS);
+            RTTESTI_CHECK(cbActual == 0);
+
+            RTTESTI_CHECK_RC(RTFileRead(hFile1, pbBuf, cbMaxRead, NULL), VERR_EOF);
+        }
+    }
+
+    /*
+     * Do uncached access, must be page aligned.
+     */
+    uint32_t cbPage = PAGE_SIZE;
+    memset(pbBuf, 0x66, cbBuf);
+    RTTESTI_CHECK_RC(RTFileSeek(hFileNoCache, 0, RTFILE_SEEK_BEGIN, NULL), VINF_SUCCESS);
+    for (size_t offBuf = 0; offBuf < cbBuf; )
+    {
+        uint32_t const cPagesLeft   = (uint32_t)((cbBuf - offBuf) / cbPage);
+        uint32_t const cPagesToRead = RTRandU32Ex(1, cPagesLeft);
+        size_t const   cbToRead     = cPagesToRead * (size_t)cbPage;
+        size_t cbActual = 0;
+        RTTESTI_CHECK_RC(RTFileRead(hFileNoCache, &pbBuf[offBuf], cbToRead, &cbActual), VINF_SUCCESS);
+        if (cbActual == cbToRead)
+            offBuf += cbActual;
+        else
+        {
+            RTTestIFailed("Attempting to read %#zx bytes at %#zx, only got %#x bytes back!\n", cbToRead, offBuf, cbActual);
+            if (cbActual)
+                offBuf += cbActual;
+            else
+            {
+                memset(&pbBuf[offBuf], 0x11, cbPage);
+                offBuf += cbPage;
+            }
+        }
+    }
+    fsPrefCheckReadBuf(__LINE__, 0, pbBuf, cbBuf);
+
+    /*
+     * Check reading zero bytes at the end of the file.
+     * Requires native call because RTFileWrite doesn't call kernel on zero byte reads.
+     */
+    RTTESTI_CHECK_RC(RTFileSeek(hFile1, 0, RTFILE_SEEK_END, NULL), VINF_SUCCESS);
+#ifdef RT_OS_WINDOWS
+    IO_STATUS_BLOCK Ios = RTNT_IO_STATUS_BLOCK_INITIALIZER;
+    NTSTATUS rcNt = NtReadFile((HANDLE)RTFileToNative(hFile1), NULL, NULL, NULL, &Ios, pbBuf, 0, NULL, NULL);
+    RTTESTI_CHECK_MSG(rcNt == STATUS_SUCCESS, ("rcNt=%#x", rcNt));
+    RTTESTI_CHECK(Ios.Status == STATUS_SUCCESS);
+    RTTESTI_CHECK(Ios.Information == 0);
+
+    RTNT_IO_STATUS_BLOCK_REINIT(&Ios);
+    rcNt = NtReadFile((HANDLE)RTFileToNative(hFile1), NULL, NULL, NULL, &Ios, pbBuf, 1, NULL, NULL);
+    RTTESTI_CHECK_MSG(rcNt == STATUS_END_OF_FILE, ("rcNt=%#x", rcNt));
+    RTTESTI_CHECK(Ios.Status == STATUS_END_OF_FILE);
+    RTTESTI_CHECK(Ios.Information == 0);
+#endif
+
+    /*
+     * Other OS specific stuff.
+     */
+#ifdef RT_OS_WINDOWS
+    /* Check that reading at an offset modifies the position: */
+    RTTESTI_CHECK_RC(RTFileSeek(hFile1, 0, RTFILE_SEEK_END, NULL), VINF_SUCCESS);
+    RTTESTI_CHECK(RTFileTell(hFile1) == cbFile);
+
+    RTNT_IO_STATUS_BLOCK_REINIT(&Ios);
+    LARGE_INTEGER offNt;
+    offNt.QuadPart = cbFile / 2;
+    rcNt = NtReadFile((HANDLE)RTFileToNative(hFile1), NULL, NULL, NULL, &Ios, pbBuf, _4K, &offNt, NULL);
+    RTTESTI_CHECK_MSG(rcNt == STATUS_SUCCESS, ("rcNt=%#x", rcNt));
+    RTTESTI_CHECK(Ios.Status == STATUS_SUCCESS);
+    RTTESTI_CHECK(Ios.Information == _4K);
+    RTTESTI_CHECK(RTFileTell(hFile1) == cbFile / 2 + _4K);
+    fsPrefCheckReadBuf(__LINE__, cbFile / 2, pbBuf, _4K);
+#endif
+
+    RTMemPageFree(pbBuf, cbBuf);
+}
+
+
+/**
+ * One RTFileWrite profiling iteration.
+ */
 DECL_FORCE_INLINE(int) fsPerfIoWriteWorker(RTFILE hFile1, uint64_t cbFile, uint32_t cbBlock, uint8_t *pbBlock,
                                            uint64_t *poffActual, uint32_t *pcSeeks)
@@ -1618,7 +1907,8 @@
 }
 
+
 void fsPerfIoWriteBlockSize(RTFILE hFile1, uint64_t cbFile, uint32_t cbBlock)
 {
-    RTTestISubF("Sequential write %RU32", cbBlock);
+    RTTestISubF("IO - Sequential write %RU32", cbBlock);
 
     uint8_t *pbBuf = (uint8_t *)RTMemPageAlloc(cbBlock);
@@ -1631,4 +1921,143 @@
     else
         RTTestSkipped(g_hTest, "insufficient (virtual) memory available");
+}
+
+
+void fsPerfWrite(RTFILE hFile1, RTFILE hFileNoCache, RTFILE hFileWriteThru, uint64_t cbFile)
+{
+    RTTestISubF("IO - RTFileWrite");
+
+    /*
+     * Allocate a big buffer we can play around with.  Min size is 1MB.
+     */
+    size_t   cbBuf = cbFile < _64M ? (size_t)cbFile : _64M;
+    uint8_t *pbBuf = (uint8_t *)RTMemPageAlloc(cbBuf);
+    while (!pbBuf)
+    {
+        cbBuf /= 2;
+        RTTESTI_CHECK_RETV(cbBuf >= _1M);
+        pbBuf = (uint8_t *)RTMemPageAlloc(_32M);
+    }
+
+    /*
+     * Start at the beginning and write out the full buffer in random small chunks, thereby
+     * checking that unaligned buffer addresses, size and file offsets work fine.
+     */
+    struct
+    {
+        uint64_t offFile;
+        uint32_t cbMax;
+    } aRuns[] = { { 0, 127 }, { cbFile - cbBuf, UINT32_MAX }, { 0, UINT32_MAX - 1 } };
+    uint8_t bFiller = 0x88;
+    for (uint32_t i = 0; i < RT_ELEMENTS(aRuns); i++, bFiller)
+    {
+        fsPrefFillWriteBuf(aRuns[i].offFile, pbBuf, cbBuf, bFiller);
+        fsPrefCheckReadBuf(__LINE__, aRuns[i].offFile, pbBuf, cbBuf, bFiller);
+
+        RTTESTI_CHECK_RC(RTFileSeek(hFile1, aRuns[i].offFile, RTFILE_SEEK_BEGIN, NULL), VINF_SUCCESS);
+        for (size_t offBuf = 0; offBuf < cbBuf; )
+        {
+            uint32_t const cbLeft    = (uint32_t)(cbBuf - offBuf);
+            uint32_t const cbToWrite = aRuns[i].cbMax < UINT32_MAX / 2 ? RTRandU32Ex(1, aRuns[i].cbMax)
+                                     : aRuns[i].cbMax == UINT32_MAX    ? RTRandU32Ex(RT_MAX(cbLeft / 4, 1), cbLeft)
+                                     : RTRandU32Ex(cbLeft >= _8K ? _8K : 1, RT_MIN(_1M, cbLeft));
+            size_t cbActual = 0;
+            RTTESTI_CHECK_RC(RTFileWrite(hFile1, &pbBuf[offBuf], cbToWrite, &cbActual), VINF_SUCCESS);
+            if (cbActual == cbToWrite)
+                offBuf += cbActual;
+            else
+            {
+                RTTestIFailed("Attempting to write %#x bytes at %#zx, only got %#x written!\n", cbToWrite, offBuf, cbActual);
+                if (cbActual)
+                    offBuf += cbActual;
+                else
+                    pbBuf[offBuf++] = 0x11;
+            }
+        }
+
+        RTTESTI_CHECK_RC(RTFileReadAt(hFile1, aRuns[i].offFile, pbBuf, cbBuf, NULL), VINF_SUCCESS);
+        fsPrefCheckReadBuf(__LINE__, aRuns[i].offFile, pbBuf, cbBuf, bFiller);
+    }
+
+
+    /*
+     * Do uncached and write-thru accesses, must be page aligned.
+     */
+    RTFILE ahFiles[2] = { hFileWriteThru, hFileNoCache };
+    for (unsigned iFile = 0; iFile < RT_ELEMENTS(ahFiles); iFile++, bFiller++)
+    {
+        fsPrefFillWriteBuf(0, pbBuf, cbBuf, bFiller);
+        fsPrefCheckReadBuf(__LINE__, 0, pbBuf, cbBuf, bFiller);
+        RTTESTI_CHECK_RC(RTFileSeek(ahFiles[iFile], 0, RTFILE_SEEK_BEGIN, NULL), VINF_SUCCESS);
+
+        uint32_t cbPage = PAGE_SIZE;
+        for (size_t offBuf = 0; offBuf < cbBuf; )
+        {
+            uint32_t const cPagesLeft    = (uint32_t)((cbBuf - offBuf) / cbPage);
+            uint32_t const cPagesToWrite = RTRandU32Ex(1, cPagesLeft);
+            size_t const   cbToWrite     = cPagesToWrite * (size_t)cbPage;
+            size_t cbActual = 0;
+            RTTESTI_CHECK_RC(RTFileWrite(ahFiles[iFile], &pbBuf[offBuf], cbToWrite, &cbActual), VINF_SUCCESS);
+            if (cbActual == cbToWrite)
+            {
+                RTTESTI_CHECK_RC(RTFileReadAt(hFile1, offBuf, pbBuf, cbToWrite, NULL), VINF_SUCCESS);
+                fsPrefCheckReadBuf(__LINE__, offBuf, pbBuf, cbToWrite, bFiller);
+                offBuf += cbActual;
+            }
+            else
+            {
+                RTTestIFailed("Attempting to read %#zx bytes at %#zx, only got %#x written!\n", cbToWrite, offBuf, cbActual);
+                if (cbActual)
+                    offBuf += cbActual;
+                else
+                {
+                    memset(&pbBuf[offBuf], 0x11, cbPage);
+                    offBuf += cbPage;
+                }
+            }
+        }
+
+        RTTESTI_CHECK_RC(RTFileReadAt(ahFiles[iFile], 0, pbBuf, cbBuf, NULL), VINF_SUCCESS);
+        fsPrefCheckReadBuf(__LINE__, 0, pbBuf, cbBuf, bFiller);
+    }
+
+    /*
+     * Check the behavior of writing zero bytes to the file _4K from the end
+     * using native API.  In the olden days zero sized write have been known
+     * to be used to truncate a file.
+     */
+    RTTESTI_CHECK_RC(RTFileSeek(hFile1, -_4K, RTFILE_SEEK_END, NULL), VINF_SUCCESS);
+#ifdef RT_OS_WINDOWS
+    IO_STATUS_BLOCK Ios = RTNT_IO_STATUS_BLOCK_INITIALIZER;
+    NTSTATUS rcNt = NtWriteFile((HANDLE)RTFileToNative(hFile1), NULL, NULL, NULL, &Ios, pbBuf, 0, NULL, NULL);
+    RTTESTI_CHECK_MSG(rcNt == STATUS_SUCCESS, ("rcNt=%#x", rcNt));
+    RTTESTI_CHECK(Ios.Status == STATUS_SUCCESS);
+    RTTESTI_CHECK(Ios.Information == 0);
+
+    RTTESTI_CHECK_RC(RTFileRead(hFile1, pbBuf, _4K, NULL), VINF_SUCCESS);
+    fsPrefCheckReadBuf(__LINE__, cbFile - _4K, pbBuf, _4K, pbBuf[0x8]);
+#endif
+
+    /*
+     * Other OS specific stuff.
+     */
+#ifdef RT_OS_WINDOWS
+    /* Check that reading at an offset modifies the position: */
+    RTTESTI_CHECK_RC(RTFileReadAt(hFile1, cbFile / 2, pbBuf, _4K, NULL), VINF_SUCCESS);
+    RTTESTI_CHECK_RC(RTFileSeek(hFile1, 0, RTFILE_SEEK_END, NULL), VINF_SUCCESS);
+    RTTESTI_CHECK(RTFileTell(hFile1) == cbFile);
+
+    RTNT_IO_STATUS_BLOCK_REINIT(&Ios);
+    LARGE_INTEGER offNt;
+    offNt.QuadPart = cbFile / 2;
+    rcNt = NtWriteFile((HANDLE)RTFileToNative(hFile1), NULL, NULL, NULL, &Ios, pbBuf, _4K, &offNt, NULL);
+    RTTESTI_CHECK_MSG(rcNt == STATUS_SUCCESS, ("rcNt=%#x", rcNt));
+    RTTESTI_CHECK(Ios.Status == STATUS_SUCCESS);
+    RTTESTI_CHECK(Ios.Information == _4K);
+    RTTESTI_CHECK(RTFileTell(hFile1) == cbFile / 2 + _4K);
+#endif
+
+    RT_NOREF(hFileNoCache, hFileWriteThru);
+    RTMemPageFree(pbBuf, cbBuf);
 }
 
@@ -1673,4 +2102,13 @@
     RTTESTI_CHECK_RC_RETV(RTFileOpen(&hFile1, InDir(RT_STR_TUPLE("file21")),
                                      RTFILE_O_CREATE_REPLACE | RTFILE_O_DENY_NONE | RTFILE_O_READWRITE), VINF_SUCCESS);
+    RTFILE hFileNoCache;
+    RTTESTI_CHECK_RC_RETV(RTFileOpen(&hFileNoCache, g_szDir,
+                                     RTFILE_O_OPEN | RTFILE_O_DENY_NONE | RTFILE_O_READWRITE | RTFILE_O_NO_CACHE),
+                          VINF_SUCCESS);
+    RTFILE hFileWriteThru;
+    RTTESTI_CHECK_RC_RETV(RTFileOpen(&hFileWriteThru, g_szDir,
+                                     RTFILE_O_OPEN | RTFILE_O_DENY_NONE | RTFILE_O_READWRITE | RTFILE_O_WRITE_THROUGH),
+                          VINF_SUCCESS);
+
     uint8_t *pbFree = NULL;
     int rc = fsPerfIoPrepFile(hFile1, cbFile, &pbFree);
@@ -1685,16 +2123,24 @@
         if (g_fRead)
         {
+            fsPerfRead(hFile1, hFileNoCache, cbFile);
             for (unsigned i = 0; i < g_cIoBlocks; i++)
                 fsPerfIoReadBlockSize(hFile1, cbFile, g_acbIoBlocks[i]);
         }
+        /** @todo mmap */
+
        if (g_fWrite)
        {
+            /* This is destructive to the file content. */
+            fsPerfWrite(hFile1, hFileNoCache, hFileWriteThru, cbFile);
            for (unsigned i = 0; i < g_cIoBlocks; i++)
                fsPerfIoWriteBlockSize(hFile1, cbFile, g_acbIoBlocks[i]);
        }
+
    }
 
    RTTESTI_CHECK_RC(RTFileSetSize(hFile1, 0), VINF_SUCCESS);
    RTTESTI_CHECK_RC(RTFileClose(hFile1), VINF_SUCCESS);
+    RTTESTI_CHECK_RC(RTFileClose(hFileNoCache), VINF_SUCCESS);
+    RTTESTI_CHECK_RC(RTFileClose(hFileWriteThru), VINF_SUCCESS);
    RTTESTI_CHECK_RC(RTFileDelete(g_szDir), VINF_SUCCESS);
 }