// packet/serialize.rs
// Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Serialization.
use std::cmp;
use std::convert::Infallible as Never;
use std::fmt::{self, Debug, Formatter};
use std::ops::{Range, RangeBounds};
use arrayvec::ArrayVec;
use zerocopy::SplitByteSlice;
use crate::{
canonicalize_range, take_back, take_back_mut, take_front, take_front_mut,
AsFragmentedByteSlice, Buffer, BufferView, BufferViewMut, ContiguousBuffer, EmptyBuf,
FragmentedBuffer, FragmentedBufferMut, FragmentedBytes, FragmentedBytesMut, GrowBuffer,
GrowBufferMut, ParsablePacket, ParseBuffer, ParseBufferMut, ReusableBuffer, ShrinkBuffer,
};
/// Either of two buffers.
///
/// An `Either` wraps one of two different buffer types. It implements all of
/// the relevant traits by calling the corresponding methods on the wrapped
/// buffer.
#[derive(Copy, Clone, Debug)]
pub enum Either<A, B> {
    /// The first buffer type.
    A(A),
    /// The second buffer type.
    B(B),
}
impl<A, B> Either<A, B> {
    /// Maps the `A` variant of an `Either`.
    ///
    /// Applies `f` to the contents of the `A` variant, producing an
    /// `Either<AA, B>`; a `B` variant is passed through untouched.
    pub fn map_a<AA, F: FnOnce(A) -> AA>(self, f: F) -> Either<AA, B> {
        match self {
            Either::B(other) => Either::B(other),
            Either::A(value) => Either::A(f(value)),
        }
    }
    /// Maps the `B` variant of an `Either`.
    ///
    /// Applies `f` to the contents of the `B` variant, producing an
    /// `Either<A, BB>`; an `A` variant is passed through untouched.
    pub fn map_b<BB, F: FnOnce(B) -> BB>(self, f: F) -> Either<A, BB> {
        match self {
            Either::A(other) => Either::A(other),
            Either::B(value) => Either::B(f(value)),
        }
    }
    /// Returns the `A` variant in an `Either<A, B>`.
    ///
    /// # Panics
    ///
    /// Panics if this `Either<A, B>` does not hold the `A` variant.
    pub fn unwrap_a(self) -> A {
        if let Either::A(x) = self {
            x
        } else {
            panic!("This `Either<A, B>` does not hold the `A` variant")
        }
    }
    /// Returns the `B` variant in an `Either<A, B>`.
    ///
    /// # Panics
    ///
    /// Panics if this `Either<A, B>` does not hold the `B` variant.
    pub fn unwrap_b(self) -> B {
        if let Either::B(x) = self {
            x
        } else {
            panic!("This `Either<A, B>` does not hold the `B` variant")
        }
    }
}
impl<A> Either<A, A> {
    /// Returns the inner value held by this `Either` when both possible values
    /// `Either::A` and `Either::B` contain the same inner types.
    pub fn into_inner(self) -> A {
        // Both variants carry an `A`, so a single or-pattern arm extracts it.
        match self {
            Either::A(x) | Either::B(x) => x,
        }
    }
}
impl<A> Either<A, Never> {
    /// Returns the `A` value in an `Either<A, Never>`.
    #[inline]
    pub fn into_a(self) -> A {
        match self {
            Either::A(a) => a,
            // TODO(https://fxbug.dev/360181296): audit this code to ensure it's working as intended
            #[allow(unreachable_patterns)]
            // `Never` is an alias for `Infallible` (see the imports), which is
            // uninhabited: this arm can never run, and the empty match coerces
            // `never` to `A`.
            Either::B(never) => match never {},
        }
    }
}
impl<B> Either<Never, B> {
    /// Returns the `B` value in an `Either<Never, B>`.
    #[inline]
    pub fn into_b(self) -> B {
        match self {
            // TODO(https://fxbug.dev/360181296): audit this code to ensure it's working as intended
            #[allow(unreachable_patterns)]
            // `Never` is an alias for `Infallible` (see the imports), which is
            // uninhabited: this arm can never run, and the empty match coerces
            // `never` to `B`.
            Either::A(never) => match never {},
            Either::B(b) => b,
        }
    }
}
// Invokes `$method` (with any arguments) on whichever variant's inner buffer
// this `Either` holds, so each delegating impl performs exactly one match.
macro_rules! call_method_on_either {
    ($val:expr, $method:ident, $($args:expr),*) => {
        match $val {
            Either::A(a) => a.$method($($args),*),
            Either::B(b) => b.$method($($args),*),
        }
    };
    // Zero-argument form: forwards to the variadic rule with an empty list.
    ($val:expr, $method:ident) => {
        call_method_on_either!($val, $method,)
    };
}
// NOTE(joshlf): We override the default implementations of all methods for
// Either. Many of the default implementations make multiple calls to other
// Buffer methods, each of which performs a match statement to figure out which
// Either variant is present. We assume that doing this match once is more
// performant than doing it multiple times.
// Every method below simply forwards to the wrapped buffer via
// `call_method_on_either!`.
impl<A, B> FragmentedBuffer for Either<A, B>
where
    A: FragmentedBuffer,
    B: FragmentedBuffer,
{
    fn len(&self) -> usize {
        call_method_on_either!(self, len)
    }
    fn with_bytes<R, F>(&self, f: F) -> R
    where
        F: for<'a, 'b> FnOnce(FragmentedBytes<'a, 'b>) -> R,
    {
        call_method_on_either!(self, with_bytes, f)
    }
}
// `ContiguousBuffer` requires no methods here; the impl just asserts that both
// variants are themselves contiguous.
impl<A, B> ContiguousBuffer for Either<A, B>
where
    A: ContiguousBuffer,
    B: ContiguousBuffer,
{
}
impl<A, B> ShrinkBuffer for Either<A, B>
where
    A: ShrinkBuffer,
    B: ShrinkBuffer,
{
    fn shrink<R: RangeBounds<usize>>(&mut self, range: R) {
        call_method_on_either!(self, shrink, range)
    }
    fn shrink_front(&mut self, n: usize) {
        call_method_on_either!(self, shrink_front, n)
    }
    fn shrink_back(&mut self, n: usize) {
        call_method_on_either!(self, shrink_back, n)
    }
}
impl<A, B> ParseBuffer for Either<A, B>
where
    A: ParseBuffer,
    B: ParseBuffer,
{
    // Per the NOTE above, defaulted trait methods are overridden so that the
    // variant match happens only once per call.
    fn parse<'a, P: ParsablePacket<&'a [u8], ()>>(&'a mut self) -> Result<P, P::Error> {
        call_method_on_either!(self, parse)
    }
    fn parse_with<'a, ParseArgs, P: ParsablePacket<&'a [u8], ParseArgs>>(
        &'a mut self,
        args: ParseArgs,
    ) -> Result<P, P::Error> {
        call_method_on_either!(self, parse_with, args)
    }
}
// As above: each method forwards to the wrapped buffer via
// `call_method_on_either!`.
impl<A, B> FragmentedBufferMut for Either<A, B>
where
    A: FragmentedBufferMut,
    B: FragmentedBufferMut,
{
    fn with_bytes_mut<R, F>(&mut self, f: F) -> R
    where
        F: for<'a, 'b> FnOnce(FragmentedBytesMut<'a, 'b>) -> R,
    {
        call_method_on_either!(self, with_bytes_mut, f)
    }
}
impl<A, B> ParseBufferMut for Either<A, B>
where
    A: ParseBufferMut,
    B: ParseBufferMut,
{
    fn parse_mut<'a, P: ParsablePacket<&'a mut [u8], ()>>(&'a mut self) -> Result<P, P::Error> {
        call_method_on_either!(self, parse_mut)
    }
    fn parse_with_mut<'a, ParseArgs, P: ParsablePacket<&'a mut [u8], ParseArgs>>(
        &'a mut self,
        args: ParseArgs,
    ) -> Result<P, P::Error> {
        call_method_on_either!(self, parse_with_mut, args)
    }
}
impl<A, B> GrowBuffer for Either<A, B>
where
    A: GrowBuffer,
    B: GrowBuffer,
{
    #[inline]
    fn with_parts<O, F>(&self, f: F) -> O
    where
        F: for<'a, 'b> FnOnce(&'a [u8], FragmentedBytes<'a, 'b>, &'a [u8]) -> O,
    {
        call_method_on_either!(self, with_parts, f)
    }
    fn capacity(&self) -> usize {
        call_method_on_either!(self, capacity)
    }
    fn prefix_len(&self) -> usize {
        call_method_on_either!(self, prefix_len)
    }
    fn suffix_len(&self) -> usize {
        call_method_on_either!(self, suffix_len)
    }
    fn grow_front(&mut self, n: usize) {
        call_method_on_either!(self, grow_front, n)
    }
    fn grow_back(&mut self, n: usize) {
        call_method_on_either!(self, grow_back, n)
    }
    fn reset(&mut self) {
        call_method_on_either!(self, reset)
    }
}
// As above: each method forwards to the wrapped buffer via
// `call_method_on_either!`.
impl<A, B> GrowBufferMut for Either<A, B>
where
    A: GrowBufferMut,
    B: GrowBufferMut,
{
    fn with_parts_mut<O, F>(&mut self, f: F) -> O
    where
        F: for<'a, 'b> FnOnce(&'a mut [u8], FragmentedBytesMut<'a, 'b>, &'a mut [u8]) -> O,
    {
        call_method_on_either!(self, with_parts_mut, f)
    }
    fn serialize<BB: PacketBuilder>(&mut self, builder: BB) {
        call_method_on_either!(self, serialize, builder)
    }
}
impl<A, B> Buffer for Either<A, B>
where
    A: Buffer,
    B: Buffer,
{
    fn parse_with_view<'a, ParseArgs, P: ParsablePacket<&'a [u8], ParseArgs>>(
        &'a mut self,
        args: ParseArgs,
    ) -> Result<(P, &'a [u8]), P::Error> {
        call_method_on_either!(self, parse_with_view, args)
    }
}
impl<A: AsRef<[u8]>, B: AsRef<[u8]>> AsRef<[u8]> for Either<A, B> {
    fn as_ref(&self) -> &[u8] {
        call_method_on_either!(self, as_ref)
    }
}
impl<A: AsMut<[u8]>, B: AsMut<[u8]>> AsMut<[u8]> for Either<A, B> {
    fn as_mut(&mut self) -> &mut [u8] {
        call_method_on_either!(self, as_mut)
    }
}
/// A byte slice wrapper providing buffer functionality.
///
/// A `Buf` wraps a byte slice (a type which implements `AsRef<[u8]>` or
/// `AsMut<[u8]>`) and implements various buffer traits by keeping track of
/// prefix, body, and suffix offsets within the byte slice.
#[derive(Clone, Debug)]
pub struct Buf<B> {
    // The backing byte storage.
    buf: B,
    // Range of `buf` that holds the body; bytes before the range form the
    // prefix and bytes after it form the suffix.
    body: Range<usize>,
}
impl<B: AsRef<[u8]>> PartialEq for Buf<B> {
    /// Compares two `Buf`s by their body bytes only; `Buf`'s `AsRef<[u8]>`
    /// exposes just the body, so prefix and suffix don't participate.
    fn eq(&self, other: &Self) -> bool {
        // The fully-qualified `as_ref` calls pin the `[u8]` target type.
        AsRef::<[u8]>::as_ref(self) == AsRef::<[u8]>::as_ref(other)
    }
}
impl<B: AsRef<[u8]>> Eq for Buf<B> {}
impl Buf<Vec<u8>> {
    /// Extracts the contained data trimmed to the buffer's range.
    pub fn into_inner(self) -> Vec<u8> {
        let Buf { mut buf, body } = self;
        // Drop the suffix first, then shift out the prefix, leaving exactly
        // the body bytes.
        buf.truncate(body.end);
        let _ = buf.drain(..body.start);
        buf
    }
}
impl<B: AsRef<[u8]>> Buf<B> {
    /// Constructs a new `Buf`.
    ///
    /// `new` constructs a new `Buf` from a buffer and a body range. The bytes
    /// within the range will be the body, the bytes before the range will be
    /// the prefix, and the bytes after the range will be the suffix.
    ///
    /// # Panics
    ///
    /// Panics if `range` is out of bounds of `buf`, or if it is nonsensical
    /// (the end precedes the start).
    pub fn new<R: RangeBounds<usize>>(buf: B, body: R) -> Buf<B> {
        let body = canonicalize_range(buf.as_ref().len(), &body);
        Buf { buf, body }
    }
    /// Constructs a [`BufView`] which will be a [`BufferView`] into this `Buf`.
    pub fn buffer_view(&mut self) -> BufView<'_> {
        // Split the borrow: the byte slice borrows `buf` (shared) while the
        // view keeps a mutable borrow of the `body` range.
        let Buf { buf, body } = self;
        BufView { buf: &buf.as_ref()[body.clone()], body }
    }
}
impl<B: AsRef<[u8]> + AsMut<[u8]>> Buf<B> {
/// Constructs a [`BufViewMut`] which will be a [`BufferViewMut`] into this `Buf`.
pub fn buffer_view_mut(&mut self) -> BufViewMut<'_> {
BufViewMut { buf: &mut self.buf.as_mut()[self.body.clone()], body: &mut self.body }
}
}
// Method bodies come from a macro shared across the crate's buffer types
// (defined elsewhere in the crate).
impl<B: AsRef<[u8]>> FragmentedBuffer for Buf<B> {
    fragmented_buffer_method_impls!();
}
// No required methods: this impl just marks `Buf` as contiguous.
impl<B: AsRef<[u8]>> ContiguousBuffer for Buf<B> {}
impl<B: AsRef<[u8]>> ShrinkBuffer for Buf<B> {
    /// Shrinks the body to `range`, which is interpreted relative to the
    /// current body.
    fn shrink<R: RangeBounds<usize>>(&mut self, range: R) {
        // Canonicalize against the body length, then translate the relative
        // range into absolute offsets within the backing buffer.
        let offset = self.body.start;
        let relative = canonicalize_range(self.len(), &range);
        self.body = (relative.start + offset)..(relative.end + offset);
    }
    fn shrink_front(&mut self, n: usize) {
        // Cannot discard more bytes than the body holds.
        assert!(self.len() >= n);
        self.body.start += n;
    }
    fn shrink_back(&mut self, n: usize) {
        // Cannot discard more bytes than the body holds.
        assert!(self.len() >= n);
        self.body.end -= n;
    }
}
impl<B: AsRef<[u8]>> ParseBuffer for Buf<B> {
    fn parse_with<'a, ParseArgs, P: ParsablePacket<&'a [u8], ParseArgs>>(
        &'a mut self,
        args: ParseArgs,
    ) -> Result<P, P::Error> {
        // Parsing is driven through a `BufView`, which updates `self.body` as
        // the parser consumes bytes.
        P::parse(self.buffer_view(), args)
    }
}
// Mutable method bodies come from a macro shared across the crate's buffer
// types (defined elsewhere in the crate).
impl<B: AsRef<[u8]> + AsMut<[u8]>> FragmentedBufferMut for Buf<B> {
    fragmented_buffer_mut_method_impls!();
}
impl<B: AsRef<[u8]> + AsMut<[u8]>> ParseBufferMut for Buf<B> {
    fn parse_with_mut<'a, ParseArgs, P: ParsablePacket<&'a mut [u8], ParseArgs>>(
        &'a mut self,
        args: ParseArgs,
    ) -> Result<P, P::Error> {
        // Same as `parse_with`, but the view hands out mutable byte slices.
        P::parse_mut(self.buffer_view_mut(), args)
    }
}
impl<B: AsRef<[u8]>> GrowBuffer for Buf<B> {
    fn with_parts<O, F>(&self, f: F) -> O
    where
        F: for<'a, 'b> FnOnce(&'a [u8], FragmentedBytes<'a, 'b>, &'a [u8]) -> O,
    {
        // Shared borrows may coexist, so plain slicing carves out the three
        // regions directly.
        let all = self.buf.as_ref();
        let prefix = &all[..self.body.start];
        let suffix = &all[self.body.end..];
        let mut body = [&all[self.body.clone()]];
        f(prefix, body.as_fragmented_byte_slice(), suffix)
    }
    fn capacity(&self) -> usize {
        self.buf.as_ref().len()
    }
    fn prefix_len(&self) -> usize {
        self.body.start
    }
    fn suffix_len(&self) -> usize {
        self.buf.as_ref().len() - self.body.end
    }
    fn grow_front(&mut self, n: usize) {
        // The body cannot grow past the front of the backing buffer.
        assert!(self.body.start >= n);
        self.body.start -= n;
    }
    fn grow_back(&mut self, n: usize) {
        // The body cannot grow past the end of the backing buffer.
        assert!(self.suffix_len() >= n);
        self.body.end += n;
    }
}
impl<B: AsRef<[u8]> + AsMut<[u8]>> GrowBufferMut for Buf<B> {
    fn with_parts_mut<O, F>(&mut self, f: F) -> O
    where
        F: for<'a, 'b> FnOnce(&'a mut [u8], FragmentedBytesMut<'a, 'b>, &'a mut [u8]) -> O,
    {
        // Mutable borrows must be disjoint, so split the buffer in two steps.
        let body_range = self.body.clone();
        let (prefix, rest) = self.buf.as_mut().split_at_mut(body_range.start);
        let (body, suffix) = rest.split_at_mut(body_range.end - body_range.start);
        let mut fragments = [body];
        f(prefix, fragments.as_fragmented_byte_slice(), suffix)
    }
}
// `AsRef`/`AsMut` expose only the body, not the prefix or suffix.
impl<B: AsRef<[u8]>> AsRef<[u8]> for Buf<B> {
    fn as_ref(&self) -> &[u8] {
        &self.buf.as_ref()[self.body.clone()]
    }
}
impl<B: AsMut<[u8]>> AsMut<[u8]> for Buf<B> {
    fn as_mut(&mut self) -> &mut [u8] {
        &mut self.buf.as_mut()[self.body.clone()]
    }
}
impl<B: AsRef<[u8]>> Buffer for Buf<B> {
    fn parse_with_view<'a, ParseArgs, P: ParsablePacket<&'a [u8], ParseArgs>>(
        &'a mut self,
        args: ParseArgs,
    ) -> Result<(P, &'a [u8]), P::Error> {
        // Destructure so that `body` (mutably borrowed by the view) and `buf`
        // (shared borrow) can be held simultaneously.
        let Self { body, ref buf } = self;
        // Snapshot the pre-parse body range: on success, the returned slice
        // covers the bytes that constituted the body before parsing.
        let body_before = body.clone();
        let view = BufView { buf: &buf.as_ref()[body.clone()], body };
        P::parse(view, args).map(|r| (r, &buf.as_ref()[body_before]))
    }
}
/// A [`BufferView`] into a [`Buf`].
///
/// A `BufView` is constructed by [`Buf::buffer_view`], and implements
/// `BufferView`, providing a view into the `Buf` from which it was constructed.
pub struct BufView<'a> {
    // The not-yet-consumed bytes of the parent `Buf`'s body.
    buf: &'a [u8],
    // The parent `Buf`'s body range, updated in place as bytes are taken from
    // the front or back of the view.
    body: &'a mut Range<usize>,
}
impl<'a> BufferView<&'a [u8]> for BufView<'a> {
    /// Takes `n` bytes from the front, advancing the parent `Buf`'s body.
    fn take_front(&mut self, n: usize) -> Option<&'a [u8]> {
        if n > self.len() {
            None
        } else {
            self.body.start += n;
            Some(take_front(&mut self.buf, n))
        }
    }
    /// Takes `n` bytes from the back, retracting the parent `Buf`'s body.
    fn take_back(&mut self, n: usize) -> Option<&'a [u8]> {
        if n > self.len() {
            None
        } else {
            self.body.end -= n;
            Some(take_back(&mut self.buf, n))
        }
    }
    /// Consumes the view, returning whatever bytes remain.
    fn into_rest(self) -> &'a [u8] {
        self.buf
    }
}
impl<'a> AsRef<[u8]> for BufView<'a> {
    fn as_ref(&self) -> &[u8] {
        // Exposes the bytes not yet taken from the view.
        self.buf
    }
}
/// A [`BufferViewMut`] into a [`Buf`].
///
/// A `BufViewMut` is constructed by [`Buf::buffer_view_mut`], and implements
/// `BufferViewMut`, providing a mutable view into the `Buf` from which it was
/// constructed.
pub struct BufViewMut<'a> {
    // The not-yet-consumed bytes of the parent `Buf`'s body, mutably.
    buf: &'a mut [u8],
    // The parent `Buf`'s body range, updated in place as bytes are taken from
    // the front or back of the view.
    body: &'a mut Range<usize>,
}
impl<'a> BufferView<&'a mut [u8]> for BufViewMut<'a> {
    /// Takes `n` bytes from the front, advancing the parent `Buf`'s body.
    fn take_front(&mut self, n: usize) -> Option<&'a mut [u8]> {
        if n > self.len() {
            None
        } else {
            self.body.start += n;
            Some(take_front_mut(&mut self.buf, n))
        }
    }
    /// Takes `n` bytes from the back, retracting the parent `Buf`'s body.
    fn take_back(&mut self, n: usize) -> Option<&'a mut [u8]> {
        if n > self.len() {
            None
        } else {
            self.body.end -= n;
            Some(take_back_mut(&mut self.buf, n))
        }
    }
    /// Consumes the view, returning whatever bytes remain.
    fn into_rest(self) -> &'a mut [u8] {
        self.buf
    }
}
// `BufferViewMut` adds no required methods beyond `BufferView`'s.
impl<'a> BufferViewMut<&'a mut [u8]> for BufViewMut<'a> {}
impl<'a> AsRef<[u8]> for BufViewMut<'a> {
    fn as_ref(&self) -> &[u8] {
        // Exposes the bytes not yet taken from the view.
        self.buf
    }
}
impl<'a> AsMut<[u8]> for BufViewMut<'a> {
    fn as_mut(&mut self) -> &mut [u8] {
        // Mutable access to the bytes not yet taken from the view.
        self.buf
    }
}
/// The constraints required by a [`PacketBuilder`].
///
/// `PacketConstraints` represents the constraints that must be satisfied in
/// order to serialize a `PacketBuilder`.
///
/// A `PacketConstraints`, `c`, guarantees two properties:
/// - `c.max_body_len() >= c.min_body_len()`
/// - `c.header_len() + c.min_body_len() + c.footer_len()` does not overflow
/// `usize`
///
/// It is not possible (using safe code) to obtain a `PacketConstraints` which
/// violates these properties, so code may rely for its correctness on the
/// assumption that these properties hold.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct PacketConstraints {
    // Invariants, enforced by `try_new` (see the type-level docs):
    // - `min_body_len <= max_body_len`
    // - `header_len + min_body_len + footer_len` does not overflow `usize`
    header_len: usize,
    footer_len: usize,
    min_body_len: usize,
    max_body_len: usize,
}
impl PacketConstraints {
    /// A no-op `PacketConstraints` which does not add any constraints - there
    /// is no header, footer, minimum body length requirement, or maximum body
    /// length requirement.
    pub const UNCONSTRAINED: Self =
        Self { header_len: 0, footer_len: 0, min_body_len: 0, max_body_len: usize::MAX };
    /// Constructs a new `PacketConstraints`.
    ///
    /// # Panics
    ///
    /// `new` panics if the arguments violate the validity properties of
    /// `PacketConstraints` - if `max_body_len < min_body_len`, or if
    /// `header_len + min_body_len + footer_len` overflows `usize`.
    #[inline]
    pub fn new(
        header_len: usize,
        footer_len: usize,
        min_body_len: usize,
        max_body_len: usize,
    ) -> PacketConstraints {
        match PacketConstraints::try_new(header_len, footer_len, min_body_len, max_body_len) {
            Some(constraints) => constraints,
            None => panic!(
                "max_body_len < min_body_len or header_len + min_body_len + footer_len overflows usize"
            ),
        }
    }
    /// Tries to construct a new `PacketConstraints`.
    ///
    /// `new` returns `None` if the provided values violate the validity
    /// properties of `PacketConstraints` - if `max_body_len < min_body_len`, or
    /// if `header_len + min_body_len + footer_len` overflows `usize`.
    #[inline]
    pub fn try_new(
        header_len: usize,
        footer_len: usize,
        min_body_len: usize,
        max_body_len: usize,
    ) -> Option<PacketConstraints> {
        // Test case 5 in test_packet_constraints
        if max_body_len < min_body_len {
            return None;
        }
        // Test case 3 in test_packet_constraints
        let _ = header_len.checked_add(min_body_len)?.checked_add(footer_len)?;
        Some(PacketConstraints { header_len, footer_len, min_body_len, max_body_len })
    }
    /// Constructs a new `PacketConstraints` with a given `max_body_len`.
    ///
    /// The `header_len`, `footer_len`, and `min_body_len` are all `0`.
    #[inline]
    pub fn with_max_body_len(max_body_len: usize) -> PacketConstraints {
        // Validity holds trivially: `min_body_len` (0) cannot exceed
        // `max_body_len`, and `0 + 0 + 0` cannot overflow `usize`.
        PacketConstraints { header_len: 0, footer_len: 0, min_body_len: 0, max_body_len }
    }
    /// The number of bytes in this packet's header.
    #[inline]
    pub fn header_len(&self) -> usize {
        self.header_len
    }
    /// The number of bytes in this packet's footer.
    #[inline]
    pub fn footer_len(&self) -> usize {
        self.footer_len
    }
    /// The minimum body length (in bytes) required by this packet in order to
    /// avoid adding padding.
    ///
    /// `min_body_len` returns the minimum number of body bytes required in
    /// order to avoid adding padding. Note that, if padding bytes are required,
    /// they may not necessarily belong immediately following the body,
    /// depending on which packet layer imposes the minimum. In particular, in a
    /// nested packet, padding goes after the body of the layer which imposes
    /// the minimum. This means that, if the layer that imposes the minimum is
    /// not the innermost one, then padding must be added not after the
    /// innermost body, but instead in between footers.
    /// [`NestedPacketBuilder::serialize_into`] is responsible for inserting
    /// padding when serializing nested packets.
    ///
    /// If there is no minimum body length, this returns 0.
    #[inline]
    pub fn min_body_len(&self) -> usize {
        self.min_body_len
    }
    /// The maximum length (in bytes) of a body allowed by this packet.
    ///
    /// If there is no maximum body length, this returns [`core::usize::MAX`].
    #[inline]
    pub fn max_body_len(&self) -> usize {
        self.max_body_len
    }
    /// Attempts to encapsulate `self` in `outer`.
    ///
    /// Upon success, `try_encapsulate` returns a `PacketConstraints` which
    /// represents the encapsulation of `self` in `outer`. Its header length,
    /// footer length, minimum body length, and maximum body length are set
    /// accordingly.
    ///
    /// This is probably not the method you want to use; consider
    /// [`Serializer::encapsulate`] instead.
    pub fn try_encapsulate(&self, outer: &Self) -> Option<PacketConstraints> {
        // Test case 1 in test_packet_constraints
        let header_len = self.header_len.checked_add(outer.header_len)?;
        // Test case 2 in test_packet_constraints
        let footer_len = self.footer_len.checked_add(outer.footer_len)?;
        // Guaranteed not to overflow by the invariants on `PacketConstraints`.
        let self_overhead = self.header_len + self.footer_len;
        // `saturating_sub`: it's OK if this (inner) PacketBuilder more than
        // satisfies the outer PacketBuilder's minimum body length requirement.
        let min_body_len =
            cmp::max(outer.min_body_len.saturating_sub(self_overhead), self.min_body_len);
        // `checked_sub`: it's NOT OK if this (inner) PacketBuilder exceeds the
        // outer PacketBuilder's maximum body length requirement.
        //
        // Test case 4 in test_packet_constraints
        let max_body_len =
            cmp::min(outer.max_body_len.checked_sub(self_overhead)?, self.max_body_len);
        // `min_body_len > max_body_len` or overflow of
        // `header_len + min_body_len + footer_len` may still occur; `try_new`
        // re-checks both.
        PacketConstraints::try_new(header_len, footer_len, min_body_len, max_body_len)
    }
}
/// The target buffers into which [`PacketBuilder::serialize`] serializes its
/// header and footer.
pub struct SerializeTarget<'a> {
    /// The buffer into which the packet's header is serialized.
    pub header: &'a mut [u8],
    /// The buffer into which the packet's footer is serialized.
    pub footer: &'a mut [u8],
}
/// A builder capable of serializing a packet's headers and footers.
///
/// A `PacketBuilder` describes a packet's headers and footers, and is capable
/// of serializing that packet into an existing buffer via the `serialize`
/// method. A `PacketBuilder` never describes a body - one must always be
/// provided in order to call `serialize`.
///
/// Thanks to a blanket implementation, every `PacketBuilder` is also a
/// [`NestedPacketBuilder`]. While a `PacketBuilder` represents exactly one
/// "layer" of a packet - a pair of a header and a footer - a
/// `NestedPacketBuilder` can represent multiple nested layers of a packet, and
/// can be composed from other `NestedPacketBuilder`s. See the trait
/// documentation for more details.
///
/// `()` may be used as an "empty" `PacketBuilder` with no header, footer,
/// minimum body length requirement, or maximum body length requirement.
pub trait PacketBuilder {
    /// Gets the constraints for this `PacketBuilder`.
    ///
    /// The returned [`PacketConstraints`] describes the header and footer
    /// lengths this builder will serialize and the body-length bounds that
    /// callers of `serialize` must satisfy.
    fn constraints(&self) -> PacketConstraints;
    /// Serializes this packet into an existing buffer.
    ///
    /// *This method is usually called by this crate during the serialization of
    /// a [`Serializer`], not directly by the user.*
    ///
    /// # Preconditions
    ///
    /// The caller is responsible for initializing `body` with the body to be
    /// encapsulated, and for ensuring that the body satisfies both the minimum
    /// and maximum body length requirements, possibly by adding padding or by
    /// truncating the body.
    ///
    /// # Postconditions
    ///
    /// `serialize` is responsible for serializing its header and footer into
    /// `target.header` and `target.footer` respectively.
    ///
    /// # Security
    ///
    /// `serialize` must initialize the bytes of the header and footer, even if
    /// only to zero, in order to avoid leaking the contents of packets
    /// previously stored in the same buffer.
    ///
    /// # Panics
    ///
    /// May panic if the `target.header` or `target.footer` are not large enough
    /// to fit the packet's header and footer respectively, or if the body does
    /// not satisfy the minimum or maximum body length requirements.
    fn serialize(&self, target: &mut SerializeTarget<'_>, body: FragmentedBytesMut<'_, '_>);
}
// A shared reference to a `PacketBuilder` is itself a `PacketBuilder`,
// delegating to the referent.
impl<'a, B: PacketBuilder> PacketBuilder for &'a B {
    #[inline]
    fn constraints(&self) -> PacketConstraints {
        (**self).constraints()
    }
    #[inline]
    fn serialize(&self, target: &mut SerializeTarget<'_>, body: FragmentedBytesMut<'_, '_>) {
        (**self).serialize(target, body)
    }
}
// A mutable reference to a `PacketBuilder` is itself a `PacketBuilder`,
// delegating to the referent.
impl<'a, B: PacketBuilder> PacketBuilder for &'a mut B {
    #[inline]
    fn constraints(&self) -> PacketConstraints {
        (**self).constraints()
    }
    #[inline]
    fn serialize(&self, target: &mut SerializeTarget<'_>, body: FragmentedBytesMut<'_, '_>) {
        (**self).serialize(target, body)
    }
}
// The unit type is the canonical "empty" `PacketBuilder`: it contributes no
// header or footer bytes and imposes no body-length constraints.
impl PacketBuilder for () {
    #[inline]
    fn constraints(&self) -> PacketConstraints {
        PacketConstraints::UNCONSTRAINED
    }
    // With no header and no footer, there is nothing to write.
    #[inline]
    fn serialize(&self, _target: &mut SerializeTarget<'_>, _body: FragmentedBytesMut<'_, '_>) {}
}
// `Never` is uninhabited, so a `Never` `PacketBuilder` can never exist at
// runtime; these method bodies are unreachable.
impl PacketBuilder for Never {
    fn constraints(&self) -> PacketConstraints {
        // Matching on an uninhabited value produces any desired type.
        match *self {}
    }
    fn serialize(&self, _target: &mut SerializeTarget<'_>, _body: FragmentedBytesMut<'_, '_>) {}
}
/// One object encapsulated in another one.
///
/// `Nested`s are constructed using the [`NestedPacketBuilder::encapsulate`] and
/// [`Serializer::encapsulate`] methods.
///
/// When `I: NestedPacketBuilder` and `O: NestedPacketBuilder`, `Nested<I, O>`
/// implements [`NestedPacketBuilder`]. When `I: Serializer` and `O:
/// NestedPacketBuilder`, `Nested<I, O>` implements [`Serializer`].
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct Nested<I, O> {
    /// The encapsulated (inner) object.
    inner: I,
    /// The encapsulating (outer) object.
    outer: O,
}
impl<I, O> Nested<I, O> {
    /// Consumes this `Nested` and returns the inner object, discarding the
    /// outer one.
    #[inline]
    pub fn into_inner(self) -> I {
        self.inner
    }
    /// Consumes this `Nested` and returns the outer object, discarding the
    /// inner one.
    #[inline]
    pub fn into_outer(self) -> O {
        self.outer
    }
    /// Gets a reference to the inner object.
    #[inline]
    pub fn inner(&self) -> &I {
        &self.inner
    }
    /// Gets a mutable reference to the inner object.
    #[inline]
    pub fn inner_mut(&mut self) -> &mut I {
        &mut self.inner
    }
    /// Gets a reference to the outer object.
    #[inline]
    pub fn outer(&self) -> &O {
        &self.outer
    }
    /// Gets a mutable reference to the outer object.
    #[inline]
    pub fn outer_mut(&mut self) -> &mut O {
        &mut self.outer
    }
}
/// A [`PacketBuilder`] which has no header or footer, but which imposes a
/// maximum body length constraint.
///
/// `LimitedSizePacketBuilder`s are constructed using the
/// [`Serializer::with_size_limit`] method.
#[derive(Copy, Clone, Debug)]
#[cfg_attr(test, derive(Eq, PartialEq))]
pub struct LimitedSizePacketBuilder {
    /// The maximum body length, in bytes.
    pub limit: usize,
}
impl PacketBuilder for LimitedSizePacketBuilder {
    fn constraints(&self) -> PacketConstraints {
        PacketConstraints::with_max_body_len(self.limit)
    }
    // No header or footer, so serialization writes nothing; the size limit is
    // enforced entirely through `constraints`.
    fn serialize(&self, _target: &mut SerializeTarget<'_>, _body: FragmentedBytesMut<'_, '_>) {}
}
/// A builder capable of serializing packets - which do not encapsulate other
/// packets - into an existing buffer.
///
/// An `InnerPacketBuilder` describes a packet, and is capable of serializing
/// that packet into an existing buffer via the `serialize` method. Unlike the
/// [`PacketBuilder`] trait, it describes a packet which does not encapsulate
/// other packets.
///
/// # Notable implementations
///
/// `InnerPacketBuilder` is implemented for `&[u8]`, `&mut [u8]`, and `Vec<u8>`
/// by treating the contents of the slice/`Vec` as the contents of the packet to
/// be serialized.
pub trait InnerPacketBuilder {
    /// The number of bytes consumed by this packet.
    fn bytes_len(&self) -> usize;
    /// Serializes this packet into an existing buffer.
    ///
    /// `serialize` is called with a buffer of length `self.bytes_len()`, and is
    /// responsible for serializing the packet into the buffer.
    ///
    /// # Security
    ///
    /// All of the bytes of the buffer should be initialized, even if only to
    /// zero, in order to avoid leaking the contents of packets previously
    /// stored in the same buffer.
    ///
    /// # Panics
    ///
    /// May panic if `buffer.len() != self.bytes_len()`.
    fn serialize(&self, buffer: &mut [u8]);
    /// Converts this `InnerPacketBuilder` into a [`Serializer`].
    ///
    /// `into_serializer` is like [`into_serializer_with`], except that no
    /// buffer is provided for reuse in serialization.
    ///
    /// [`into_serializer_with`]: InnerPacketBuilder::into_serializer_with
    #[inline]
    fn into_serializer(self) -> InnerSerializer<Self, EmptyBuf>
    where
        Self: Sized,
    {
        self.into_serializer_with(EmptyBuf)
    }
    /// Converts this `InnerPacketBuilder` into a [`Serializer`] with a buffer
    /// that can be used for serialization.
    ///
    /// `into_serializer_with` consumes a buffer and converts `self` into a type
    /// which implements `Serialize` by treating it as the innermost body to be
    /// contained within any encapsulating [`PacketBuilder`]s. During
    /// serialization, `buffer` will be provided to the [`BufferProvider`],
    /// allowing it to reuse the buffer for serialization and avoid allocating a
    /// new one if possible.
    ///
    /// `buffer` will have its body shrunk to be zero bytes before the
    /// `InnerSerializer` is constructed.
    fn into_serializer_with<B: ShrinkBuffer>(self, mut buffer: B) -> InnerSerializer<Self, B>
    where
        Self: Sized,
    {
        // Discard any existing body bytes; `InnerSerializer` relies on the
        // stored buffer having a zero-length body.
        buffer.shrink_back_to(0);
        InnerSerializer { inner: self, buffer }
    }
}
// A shared reference to an `InnerPacketBuilder` is itself an
// `InnerPacketBuilder`, delegating to the referent.
impl<'a, I: InnerPacketBuilder> InnerPacketBuilder for &'a I {
    #[inline]
    fn bytes_len(&self) -> usize {
        (**self).bytes_len()
    }
    #[inline]
    fn serialize(&self, buffer: &mut [u8]) {
        (**self).serialize(buffer)
    }
}
// A mutable reference to an `InnerPacketBuilder` is itself an
// `InnerPacketBuilder`, delegating to the referent.
impl<'a, I: InnerPacketBuilder> InnerPacketBuilder for &'a mut I {
    #[inline]
    fn bytes_len(&self) -> usize {
        (**self).bytes_len()
    }
    #[inline]
    fn serialize(&self, buffer: &mut [u8]) {
        (**self).serialize(buffer)
    }
}
// A byte slice's contents are the packet's contents.
impl<'a> InnerPacketBuilder for &'a [u8] {
    #[inline]
    fn bytes_len(&self) -> usize {
        <[u8]>::len(self)
    }
    #[inline]
    fn serialize(&self, buffer: &mut [u8]) {
        // Per the trait contract, `buffer` is exactly `bytes_len()` long.
        buffer.copy_from_slice(self);
    }
}
// A mutable byte slice's contents are the packet's contents.
impl<'a> InnerPacketBuilder for &'a mut [u8] {
    #[inline]
    fn bytes_len(&self) -> usize {
        <[u8]>::len(self)
    }
    #[inline]
    fn serialize(&self, buffer: &mut [u8]) {
        // Per the trait contract, `buffer` is exactly `bytes_len()` long.
        buffer.copy_from_slice(self);
    }
}
// A `Vec<u8>`'s contents are the packet's contents.
//
// Note: the original impl declared an unused lifetime parameter (`impl<'a>
// ... for Vec<u8>`); `Vec<u8>` borrows nothing, so the lifetime is removed.
impl InnerPacketBuilder for Vec<u8> {
    #[inline]
    fn bytes_len(&self) -> usize {
        self.len()
    }
    #[inline]
    fn serialize(&self, buffer: &mut [u8]) {
        // Per the trait contract, `buffer` is exactly `self.len()` long.
        buffer.copy_from_slice(self.as_slice());
    }
}
impl<const N: usize> InnerPacketBuilder for ArrayVec<u8, N> {
fn bytes_len(&self) -> usize {
self.as_slice().bytes_len()
}
fn serialize(&self, buffer: &mut [u8]) {
self.as_slice().serialize(buffer);
}
}
/// An [`InnerPacketBuilder`] created from any [`B: SplitByteSlice`].
///
/// `ByteSliceInnerPacketBuilder<B>` implements `InnerPacketBuilder` so long as
/// `B: SplitByteSlice`. The wrapped slice's bytes are treated as the packet's
/// contents.
///
/// [`B: SplitByteSlice`]: zerocopy::SplitByteSlice
pub struct ByteSliceInnerPacketBuilder<B>(pub B);
// Delegates both methods to the wrapped slice's `&[u8]` implementation.
impl<B: SplitByteSlice> InnerPacketBuilder for ByteSliceInnerPacketBuilder<B> {
    fn bytes_len(&self) -> usize {
        let Self(slice) = self;
        slice.deref().bytes_len()
    }
    fn serialize(&self, buffer: &mut [u8]) {
        let Self(slice) = self;
        slice.deref().serialize(buffer)
    }
}
impl<B: SplitByteSlice> Debug for ByteSliceInnerPacketBuilder<B> {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        // Render the wrapped bytes via their slice Debug representation.
        let Self(slice) = self;
        write!(f, "ByteSliceInnerPacketBuilder({:?})", slice.as_ref())
    }
}
/// An error in serializing a packet.
///
/// `SerializeError` is the error type returned from methods on the
/// [`Serializer`] trait. `Alloc` means that a new buffer could not be
/// allocated; `SizeLimitExceeded` means that a size limit constraint was
/// violated.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum SerializeError<A> {
    /// A new buffer could not be allocated.
    Alloc(A),
    /// The size limit constraint was exceeded.
    SizeLimitExceeded,
}
impl<A> SerializeError<A> {
    /// Is this `SerializeError::Alloc`?
    #[inline]
    pub fn is_alloc(&self) -> bool {
        matches!(self, SerializeError::Alloc(_))
    }
    /// Is this `SerializeError::SizeLimitExceeded`?
    #[inline]
    pub fn is_size_limit_exceeded(&self) -> bool {
        matches!(self, SerializeError::SizeLimitExceeded)
    }
    /// Maps the [`SerializeError::Alloc`] error type.
    pub fn map_alloc<T, F: FnOnce(A) -> T>(self, f: F) -> SerializeError<T> {
        match self {
            SerializeError::Alloc(a) => SerializeError::Alloc(f(a)),
            SerializeError::SizeLimitExceeded => SerializeError::SizeLimitExceeded,
        }
    }
}
// Any allocation error converts into the `Alloc` variant.
impl<A> From<A> for SerializeError<A> {
    fn from(a: A) -> SerializeError<A> {
        SerializeError::Alloc(a)
    }
}
/// The error returned when a buffer is too short to hold a serialized packet,
/// and the [`BufferProvider`] is incapable of allocating a new one.
///
/// `BufferTooShortError` is returned by the [`Serializer`] methods
/// [`serialize_no_alloc`] and [`serialize_no_alloc_outer`]. It is a zero-sized
/// marker type which carries no further information.
///
/// [`serialize_no_alloc`]: Serializer::serialize_no_alloc
/// [`serialize_no_alloc_outer`]: Serializer::serialize_no_alloc_outer
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct BufferTooShortError;
/// An object capable of providing buffers which satisfy certain constraints.
///
/// A `BufferProvider<Input, Output>` is an object which is capable of consuming
/// a buffer of type `Input` and, either by reusing it or by allocating a new
/// one and copying the input buffer's body into it, producing a buffer of type
/// `Output` which meets certain prefix and suffix length constraints.
///
/// A `BufferProvider` must always be provided when serializing a
/// [`Serializer`].
///
/// Implementors may find the helper function [`try_reuse_buffer`] useful.
///
/// For clients who don't need the full expressive power of this trait, the
/// simpler [`BufferAlloc`] trait is provided. It only defines how to allocate
/// new buffers, and two blanket impls of `BufferProvider` are provided for all
/// `BufferAlloc` types.
pub trait BufferProvider<Input, Output> {
    /// The type of errors returned from [`reuse_or_realloc`].
    ///
    /// [`reuse_or_realloc`]: BufferProvider::reuse_or_realloc
    type Error;
    /// Attempts to produce an output buffer with the given constraints by
    /// allocating a new one.
    ///
    /// `alloc_no_reuse` produces a new buffer with the following invariants:
    /// - The output buffer must have at least `prefix` bytes of prefix
    /// - The output buffer must have at least `suffix` bytes of suffix
    /// - The output buffer must have a body of length `body` bytes.
    ///
    /// If these requirements cannot be met, then an error is returned.
    fn alloc_no_reuse(
        self,
        prefix: usize,
        body: usize,
        suffix: usize,
    ) -> Result<Output, Self::Error>;
    /// Consumes an input buffer and attempts to produce an output buffer with
    /// the given constraints, either by reusing the input buffer or by
    /// allocating a new one and copying the body into it.
    ///
    /// `reuse_or_realloc` consumes a buffer by value, and produces a new buffer
    /// with the following invariants:
    /// - The output buffer must have at least `prefix` bytes of prefix
    /// - The output buffer must have at least `suffix` bytes of suffix
    /// - The output buffer must have the same body as the input buffer
    ///
    /// If these requirements cannot be met, then an error is returned along
    /// with the input buffer, which is unmodified.
    fn reuse_or_realloc(
        self,
        buffer: Input,
        prefix: usize,
        suffix: usize,
    ) -> Result<Output, (Self::Error, Input)>;
}
/// An object capable of allocating new buffers.
///
/// A `BufferAlloc<Output>` is an object which is capable of allocating new
/// buffers of type `Output`.
///
/// [Two blanket implementations] of [`BufferProvider`] are given for any type
/// which implements `BufferAlloc<O>`. One blanket implementation works for any
/// input buffer type, `I`, and produces buffers of type `Either<I, O>` as
/// output. One blanket implementation works only when the input and output
/// buffer types are the same, and produces buffers of that type. See the
/// documentation on those impls for more details.
///
/// The following implementations of `BufferAlloc` are provided:
/// - Any `FnOnce(usize) -> Result<O, E>` implements `BufferAlloc<O, Error = E>`
/// - `()` implements `BufferAlloc<Never, Error = ()>` (an allocator which
/// always fails)
/// - [`new_buf_vec`] implements `BufferAlloc<Buf<Vec<u8>>, Error = Never>` (an
/// allocator which infallibly heap-allocates `Vec`s)
///
/// [Two blanket implementations]: trait.BufferProvider.html#implementors
pub trait BufferAlloc<Output> {
    /// The type of errors returned from [`alloc`].
    ///
    /// [`alloc`]: BufferAlloc::alloc
    type Error;
    /// Attempts to allocate a new buffer of size `len`.
    ///
    /// Returns the new buffer on success, or `Self::Error` on failure.
    fn alloc(self, len: usize) -> Result<Output, Self::Error>;
}
// Any one-shot function from a length to a buffer is an allocator.
impl<O, E, F: FnOnce(usize) -> Result<O, E>> BufferAlloc<O> for F {
    type Error = E;
    #[inline]
    fn alloc(self, size: usize) -> Result<O, E> {
        // Allocation is simply a call to the wrapped function.
        (self)(size)
    }
}
// The always-failing allocator: every call to `alloc` returns `Err(())`.
// Useful for callers which want serialization to fail rather than allocate
// (see `Serializer::serialize_no_alloc`).
impl BufferAlloc<Never> for () {
    type Error = ();
    #[inline]
    fn alloc(self, _len: usize) -> Result<Never, ()> {
        Err(())
    }
}
/// Allocates a new `Buf<Vec<u8>>`.
///
/// `new_buf_vec(len)` is shorthand for `Ok(Buf::new(vec![0; len], ..))`. It
/// implements [`BufferAlloc<Buf<Vec<u8>>, Error = Never>`], and, thanks to a
/// blanket impl, [`BufferProvider<I, Either<I, Buf<Vec<u8>>>, Error = Never>`]
/// for all `I: BufferMut`, and `BufferProvider<Buf<Vec<u8>>, Buf<Vec<u8>>,
/// Error = Never>`.
///
/// [`BufferAlloc<Buf<Vec<u8>>, Error = Never>`]: BufferAlloc
/// [`BufferProvider<I, Either<I, Buf<Vec<u8>>>, Error = Never>`]: BufferProvider
pub fn new_buf_vec(len: usize) -> Result<Buf<Vec<u8>>, Never> {
    // `vec![0; len]` zero-initializes the allocation, so no stale data can
    // leak into the new buffer.
    Ok(Buf::new(vec![0; len], ..))
}
/// Attempts to reuse a buffer for the purposes of implementing
/// [`BufferProvider::reuse_or_realloc`].
///
/// `try_reuse_buffer` tries to satisfy the given `prefix` and `suffix`
/// constraints using the buffer it is given:
/// - If the constraints are already satisfied, `Ok` is returned with the
///   input unmodified.
/// - If they are not, but the buffer's capacity can accommodate
///   `prefix + body + suffix` bytes and the body is no longer than
///   `max_copy_bytes`, the body is moved within the buffer to satisfy the
///   constraints, and the adjusted buffer is returned in `Ok`.
/// - Otherwise, `Err` is returned with the input unmodified.
///
/// On success, the returned buffer has the same body as the input, at least
/// `prefix` bytes of prefix, and at least `suffix` bytes of suffix.
///
/// `max_copy_bytes` is meant to be an estimate of how many bytes can be copied
/// before allocating a new buffer will be cheaper than copying.
#[inline]
pub fn try_reuse_buffer<B: GrowBufferMut + ShrinkBuffer>(
    mut buffer: B,
    prefix: usize,
    suffix: usize,
    max_copy_bytes: usize,
) -> Result<B, B> {
    let body_start = buffer.prefix_len();
    let body_len = buffer.len();
    let available_suffix = buffer.suffix_len();
    let required_capacity = prefix + body_len + suffix;
    if body_start >= prefix && available_suffix >= suffix {
        // The body already has enough room on both sides; nothing to do.
        return Ok(buffer);
    }
    if buffer.capacity() < required_capacity || body_len > max_copy_bytes {
        // Either the buffer is simply too small, or moving the body would
        // copy more bytes than the caller is willing to pay for.
        return Err(buffer);
    }
    // The buffer is big enough, but the body sits too far forward or too far
    // back to leave the requested prefix or suffix. Expose the full capacity,
    // slide the original body so that it starts exactly `prefix` bytes in,
    // and re-shrink around it. The prefix constraint then holds by
    // construction, and the suffix constraint holds because the capacity fits
    // the prefix, body, and suffix combined.
    buffer.reset();
    buffer.copy_within(body_start..(body_start + body_len), prefix);
    buffer.shrink(prefix..(prefix + body_len));
    debug_assert_eq!(buffer.prefix_len(), prefix);
    debug_assert!(buffer.suffix_len() >= suffix);
    debug_assert_eq!(buffer.len(), body_len);
    Ok(buffer)
}
/// Provides an implementation of [`BufferProvider`] from a [`BufferAlloc`] `A`
/// that attempts to reuse the input buffer and falls back to the allocator if
/// the input buffer can't be reused.
pub struct MaybeReuseBufferProvider<A>(pub A);
impl<I: ReusableBuffer, O: ReusableBuffer, A: BufferAlloc<O>> BufferProvider<I, Either<I, O>>
    for MaybeReuseBufferProvider<A>
{
    type Error = A::Error;
    // Allocates a fresh `prefix + body + suffix`-byte buffer via `A` and
    // positions its body region `prefix` bytes in, `body` bytes long.
    fn alloc_no_reuse(
        self,
        prefix: usize,
        body: usize,
        suffix: usize,
    ) -> Result<Either<I, O>, Self::Error> {
        let Self(alloc) = self;
        let need_capacity = prefix + body + suffix;
        BufferAlloc::alloc(alloc, need_capacity).map(|mut buf| {
            // Carve the body region out of the freshly-allocated buffer.
            buf.shrink(prefix..(prefix + body));
            Either::B(buf)
        })
    }
    /// If `buffer` has enough capacity to store `need_prefix + need_suffix +
    /// buffer.len()` bytes, then reuse `buffer`. Otherwise, allocate a new
    /// buffer using `A`'s [`BufferAlloc`] implementation.
    ///
    /// If there is enough capacity, but the body is too far forwards or
    /// backwards in the buffer to satisfy the prefix and suffix constraints,
    /// the body will be moved within the buffer in order to satisfy the
    /// constraints. This operation is linear in the length of the body.
    #[inline]
    fn reuse_or_realloc(
        self,
        buffer: I,
        need_prefix: usize,
        need_suffix: usize,
    ) -> Result<Either<I, O>, (A::Error, I)> {
        // TODO(joshlf): Maybe it's worth coming up with a heuristic for when
        // moving the body is likely to be more expensive than allocating
        // (rather than just using `usize::MAX`)? This will be tough since we
        // don't know anything about the performance of `A::alloc`.
        match try_reuse_buffer(buffer, need_prefix, need_suffix, usize::MAX) {
            Ok(buffer) => Ok(Either::A(buffer)),
            Err(buffer) => {
                // Reuse failed: allocate a replacement with the same body
                // length and copy the body across. On allocation failure,
                // hand the untouched input buffer back to the caller.
                let have_body = buffer.len();
                let mut buf = match BufferProvider::<I, Either<I, O>>::alloc_no_reuse(
                    self,
                    need_prefix,
                    have_body,
                    need_suffix,
                ) {
                    Ok(buf) => buf,
                    Err(err) => return Err((err, buffer)),
                };
                buf.copy_from(&buffer);
                debug_assert_eq!(buf.prefix_len(), need_prefix);
                debug_assert!(buf.suffix_len() >= need_suffix);
                debug_assert_eq!(buf.len(), have_body);
                Ok(buf)
            }
        }
    }
}
impl<B: ReusableBuffer, A: BufferAlloc<B>> BufferProvider<B, B> for MaybeReuseBufferProvider<A> {
    type Error = A::Error;
    fn alloc_no_reuse(self, prefix: usize, body: usize, suffix: usize) -> Result<B, Self::Error> {
        // Delegate to the `Either<B, B>`-producing implementation; both
        // variants hold a `B` here, so the `Either` can be collapsed.
        let either =
            BufferProvider::<B, Either<B, B>>::alloc_no_reuse(self, prefix, body, suffix)?;
        Ok(either.into_inner())
    }
    /// Reuses `buffer` when it can hold `prefix + buffer.len() + suffix`
    /// bytes, moving the body within the buffer (linear in the body length)
    /// if it is positioned such that the prefix or suffix constraint is
    /// violated; otherwise allocates a replacement using `A`'s
    /// [`BufferAlloc`] implementation.
    #[inline]
    fn reuse_or_realloc(self, buffer: B, prefix: usize, suffix: usize) -> Result<B, (A::Error, B)> {
        let either =
            BufferProvider::<B, Either<B, B>>::reuse_or_realloc(self, buffer, prefix, suffix)?;
        Ok(either.into_inner())
    }
}
/// Provides an implementation of [`BufferProvider`] from a [`BufferAlloc`] `A`
/// that never attempts to reuse the input buffer, and always creates a new
/// buffer from the allocator `A`.
pub struct NoReuseBufferProvider<A>(pub A);
impl<I: FragmentedBuffer, O: ReusableBuffer, A: BufferAlloc<O>> BufferProvider<I, O>
    for NoReuseBufferProvider<A>
{
    type Error = A::Error;
    fn alloc_no_reuse(self, prefix: usize, body: usize, suffix: usize) -> Result<O, A::Error> {
        let Self(alloc) = self;
        // Allocate prefix + body + suffix bytes, then position the body
        // region to cover exactly the `body` bytes after the prefix.
        let mut buf = alloc.alloc(prefix + body + suffix)?;
        buf.shrink(prefix..prefix + body);
        Ok(buf)
    }
    fn reuse_or_realloc(self, buffer: I, prefix: usize, suffix: usize) -> Result<O, (A::Error, I)> {
        // Never reuse: always allocate a fresh buffer and copy the body
        // across. The input buffer is returned only on allocation failure.
        match BufferProvider::<I, O>::alloc_no_reuse(self, prefix, buffer.len(), suffix) {
            Ok(mut b) => {
                b.copy_from(&buffer);
                Ok(b)
            }
            Err(e) => Err((e, buffer)),
        }
    }
}
/// A type which can be serialized, producing a buffer that contains its bytes
/// with room reserved for any number of encapsulating packet layers.
pub trait Serializer: Sized {
    /// The type of buffers returned from serialization methods on this trait.
    type Buffer;
    /// Serializes this `Serializer`, producing a buffer.
    ///
    /// `serialize` accepts the [`PacketConstraints`] describing the outer
    /// packet layers and a [`BufferProvider`], and produces a buffer which
    /// contains the contents of this `Serializer` with room for the headers
    /// and footers described by those constraints.
    ///
    /// As `Serializer`s can be nested using the [`Nested`] type (constructed
    /// using the [`encapsulate`] method), the `serialize` method is recursive -
    /// calling it on a `Nested` will recurse into the inner `Serializer`, which
    /// might itself be a `Nested`, and so on. When the innermost `Serializer`
    /// is reached, the contained buffer is passed to the `provider`, allowing
    /// it to decide how to produce a buffer which is large enough to fit the
    /// entire packet - either by reusing the existing buffer, or by discarding
    /// it and allocating a new one.
    ///
    /// [`encapsulate`]: Serializer::encapsulate
    fn serialize<B: GrowBufferMut, P: BufferProvider<Self::Buffer, B>>(
        self,
        outer: PacketConstraints,
        provider: P,
    ) -> Result<B, (SerializeError<P::Error>, Self)>;
    /// Serializes the data into a new buffer without consuming `self`.
    ///
    /// Creates a new buffer using `alloc` and serializes the data into that
    /// new buffer. Unlike all other serialize methods, `serialize_new_buf`
    /// takes `self` by reference. This allows to use the same `Serializer` to
    /// serialize the data more than once.
    fn serialize_new_buf<B: ReusableBuffer, A: BufferAlloc<B>>(
        &self,
        outer: PacketConstraints,
        alloc: A,
    ) -> Result<B, SerializeError<A::Error>>;
    /// Serializes this `Serializer`, allocating a [`Buf<Vec<u8>>`] if the
    /// contained buffer isn't large enough.
    ///
    /// `serialize_vec` is like [`serialize`], except that, if the contained
    /// buffer isn't large enough to contain the packet, a new `Vec<u8>` is
    /// allocated and wrapped in a [`Buf`]. If the buffer is large enough, but
    /// the body is too far forwards or backwards to fit the encapsulating
    /// headers or footers, the body will be moved within the buffer (this
    /// operation's cost is linear in the size of the body).
    ///
    /// `serialize_vec` is equivalent to calling `serialize` with
    /// [`new_buf_vec`] as the [`BufferProvider`].
    ///
    /// [`Buf<Vec<u8>>`]: Buf
    /// [`serialize`]: Serializer::serialize
    #[inline]
    #[allow(clippy::type_complexity)]
    fn serialize_vec(
        self,
        outer: PacketConstraints,
    ) -> Result<Either<Self::Buffer, Buf<Vec<u8>>>, (SerializeError<Never>, Self)>
    where
        Self::Buffer: ReusableBuffer,
    {
        self.serialize(outer, MaybeReuseBufferProvider(new_buf_vec))
    }
    /// Serializes this `Serializer`, failing if the existing buffer is not
    /// large enough.
    ///
    /// `serialize_no_alloc` is like [`serialize`], except that it will fail if
    /// the existing buffer isn't large enough. If the buffer is large enough,
    /// but the body is too far forwards or backwards to fit the encapsulating
    /// headers or footers, the body will be moved within the buffer (this
    /// operation's cost is linear in the size of the body).
    ///
    /// `serialize_no_alloc` is equivalent to calling `serialize` with a
    /// `BufferProvider` which cannot allocate a new buffer (such as `()`).
    ///
    /// [`serialize`]: Serializer::serialize
    #[inline]
    fn serialize_no_alloc(
        self,
        outer: PacketConstraints,
    ) -> Result<Self::Buffer, (SerializeError<BufferTooShortError>, Self)>
    where
        Self::Buffer: ReusableBuffer,
    {
        // The `()` allocator always fails, so the only possible allocation
        // error is `()`, which is translated to `BufferTooShortError` here.
        self.serialize(outer, MaybeReuseBufferProvider(())).map(Either::into_a).map_err(
            |(err, slf)| {
                (
                    match err {
                        SerializeError::Alloc(()) => BufferTooShortError.into(),
                        SerializeError::SizeLimitExceeded => SerializeError::SizeLimitExceeded,
                    },
                    slf,
                )
            },
        )
    }
    /// Serializes this `Serializer` as the outermost packet.
    ///
    /// `serialize_outer` is like [`serialize`], except that it is called when
    /// this `Serializer` describes the outermost packet, not encapsulated in
    /// any other packets. It is equivalent to calling `serialize` with
    /// [`PacketConstraints::UNCONSTRAINED`].
    ///
    /// [`serialize`]: Serializer::serialize
    #[inline]
    fn serialize_outer<B: GrowBufferMut, P: BufferProvider<Self::Buffer, B>>(
        self,
        provider: P,
    ) -> Result<B, (SerializeError<P::Error>, Self)> {
        self.serialize(PacketConstraints::UNCONSTRAINED, provider)
    }
    /// Serializes this `Serializer` as the outermost packet, allocating a
    /// [`Buf<Vec<u8>>`] if the contained buffer isn't large enough.
    ///
    /// `serialize_vec_outer` is like [`serialize_vec`], except that it is
    /// called when this `Serializer` describes the outermost packet, not
    /// encapsulated in any other packets. It is equivalent to calling
    /// `serialize_vec` with [`PacketConstraints::UNCONSTRAINED`].
    ///
    /// [`Buf<Vec<u8>>`]: Buf
    /// [`serialize_vec`]: Serializer::serialize_vec
    #[inline]
    #[allow(clippy::type_complexity)]
    fn serialize_vec_outer(
        self,
    ) -> Result<Either<Self::Buffer, Buf<Vec<u8>>>, (SerializeError<Never>, Self)>
    where
        Self::Buffer: ReusableBuffer,
    {
        self.serialize_vec(PacketConstraints::UNCONSTRAINED)
    }
    /// Serializes this `Serializer` as the outermost packet, failing if the
    /// existing buffer is not large enough.
    ///
    /// `serialize_no_alloc_outer` is like [`serialize_no_alloc`], except that
    /// it is called when this `Serializer` describes the outermost packet, not
    /// encapsulated in any other packets. It is equivalent to calling
    /// `serialize_no_alloc` with [`PacketConstraints::UNCONSTRAINED`].
    ///
    /// [`serialize_no_alloc`]: Serializer::serialize_no_alloc
    #[inline]
    fn serialize_no_alloc_outer(
        self,
    ) -> Result<Self::Buffer, (SerializeError<BufferTooShortError>, Self)>
    where
        Self::Buffer: ReusableBuffer,
    {
        self.serialize_no_alloc(PacketConstraints::UNCONSTRAINED)
    }
    /// Encapsulates this `Serializer` in another packet, producing a new
    /// `Serializer`.
    ///
    /// `encapsulate` consumes this `Serializer` and a [`PacketBuilder`], and
    /// produces a new `Serializer` which describes encapsulating this one in
    /// the packet described by `outer`.
    #[inline]
    fn encapsulate<B>(self, outer: B) -> Nested<Self, B> {
        Nested { inner: self, outer }
    }
    /// Creates a new `Serializer` which will enforce a size limit.
    ///
    /// `with_size_limit` consumes this `Serializer` and limit, and produces a
    /// new `Serializer` which will enforce the given limit on all serialization
    /// requests. Note that the given limit will be enforced at this layer -
    /// serialization requests will be rejected if the body produced by the
    /// request at this layer would exceed the limit. It has no effect on
    /// headers or footers added by encapsulating layers outside of this one.
    #[inline]
    fn with_size_limit(self, limit: usize) -> Nested<Self, LimitedSizePacketBuilder> {
        self.encapsulate(LimitedSizePacketBuilder { limit })
    }
}
/// A [`Serializer`] constructed from an [`InnerPacketBuilder`].
///
/// An `InnerSerializer` wraps an `InnerPacketBuilder` and a buffer, and
/// implements the `Serializer` trait. When a serialization is requested, it
/// either reuses the stored buffer or allocates a new one large enough to hold
/// itself and all outer `PacketBuilder`s.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct InnerSerializer<I, B> {
    // The wrapped `InnerPacketBuilder`, which provides the packet's bytes.
    inner: I,
    // The buffer's length must be zero since we encapsulate the buffer in a
    // PacketBuilder. If the length were non-zero, that would have the effect of
    // retaining the contents of the buffer when serializing, and putting them
    // immediately after the bytes of `inner`.
    buffer: B,
}
impl<I, B> InnerSerializer<I, B> {
    /// Gets a reference to the wrapped [`InnerPacketBuilder`].
    pub fn inner(&self) -> &I {
        &self.inner
    }
}
/// A wrapper for `InnerPacketBuilders` which implements `PacketBuilder` by
/// treating the entire `InnerPacketBuilder` as the header of the
/// `PacketBuilder`. This allows us to compose our InnerPacketBuilder with
/// the outer `PacketBuilders` into a single, large `PacketBuilder`, and then
/// serialize it using `self.buffer`.
struct InnerPacketBuilderWrapper<I>(I);
impl<I: InnerPacketBuilder> PacketBuilder for InnerPacketBuilderWrapper<I> {
    fn constraints(&self) -> PacketConstraints {
        let Self(wrapped) = self;
        // The entire inner packet is treated as a header: there is no
        // footer, no minimum body length, and no maximum body length.
        PacketConstraints::new(wrapped.bytes_len(), 0, 0, usize::MAX)
    }
    fn serialize(&self, target: &mut SerializeTarget<'_>, _body: FragmentedBytesMut<'_, '_>) {
        let Self(wrapped) = self;
        // Note that the body might be non-empty if an outer
        // PacketBuilder added a minimum body length constraint that
        // required padding.
        debug_assert_eq!(target.header.len(), wrapped.bytes_len());
        debug_assert_eq!(target.footer.len(), 0);
        InnerPacketBuilder::serialize(wrapped, target.header);
    }
}
impl<I: InnerPacketBuilder, B: GrowBuffer + ShrinkBuffer> Serializer for InnerSerializer<I, B> {
    type Buffer = B;
    #[inline]
    fn serialize<BB: GrowBufferMut, P: BufferProvider<B, BB>>(
        self,
        outer: PacketConstraints,
        provider: P,
    ) -> Result<BB, (SerializeError<P::Error>, InnerSerializer<I, B>)> {
        // Treat the whole inner packet as a "header" via the wrapper, then
        // serialize the stored (zero-length) buffer inside it.
        let pb = InnerPacketBuilderWrapper(self.inner);
        // Invariant on `InnerSerializer`: the stored buffer's length is 0.
        debug_assert_eq!(self.buffer.len(), 0);
        self.buffer.encapsulate(pb).serialize(outer, provider).map_err(
            // On failure, unpack the `Nested` to reconstitute the original
            // `InnerSerializer` so the caller gets `self` back.
            |(err, Nested { inner: buffer, outer: pb })| {
                (err, InnerSerializer { inner: pb.0, buffer })
            },
        )
    }
    #[inline]
    fn serialize_new_buf<BB: ReusableBuffer, A: BufferAlloc<BB>>(
        &self,
        outer: PacketConstraints,
        alloc: A,
    ) -> Result<BB, SerializeError<A::Error>> {
        // Borrow the inner builder (this method must not consume `self`) and
        // serialize with an empty body into a freshly-allocated buffer.
        let pb = InnerPacketBuilderWrapper(&self.inner);
        EmptyBuf.encapsulate(pb).serialize_new_buf(outer, alloc)
    }
}
impl<B: GrowBuffer + ShrinkBuffer> Serializer for B {
    type Buffer = B;

    #[inline]
    fn serialize<BB: GrowBufferMut, P: BufferProvider<Self::Buffer, BB>>(
        self,
        outer: PacketConstraints,
        provider: P,
    ) -> Result<BB, (SerializeError<P::Error>, Self)> {
        // Delegate to a `TruncatingSerializer` configured never to truncate,
        // unwrapping the buffer from the serializer again on failure.
        TruncatingSerializer::new(self, TruncateDirection::NoTruncating)
            .serialize(outer, provider)
            .map_err(|(err, ser)| (err, ser.buffer))
    }

    fn serialize_new_buf<BB: ReusableBuffer, A: BufferAlloc<BB>>(
        &self,
        outer: PacketConstraints,
        alloc: A,
    ) -> Result<BB, SerializeError<A::Error>> {
        let body_len = self.len();
        if body_len > outer.max_body_len() {
            return Err(SerializeError::SizeLimitExceeded);
        }
        // Room is needed for the header, the body, any padding required to
        // meet the minimum body length, and the footer.
        let padding = outer.min_body_len().saturating_sub(body_len);
        let suffix_len = padding + outer.footer_len();
        let mut buffer = alloc.alloc(outer.header_len() + body_len + suffix_len)?;
        // Narrow the new buffer's body to exactly `body_len`, copy the body
        // in, then grow the body back over the padding bytes.
        buffer.shrink_front(outer.header_len());
        buffer.shrink_back(suffix_len);
        buffer.copy_from(self);
        buffer.grow_back(padding);
        Ok(buffer)
    }
}
/// Either of two serializers.
///
/// An `EitherSerializer` wraps one of two different serializer types.
pub enum EitherSerializer<A, B> {
    /// The first serializer type.
    A(A),
    /// The second serializer type.
    B(B),
}
impl<A: Serializer, B: Serializer<Buffer = A::Buffer>> Serializer for EitherSerializer<A, B> {
    type Buffer = A::Buffer;

    fn serialize<TB: GrowBufferMut, P: BufferProvider<Self::Buffer, TB>>(
        self,
        outer: PacketConstraints,
        provider: P,
    ) -> Result<TB, (SerializeError<P::Error>, Self)> {
        // Delegate to whichever serializer is present, re-wrapping it in the
        // same variant if serialization fails.
        match self {
            Self::A(ser) => {
                ser.serialize(outer, provider).map_err(|(err, ser)| (err, Self::A(ser)))
            }
            Self::B(ser) => {
                ser.serialize(outer, provider).map_err(|(err, ser)| (err, Self::B(ser)))
            }
        }
    }

    fn serialize_new_buf<TB: ReusableBuffer, BA: BufferAlloc<TB>>(
        &self,
        outer: PacketConstraints,
        alloc: BA,
    ) -> Result<TB, SerializeError<BA::Error>> {
        // `serialize_new_buf` takes `&self`, so no re-wrapping is needed.
        match self {
            Self::A(ser) => ser.serialize_new_buf(outer, alloc),
            Self::B(ser) => ser.serialize_new_buf(outer, alloc),
        }
    }
}
/// The direction a buffer's body should be truncated from to force
/// it to fit within a size limit.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum TruncateDirection {
    /// If a buffer cannot fit within a limit, discard bytes from the
    /// front of the body.
    DiscardFront,
    /// If a buffer cannot fit within a limit, discard bytes from the
    /// end of the body.
    DiscardBack,
    /// Do not attempt to truncate a buffer to make it fit within a limit.
    NoTruncating,
}
/// A [`Serializer`] that truncates its body if it would exceed a size limit.
///
/// `TruncatingSerializer` wraps a buffer, and implements `Serializer`. Unlike
/// the blanket impl of `Serializer` for `B: GrowBuffer + ShrinkBuffer`, if the
/// buffer's body exceeds the size limit constraint passed to
/// `Serializer::serialize`, the body is truncated to fit.
///
/// Note that this does not guarantee that size limit exceeded errors will not
/// occur. The size limit may be small enough that the encapsulating headers
/// alone exceed the size limit. There may also be a minimum body length
/// constraint which is larger than the size limit.
#[derive(Copy, Clone, Debug)]
#[cfg_attr(test, derive(Eq, PartialEq))]
pub struct TruncatingSerializer<B> {
    // The buffer whose body is serialized (and possibly truncated).
    buffer: B,
    // Which end of the body to discard from when truncation is needed.
    direction: TruncateDirection,
}
impl<B> TruncatingSerializer<B> {
/// Constructs a new `TruncatingSerializer`.
pub fn new(buffer: B, direction: TruncateDirection) -> TruncatingSerializer<B> {
TruncatingSerializer { buffer, direction }
}
}
impl<B: GrowBuffer + ShrinkBuffer> Serializer for TruncatingSerializer<B> {
    type Buffer = B;

    fn serialize<BB: GrowBufferMut, P: BufferProvider<B, BB>>(
        mut self,
        outer: PacketConstraints,
        provider: P,
    ) -> Result<BB, (SerializeError<P::Error>, Self)> {
        let original_len = self.buffer.len();
        // How many bytes (if any) the body exceeds the size limit by.
        let excess_bytes = if original_len > outer.max_body_len {
            Some(original_len - outer.max_body_len)
        } else {
            None
        };
        // Truncate the body in place from the configured direction, or fail
        // if truncation is disabled.
        if let Some(excess_bytes) = excess_bytes {
            match self.direction {
                TruncateDirection::DiscardFront => self.buffer.shrink_front(excess_bytes),
                TruncateDirection::DiscardBack => self.buffer.shrink_back(excess_bytes),
                TruncateDirection::NoTruncating => {
                    return Err((SerializeError::SizeLimitExceeded, self))
                }
            }
        }
        let padding = outer.min_body_len().saturating_sub(self.buffer.len());
        // At this point, the body and padding MUST fit within the limit. Note
        // that PacketConstraints guarantees that min_body_len <= max_body_len,
        // so the padding can't cause this assertion to fail.
        debug_assert!(self.buffer.len() + padding <= outer.max_body_len());
        match provider.reuse_or_realloc(
            self.buffer,
            outer.header_len(),
            padding + outer.footer_len(),
        ) {
            Ok(buffer) => Ok(buffer),
            Err((err, mut buffer)) => {
                // Undo the effects of shrinking the buffer so that the buffer
                // we return is unmodified from its original (which is required
                // by the contract of this method).
                if let Some(excess_bytes) = excess_bytes {
                    match self.direction {
                        TruncateDirection::DiscardFront => buffer.grow_front(excess_bytes),
                        TruncateDirection::DiscardBack => buffer.grow_back(excess_bytes),
                        // `NoTruncating` with excess bytes returned early
                        // above, so it cannot reach this point.
                        TruncateDirection::NoTruncating => unreachable!(),
                    }
                }
                Err((
                    SerializeError::Alloc(err),
                    TruncatingSerializer { buffer, direction: self.direction },
                ))
            }
        }
    }

    fn serialize_new_buf<BB: ReusableBuffer, A: BufferAlloc<BB>>(
        &self,
        outer: PacketConstraints,
        alloc: A,
    ) -> Result<BB, SerializeError<A::Error>> {
        // The body length after any truncation required by the size limit.
        let truncated_size = cmp::min(self.buffer.len(), outer.max_body_len());
        let discarded_bytes = self.buffer.len() - truncated_size;
        let padding = outer.min_body_len().saturating_sub(truncated_size);
        let tail_size = padding + outer.footer_len();
        let buffer_size = outer.header_len() + truncated_size + tail_size;
        let mut buffer = alloc.alloc(buffer_size)?;
        // Narrow the new buffer's body to the truncated body size, then copy
        // the (possibly truncated) source body into it.
        buffer.shrink_front(outer.header_len());
        buffer.shrink_back(tail_size);
        buffer.with_bytes_mut(|mut dst| {
            self.buffer.with_bytes(|src| {
                // Choose which end of the source body to discard from,
                // failing if truncation is needed but disabled.
                let src = match (discarded_bytes > 0, self.direction) {
                    (false, _) => src,
                    (true, TruncateDirection::DiscardFront) => src.slice(discarded_bytes..),
                    (true, TruncateDirection::DiscardBack) => src.slice(..truncated_size),
                    (true, TruncateDirection::NoTruncating) => {
                        return Err(SerializeError::SizeLimitExceeded)
                    }
                };
                dst.copy_from(&src);
                Ok(())
            })
        })?;
        // Padding bytes become part of the body and are zeroed.
        buffer.grow_back_zero(padding);
        Ok(buffer)
    }
}
impl<I: Serializer, O: PacketBuilder> Serializer for Nested<I, O> {
    type Buffer = I::Buffer;

    #[inline]
    fn serialize<B: GrowBufferMut, P: BufferProvider<I::Buffer, B>>(
        self,
        outer: PacketConstraints,
        provider: P,
    ) -> Result<B, (SerializeError<P::Error>, Self)> {
        // Fold the caller's constraints through this layer's PacketBuilder;
        // if they can't be combined, the size limit cannot be satisfied.
        let combined = match self.outer.constraints().try_encapsulate(&outer) {
            Some(combined) => combined,
            None => return Err((SerializeError::SizeLimitExceeded, self)),
        };
        match self.inner.serialize(combined, provider) {
            Ok(mut buf) => {
                // The inner serializer left room for this layer's header and
                // footer, so serialize them directly into the buffer.
                buf.serialize(&self.outer);
                Ok(buf)
            }
            // Re-wrap the returned inner serializer so the caller gets back
            // an unmodified `Nested`.
            Err((err, inner)) => Err((err, inner.encapsulate(self.outer))),
        }
    }

    #[inline]
    fn serialize_new_buf<B: ReusableBuffer, A: BufferAlloc<B>>(
        &self,
        outer: PacketConstraints,
        alloc: A,
    ) -> Result<B, SerializeError<A::Error>> {
        let combined = match self.outer.constraints().try_encapsulate(&outer) {
            Some(combined) => combined,
            None => return Err(SerializeError::SizeLimitExceeded),
        };
        let mut buf = self.inner.serialize_new_buf(combined, alloc)?;
        GrowBufferMut::serialize(&mut buf, &self.outer);
        Ok(buf)
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::BufferMut;
use std::fmt::Debug;
use test_case::test_case;
use test_util::{assert_geq, assert_leq};
// DummyPacketBuilder:
// - Implements PacketBuilder with the stored constraints; it fills the
//   header with 0xFF and the footer with 0xFE
// - Implements InnerPacketBuilder by consuming a `header_len`-bytes body,
//   and filling it with 0xFF
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
struct DummyPacketBuilder {
    header_len: usize,
    footer_len: usize,
    min_body_len: usize,
    max_body_len: usize,
}
impl DummyPacketBuilder {
fn new(
header_len: usize,
footer_len: usize,
min_body_len: usize,
max_body_len: usize,
) -> DummyPacketBuilder {
DummyPacketBuilder { header_len, footer_len, min_body_len, max_body_len }
}
}
/// Sets every byte of `bytes` to `byte`.
fn fill(bytes: &mut [u8], byte: u8) {
    // `slice::fill` is the idiomatic (and memset-optimized) equivalent of
    // assigning each element in a manual loop.
    bytes.fill(byte);
}
impl PacketBuilder for DummyPacketBuilder {
    fn constraints(&self) -> PacketConstraints {
        let Self { header_len, footer_len, min_body_len, max_body_len } = *self;
        PacketConstraints::new(header_len, footer_len, min_body_len, max_body_len)
    }

    fn serialize(&self, target: &mut SerializeTarget<'_>, body: FragmentedBytesMut<'_, '_>) {
        // The framework must hand us exactly the header/footer room we asked
        // for, and a body within our stated bounds.
        assert_eq!(target.header.len(), self.header_len);
        assert_eq!(target.footer.len(), self.footer_len);
        assert!(body.len() >= self.min_body_len);
        assert!(body.len() <= self.max_body_len);
        // Distinct sentinel bytes let tests tell header from footer.
        fill(target.header, 0xFF);
        fill(target.footer, 0xFE);
    }
}
impl InnerPacketBuilder for DummyPacketBuilder {
    fn bytes_len(&self) -> usize {
        // As an inner packet, the builder's entire contents are its
        // "header"-sized payload.
        self.header_len
    }

    fn serialize(&self, buffer: &mut [u8]) {
        assert_eq!(buffer.len(), self.bytes_len());
        fill(buffer, 0xFF);
    }
}
// Helper for `VerifyingSerializer` used to verify the serialization result.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
struct SerializerVerifier {
    // Total size of the inner body if not truncated, or `None` if
    // serialization is expected to fail due to size limit.
    inner_len: Option<usize>,
    // Is the inner Serializer truncating (a TruncatingSerializer with
    // TruncateDirection::DiscardFront or DiscardBack)?
    truncating: bool,
}
impl SerializerVerifier {
    // Records the unconstrained serialized length of `serializer` (or `None`
    // if even unconstrained serialization fails with a size-limit error) so
    // that later results can be checked against it.
    fn new<S: Serializer>(serializer: &S, truncating: bool) -> Self {
        let inner_len = serializer
            .serialize_new_buf(PacketConstraints::UNCONSTRAINED, new_buf_vec)
            .map(|buf| buf.len())
            // Only a size-limit failure is acceptable here; any other error
            // would indicate a broken serializer.
            .inspect_err(|err| assert!(err.is_size_limit_exceeded()))
            .ok();
        Self { inner_len, truncating }
    }

    // Asserts that `result` is consistent with `outer`'s constraints and with
    // the expectations recorded in `new`.
    fn verify_result<B: GrowBufferMut, A>(
        &self,
        result: Result<&B, &SerializeError<A>>,
        outer: PacketConstraints,
    ) {
        // Serialization should fail with SizeLimitExceeded exactly when the
        // body is too large and the serializer does not truncate.
        let should_exceed_size_limit = match self.inner_len {
            Some(inner_len) => outer.max_body_len() < inner_len && !self.truncating,
            None => true,
        };
        match result {
            Ok(buf) => {
                assert_geq!(buf.prefix_len(), outer.header_len());
                assert_geq!(buf.suffix_len(), outer.footer_len());
                assert_leq!(buf.len(), outer.max_body_len());
                // It is `Serialize::serialize()`'s responsibility to ensure that there
                // is enough suffix room to fit any post-body padding and the footer,
                // but it is the caller's responsibility to actually add that padding
                // (ie, move it from the suffix to the body).
                let padding = outer.min_body_len().saturating_sub(buf.len());
                assert_leq!(padding + outer.footer_len(), buf.suffix_len());
                assert!(!should_exceed_size_limit);
            }
            Err(err) => {
                // If we shouldn't fail as a result of a size limit exceeded
                // error, we might still fail as a result of allocation.
                if should_exceed_size_limit {
                    assert!(err.is_size_limit_exceeded());
                } else {
                    assert!(err.is_alloc());
                }
            }
        }
    }
}
// A Serializer that verifies certain invariants while operating. In
// particular:
// - If serialization fails, the original Serializer is returned unmodified.
// - If `outer.try_constraints()` returns `None`, serialization fails.
// - If the size limit is exceeded and truncation is disabled, serialization
//   fails.
// - If serialization succeeds, the body has the correct length, including
//   taking into account `outer`'s minimum body length requirement
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
struct VerifyingSerializer<S> {
    // The serializer whose behavior is being verified.
    ser: S,
    // Expectations computed up front; see `SerializerVerifier`.
    verifier: SerializerVerifier,
}
impl<S: Serializer + Debug + Clone + Eq> Serializer for VerifyingSerializer<S>
where
    S::Buffer: ReusableBuffer,
{
    type Buffer = S::Buffer;

    fn serialize<B: GrowBufferMut, P: BufferProvider<Self::Buffer, B>>(
        self,
        outer: PacketConstraints,
        provider: P,
    ) -> Result<B, (SerializeError<P::Error>, Self)> {
        let Self { ser, verifier } = self;
        // Keep a copy so the failure path can check the serializer is
        // returned untouched.
        let orig = ser.clone();
        let result = ser.serialize(outer, provider).map_err(|(err, ser)| {
            // If serialization fails, the original Serializer should be
            // unmodified.
            assert_eq!(ser, orig);
            (err, Self { ser, verifier })
        });
        verifier.verify_result(result.as_ref().map_err(|(err, _ser)| err), outer);
        result
    }

    fn serialize_new_buf<B: ReusableBuffer, A: BufferAlloc<B>>(
        &self,
        outer: PacketConstraints,
        alloc: A,
    ) -> Result<B, SerializeError<A::Error>> {
        let res = self.ser.serialize_new_buf(outer, alloc);
        self.verifier.verify_result(res.as_ref(), outer);
        res
    }
}
// Convenience constructors that pair common serializer combinators with
// `VerifyingSerializer` wrapping.
trait SerializerExt: Serializer {
    // Wraps `self` in a `VerifyingSerializer`, recording its unconstrained
    // length up front.
    fn into_verifying(self, truncating: bool) -> VerifyingSerializer<Self>
    where
        Self::Buffer: ReusableBuffer,
    {
        let verifier = SerializerVerifier::new(&self, truncating);
        VerifyingSerializer { ser: self, verifier }
    }

    // Like `encapsulate`, but the resulting serializer is verifying.
    fn encapsulate_verifying<B: PacketBuilder>(
        self,
        outer: B,
        truncating: bool,
    ) -> VerifyingSerializer<Nested<Self, B>>
    where
        Self::Buffer: ReusableBuffer,
    {
        self.encapsulate(outer).into_verifying(truncating)
    }

    // Like `with_size_limit`, but the resulting serializer is verifying.
    fn with_size_limit_verifying(
        self,
        limit: usize,
        truncating: bool,
    ) -> VerifyingSerializer<Nested<Self, LimitedSizePacketBuilder>>
    where
        Self::Buffer: ReusableBuffer,
    {
        self.with_size_limit(limit).into_verifying(truncating)
    }
}

impl<S: Serializer> SerializerExt for S {}
#[test]
fn test_either_into_inner() {
    // Produces `A(a)` when `c` is true and `B(b)` otherwise.
    fn ret_either(a: u32, b: u32, c: bool) -> Either<u32, u32> {
        match c {
            true => Either::A(a),
            false => Either::B(b),
        }
    }
    // `into_inner` should unwrap whichever variant is present.
    assert_eq!(ret_either(1, 2, true).into_inner(), 1);
    assert_eq!(ret_either(1, 2, false).into_inner(), 2);
}
#[test]
fn test_either_unwrap_success() {
    // `unwrap_a`/`unwrap_b` return the wrapped value when the variant
    // matches.
    assert_eq!(Either::<u16, u32>::A(5).unwrap_a(), 5);
    assert_eq!(Either::<u16, u32>::B(10).unwrap_b(), 10);
}

#[test]
#[should_panic]
fn test_either_unwrap_a_panic() {
    // `unwrap_a` must panic when given the `B` variant.
    let _: u16 = Either::<u16, u32>::B(10).unwrap_a();
}

#[test]
#[should_panic]
fn test_either_unwrap_b_panic() {
    // `unwrap_b` must panic when given the `A` variant.
    let _: u32 = Either::<u16, u32>::A(5).unwrap_b();
}
// `into_inner` should return the buffer's body bytes regardless of where the
// body sits within the backing storage.
#[test_case(Buf::new((0..100).collect(), ..); "entire buf")]
#[test_case(Buf::new((0..100).collect(), 0..0); "empty range")]
#[test_case(Buf::new((0..100).collect(), ..50); "prefix")]
#[test_case(Buf::new((0..100).collect(), 50..); "suffix")]
#[test_case(Buf::new((0..100).collect(), 25..75); "middle")]
fn test_buf_into_inner(buf: Buf<Vec<u8>>) {
    assert_eq!(buf.clone().as_ref(), buf.into_inner());
}
#[test]
fn test_packet_constraints() {
    use PacketConstraints as PC;
    // Test try_new
    // Sanity check.
    assert!(PC::try_new(0, 0, 0, 0).is_some());
    // header_len + min_body_len + footer_len doesn't overflow usize
    assert!(PC::try_new(usize::MAX / 2, usize::MAX / 2, 0, 0).is_some());
    // header_len + min_body_len + footer_len overflows usize
    assert_eq!(PC::try_new(usize::MAX, 1, 0, 0), None);
    // min_body_len > max_body_len
    assert_eq!(PC::try_new(0, 0, 1, 0), None);
    // Test PacketConstraints::try_encapsulate
    // Sanity check: header and footer lengths add, and the maximum body
    // length shrinks by the encapsulating overhead. (A second, verbatim copy
    // of this check was removed as redundant.)
    let pc = PC::new(10, 10, 0, usize::MAX);
    assert_eq!(pc.try_encapsulate(&pc).unwrap(), PC::new(20, 20, 0, usize::MAX - 20));
    // Starting here, each failure test case corresponds to one check in
    // either PacketConstraints::try_encapsulate or PacketConstraints::new
    // (which is called from PacketConstraints::try_encapsulate). Each test
    // case is labeled "Test case N", and a corresponding comment in either
    // of those two functions identifies which line is being tested.
    // The outer PC's minimum body length requirement of 10 is more than
    // satisfied by the inner PC's combined 20 bytes of header and footer.
    // The resulting PC has its minimum body length requirement saturated to
    // 0.
    let inner = PC::new(10, 10, 0, usize::MAX);
    let outer = PC::new(0, 0, 10, usize::MAX);
    assert_eq!(inner.try_encapsulate(&outer).unwrap(), PC::new(10, 10, 0, usize::MAX - 20));
    // Test case 1
    //
    // The sum of the inner and outer header lengths overflows `usize`.
    let inner = PC::new(usize::MAX, 0, 0, usize::MAX);
    let outer = PC::new(1, 0, 0, usize::MAX);
    assert_eq!(inner.try_encapsulate(&outer), None);
    // Test case 2
    //
    // The sum of the inner and outer footer lengths overflows `usize`.
    let inner = PC::new(0, usize::MAX, 0, usize::MAX);
    let outer = PC::new(0, 1, 0, usize::MAX);
    assert_eq!(inner.try_encapsulate(&outer), None);
    // Test case 3
    //
    // The sum of the resulting header, footer, and minimum body lengths
    // overflows `usize`. We use usize::MAX / 5 + 1 as the constant so that
    // none of the intermediate additions overflow, so we make sure to test
    // that an overflow in the final addition will be caught.
    let one_fifth_max = (usize::MAX / 5) + 1;
    let inner = PC::new(one_fifth_max, one_fifth_max, one_fifth_max, usize::MAX);
    let outer = PC::new(one_fifth_max, one_fifth_max, 0, usize::MAX);
    assert_eq!(inner.try_encapsulate(&outer), None);
    // Test case 4
    //
    // The header and footer of the inner PC exceed the maximum body length
    // requirement of the outer PC.
    let inner = PC::new(10, 10, 0, usize::MAX);
    let outer = PC::new(0, 0, 0, 10);
    assert_eq!(inner.try_encapsulate(&outer), None);
    // Test case 5
    //
    // The resulting minimum body length (thanks to the inner
    // PacketBuilder's minimum body length) is larger than the resulting
    // maximum body length.
    let inner = PC::new(0, 0, 10, usize::MAX);
    let outer = PC::new(0, 0, 0, 5);
    assert_eq!(inner.try_encapsulate(&outer), None);
}
#[test]
fn test_inner_serializer() {
    const INNER: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
    // Concatenates byte slices into one Vec for expected-value construction.
    fn concat<'a, I: IntoIterator<Item = &'a &'a [u8]>>(slices: I) -> Vec<u8> {
        let mut v = Vec::new();
        for slc in slices.into_iter() {
            v.extend_from_slice(slc);
        }
        v
    }
    // Sanity check.
    let buf = INNER.into_serializer().serialize_vec_outer().unwrap();
    assert_eq!(buf.as_ref(), INNER);
    // A larger minimum body length requirement will cause padding to be
    // added.
    let buf = INNER
        .into_serializer()
        .into_verifying(false)
        .encapsulate(DummyPacketBuilder::new(0, 0, 20, usize::MAX))
        .serialize_vec_outer()
        .unwrap();
    assert_eq!(buf.as_ref(), concat(&[INNER, vec![0; 10].as_ref()]).as_slice());
    // Headers and footers are added as appropriate (note that
    // DummyPacketBuilder fills its header with 0xFF and its footer with
    // 0xFE).
    let buf = INNER
        .into_serializer()
        .into_verifying(false)
        .encapsulate(DummyPacketBuilder::new(10, 10, 0, usize::MAX))
        .serialize_vec_outer()
        .unwrap();
    assert_eq!(
        buf.as_ref(),
        concat(&[vec![0xFF; 10].as_ref(), INNER, vec![0xFE; 10].as_ref()]).as_slice()
    );
    // An exceeded maximum body size is rejected.
    assert_eq!(
        INNER
            .into_serializer()
            .into_verifying(false)
            .encapsulate(DummyPacketBuilder::new(0, 0, 0, 9))
            .serialize_vec_outer()
            .unwrap_err()
            .0,
        SerializeError::SizeLimitExceeded
    );
    // `into_serializer_with` truncates the buffer's body to zero before
    // returning, so those body bytes are not included in the serialized
    // output.
    assert_eq!(
        INNER
            .into_serializer_with(Buf::new(vec![0xFF], ..))
            .into_verifying(false)
            .serialize_vec_outer()
            .unwrap()
            .as_ref(),
        INNER
    );
}
#[test]
fn test_buffer_serializer_and_inner_serializer() {
    // Serializes `buffer` (both into a new buffer and via the reuse path)
    // inside a DummyPacketBuilder and verifies the result.
    fn verify_buffer_serializer<B: BufferMut + Debug>(
        buffer: B,
        header_len: usize,
        footer_len: usize,
        min_body_len: usize,
    ) {
        let old_body = buffer.to_flattened_vec();
        let serializer = buffer.encapsulate(DummyPacketBuilder::new(
            header_len,
            footer_len,
            min_body_len,
            usize::MAX,
        ));
        let buffer0 = serializer
            .serialize_new_buf(PacketConstraints::UNCONSTRAINED, new_buf_vec)
            .unwrap();
        verify(buffer0, &old_body, header_len, footer_len, min_body_len);
        let buffer = serializer.serialize_vec_outer().unwrap();
        verify(buffer, &old_body, header_len, footer_len, min_body_len);
    }
    // Same check, but driving the InnerPacketBuilder blanket impl for byte
    // slices instead of a buffer.
    fn verify_inner_packet_builder_serializer(
        body: &[u8],
        header_len: usize,
        footer_len: usize,
        min_body_len: usize,
    ) {
        let buffer = body
            .into_serializer()
            .encapsulate(DummyPacketBuilder::new(
                header_len,
                footer_len,
                min_body_len,
                usize::MAX,
            ))
            .serialize_vec_outer()
            .unwrap();
        verify(buffer, body, header_len, footer_len, min_body_len);
    }
    // Checks the serialized layout: header (0xFF), body, zero padding, footer
    // (0xFE).
    fn verify<B: Buffer>(
        buffer: B,
        body: &[u8],
        header_len: usize,
        footer_len: usize,
        min_body_len: usize,
    ) {
        let flat = buffer.to_flattened_vec();
        let header_bytes = &flat[..header_len];
        let body_bytes = &flat[header_len..header_len + body.len()];
        let padding_len = min_body_len.saturating_sub(body.len());
        let padding_bytes =
            &flat[header_len + body.len()..header_len + body.len() + padding_len];
        let total_body_len = body.len() + padding_len;
        let footer_bytes = &flat[header_len + total_body_len..];
        // BUG FIX: the format arguments previously passed
        // `(buffer.len(), header_len, footer_len, min_body_len)`, which did
        // not match the four placeholders in the message. Pass the values the
        // message actually names.
        assert_eq!(
            buffer.len() - total_body_len,
            header_len + footer_len,
            "buffer.len()({}) - total_body_len({}) != header_len({}) + footer_len({})",
            buffer.len(),
            total_body_len,
            header_len,
            footer_len,
        );
        // DummyPacketBuilder fills its header with 0xFF
        assert!(
            header_bytes.iter().all(|b| *b == 0xFF),
            "header_bytes {:?} are not filled with 0xFF's",
            header_bytes,
        );
        assert_eq!(body_bytes, body);
        // Padding bytes must be initialized to zero
        assert!(
            padding_bytes.iter().all(|b| *b == 0),
            "padding_bytes {:?} are not filled with 0s",
            padding_bytes,
        );
        // DummyPacketBuilder fills its footer with 0xFE
        assert!(
            footer_bytes.iter().all(|b| *b == 0xFE),
            "footer_bytes {:?} are not filled with 0xFE's",
            footer_bytes,
        );
    }
    // Test for every valid combination of buf_len, range_start, range_end,
    // prefix, suffix, and min_body within [0, 8).
    for buf_len in 0..8 {
        for range_start in 0..buf_len {
            for range_end in range_start..buf_len {
                for prefix in 0..8 {
                    for suffix in 0..8 {
                        for min_body in 0..8 {
                            let mut vec = vec![0; buf_len];
                            // Initialize the vector with values 0, 1, 2,
                            // ... so that we can check to make sure that
                            // the range bytes have been properly copied if
                            // the buffer is reallocated.
                            #[allow(clippy::needless_range_loop)]
                            for i in 0..vec.len() {
                                vec[i] = i as u8;
                            }
                            verify_buffer_serializer(
                                Buf::new(vec.as_mut_slice(), range_start..range_end),
                                prefix,
                                suffix,
                                min_body,
                            );
                            if range_start == 0 {
                                // Unlike verify_buffer_serializer, this
                                // test doesn't make use of the prefix or
                                // suffix. In order to avoid running the
                                // exact same test multiple times, we only
                                // run this when `range_start == 0`, which
                                // has the effect of reducing the number of
                                // times that this test is run by roughly a
                                // factor of 8.
                                verify_inner_packet_builder_serializer(
                                    &vec.as_slice()[range_start..range_end],
                                    prefix,
                                    suffix,
                                    min_body,
                                );
                            }
                        }
                    }
                }
            }
        }
    }
}
#[test]
fn test_min_body_len() {
    // Test that padding is added after the body of the packet whose minimum
    // body length constraint requires it. A previous version of this code
    // had a bug where padding was always added after the innermost body.
    let body = &[1, 2];
    // 4 bytes of header and footer for a total of 6 bytes (including the
    // body).
    let inner = DummyPacketBuilder::new(2, 2, 0, usize::MAX);
    // Minimum body length of 8 will require 2 bytes of padding.
    let outer = DummyPacketBuilder::new(2, 2, 8, usize::MAX);
    let buf = body
        .into_serializer()
        .into_verifying(false)
        .encapsulate_verifying(inner, false)
        .encapsulate_verifying(outer, false)
        .serialize_vec_outer()
        .unwrap();
    // The result should be exactly sized: no leftover prefix or suffix.
    assert_eq!(buf.prefix_len(), 0);
    assert_eq!(buf.suffix_len(), 0);
    // Padding must land between the inner footer and the outer footer, not
    // directly after the innermost body.
    assert_eq!(
        buf.as_ref(),
        &[
            0xFF, 0xFF, // Outer header
            0xFF, 0xFF, // Inner header
            1, 2, // Inner body
            0xFE, 0xFE, // Inner footer
            0, 0, // Padding to satisfy outer minimum body length requirement
            0xFE, 0xFE // Outer footer
        ]
    );
}
#[test]
fn test_size_limit() {
    // ser is a Serializer that will consume 1 byte of buffer space
    fn test<S: Serializer + Clone + Debug + Eq>(ser: S)
    where
        S::Buffer: ReusableBuffer,
    {
        // Each of these tests encapsulates ser in a DummyPacketBuilder
        // which consumes 1 byte for the header and one byte for the footer.
        // Thus, the inner serializer will consume 1 byte, while the
        // DummyPacketBuilder will consume 2 bytes, for a total of 3 bytes.
        let pb = DummyPacketBuilder::new(1, 1, 0, usize::MAX);
        // Test that a size limit of 3 is OK. Note that this is an important
        // test since it tests the case when the size limit is exactly
        // sufficient. A previous version of this code had a bug where a
        // packet which fit the size limit exactly would be rejected.
        assert!(ser
            .clone()
            .encapsulate_verifying(pb, false)
            .with_size_limit_verifying(3, false)
            .serialize_vec_outer()
            .is_ok());
        // Test that a more-than-large-enough size limit of 4 is OK.
        assert!(ser
            .clone()
            .encapsulate_verifying(pb, false)
            .with_size_limit_verifying(4, false)
            .serialize_vec_outer()
            .is_ok());
        // Test that the inner size limit of 1 only applies to the inner
        // serializer, and so is still OK even though the outer serializer
        // consumes 3 bytes total.
        assert!(ser
            .clone()
            .with_size_limit_verifying(1, false)
            .encapsulate_verifying(pb, false)
            .with_size_limit_verifying(3, false)
            .serialize_vec_outer()
            .is_ok());
        // Test that the inner size limit of 0 is exceeded by the inner
        // serializer's 1 byte length.
        assert!(ser
            .clone()
            .with_size_limit_verifying(0, false)
            .encapsulate_verifying(pb, false)
            .serialize_vec_outer()
            .is_err());
        // Test that a size limit which would be exceeded by the
        // encapsulating layer is rejected by Nested's implementation. If
        // this doesn't work properly, then the size limit should underflow,
        // resulting in a panic (see the Nested implementation of
        // Serialize).
        assert!(ser
            .clone()
            .encapsulate_verifying(pb, false)
            .with_size_limit_verifying(1, false)
            .serialize_vec_outer()
            .is_err());
    }
    // We use this as an InnerPacketBuilder which consumes 1 byte of body.
    test(DummyPacketBuilder::new(1, 0, 0, usize::MAX).into_serializer().into_verifying(false));
    // And the same checks for a 1-byte buffer-backed serializer.
    test(Buf::new(vec![0], ..).into_verifying(false));
}
#[test]
fn test_truncating_serializer() {
    // Serializes `ser` both into a new buffer and via the reuse path, and
    // checks the resulting bytes against `expected`.
    fn verify_result<S: Serializer + Debug>(ser: S, expected: &[u8])
    where
        S::Buffer: ReusableBuffer + AsRef<[u8]>,
    {
        let buf = ser.serialize_new_buf(PacketConstraints::UNCONSTRAINED, new_buf_vec).unwrap();
        assert_eq!(buf.as_ref(), &expected[..]);
        let buf = ser.serialize_vec_outer().unwrap();
        assert_eq!(buf.as_ref(), &expected[..]);
    }
    // Test truncate front.
    let body = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
    let ser =
        TruncatingSerializer::new(Buf::new(body.clone(), ..), TruncateDirection::DiscardFront)
            .into_verifying(true)
            .with_size_limit_verifying(4, true);
    verify_result(ser, &[6, 7, 8, 9]);
    // Test truncate back.
    let ser =
        TruncatingSerializer::new(Buf::new(body.clone(), ..), TruncateDirection::DiscardBack)
            .into_verifying(true)
            .with_size_limit_verifying(7, true);
    verify_result(ser, &[0, 1, 2, 3, 4, 5, 6]);
    // Test no truncating (default/original case).
    let ser =
        TruncatingSerializer::new(Buf::new(body.clone(), ..), TruncateDirection::NoTruncating)
            .into_verifying(false)
            .with_size_limit_verifying(5, true);
    assert!(ser.clone().serialize_vec_outer().is_err());
    assert!(ser.serialize_new_buf(PacketConstraints::UNCONSTRAINED, new_buf_vec).is_err());
    assert!(ser.serialize_vec_outer().is_err());
    // Test that, when serialization fails, any truncation is undone.
    // `ser` has a body of `[1, 2]` and no prefix or suffix
    fn test_serialization_failure<S: Serializer + Clone + Eq + Debug>(
        ser: S,
        err: SerializeError<BufferTooShortError>,
    ) where
        S::Buffer: ReusableBuffer + Debug,
    {
        // Serialize with a PacketBuilder with a size limit of 1 so that the
        // body (of length 2) is too large. If `ser` is configured not to
        // truncate, it should result in a size limit error. If it is
        // configured to truncate, the 2 + 2 = 4 combined bytes of header
        // and footer will cause allocating a new buffer to fail, and it
        // should result in an allocation failure. Even if the body was
        // truncated, it should be returned to its original un-truncated
        // state before being returned from `serialize`.
        let (e, new_ser) = ser
            .clone()
            .encapsulate(DummyPacketBuilder::new(2, 2, 0, 1))
            .serialize_no_alloc_outer()
            .unwrap_err();
        assert_eq!(err, e);
        assert_eq!(new_ser.into_inner(), ser);
    }
    let body = Buf::new(vec![1, 2], ..);
    test_serialization_failure(
        TruncatingSerializer::new(body.clone(), TruncateDirection::DiscardFront)
            .into_verifying(true),
        SerializeError::Alloc(BufferTooShortError),
    );
    // BUG FIX: this case previously repeated `DiscardFront`, leaving the
    // `DiscardBack` undo path (`grow_back`) untested.
    test_serialization_failure(
        TruncatingSerializer::new(body.clone(), TruncateDirection::DiscardBack)
            .into_verifying(true),
        SerializeError::Alloc(BufferTooShortError),
    );
    test_serialization_failure(
        TruncatingSerializer::new(body.clone(), TruncateDirection::NoTruncating)
            .into_verifying(false),
        SerializeError::SizeLimitExceeded,
    );
}
#[test]
fn test_try_reuse_buffer() {
    // Expects `try_reuse_buffer` to succeed: the returned buffer keeps the
    // same body and has at least the requested prefix/suffix room.
    fn test_expect_success(
        body_range: Range<usize>,
        prefix: usize,
        suffix: usize,
        max_copy_bytes: usize,
    ) {
        let mut bytes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
        let buffer = Buf::new(&mut bytes[..], body_range);
        let body = buffer.as_ref().to_vec();
        let buffer = try_reuse_buffer(buffer, prefix, suffix, max_copy_bytes).unwrap();
        assert_eq!(buffer.as_ref(), body.as_slice());
        assert!(buffer.prefix_len() >= prefix);
        assert!(buffer.suffix_len() >= suffix);
    }
    // Expects `try_reuse_buffer` to fail and return the buffer unmodified.
    fn test_expect_failure(
        body_range: Range<usize>,
        prefix: usize,
        suffix: usize,
        max_copy_bytes: usize,
    ) {
        let mut bytes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
        let buffer = Buf::new(&mut bytes[..], body_range.clone());
        let mut bytes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
        let orig = Buf::new(&mut bytes[..], body_range.clone());
        let buffer = try_reuse_buffer(buffer, prefix, suffix, max_copy_bytes).unwrap_err();
        assert_eq!(buffer, orig);
    }
    // No prefix or suffix trivially succeeds.
    test_expect_success(0..10, 0, 0, 0);
    // If we have enough prefix/suffix, it succeeds.
    test_expect_success(1..9, 1, 1, 0);
    // If we don't have enough prefix/suffix, but we have enough capacity to
    // move the buffer within the body, it succeeds...
    test_expect_success(0..9, 1, 0, 9);
    test_expect_success(1..10, 0, 1, 9);
    // ...but if we don't provide a large enough max_copy_bytes, it will fail.
    test_expect_failure(0..9, 1, 0, 8);
    test_expect_failure(1..10, 0, 1, 8);
}
#[test]
fn test_maybe_reuse_buffer_provider() {
    // Runs reuse_or_realloc and asserts which Either variant (A = reused,
    // B = reallocated) is produced, plus body/prefix/suffix invariants.
    fn test_expect(body_range: Range<usize>, prefix: usize, suffix: usize, expect_a: bool) {
        let mut bytes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
        let buffer = Buf::new(&mut bytes[..], body_range);
        let body = buffer.as_ref().to_vec();
        let buffer = BufferProvider::reuse_or_realloc(
            MaybeReuseBufferProvider(new_buf_vec),
            buffer,
            prefix,
            suffix,
        )
        .unwrap();
        // BUG FIX: the panic messages misspelled "Either" as "Eitehr".
        match &buffer {
            Either::A(_) if expect_a => {}
            Either::B(_) if !expect_a => {}
            Either::A(_) => panic!("expected Either::B variant"),
            Either::B(_) => panic!("expected Either::A variant"),
        }
        let bytes: &[u8] = buffer.as_ref();
        assert_eq!(bytes, body.as_slice());
        assert!(buffer.prefix_len() >= prefix);
        assert!(buffer.suffix_len() >= suffix);
    }
    // Expect that we'll be able to reuse the existing buffer.
    fn test_expect_reuse(body_range: Range<usize>, prefix: usize, suffix: usize) {
        test_expect(body_range, prefix, suffix, true);
    }
    // Expect that we'll need to allocate a new buffer.
    fn test_expect_realloc(body_range: Range<usize>, prefix: usize, suffix: usize) {
        test_expect(body_range, prefix, suffix, false);
    }
    // No prefix or suffix trivially succeeds.
    test_expect_reuse(0..10, 0, 0);
    // If we have enough prefix/suffix, it succeeds.
    test_expect_reuse(1..9, 1, 1);
    // If we don't have enough prefix/suffix, but we have enough capacity to
    // move the buffer within the body, it succeeds.
    test_expect_reuse(0..9, 1, 0);
    test_expect_reuse(1..10, 0, 1);
    // If we don't have enough capacity, it fails and must realloc.
    test_expect_realloc(0..9, 1, 1);
    test_expect_realloc(1..10, 1, 1);
}
#[test]
fn test_no_reuse_buffer_provider() {
    #[track_caller]
    // NoReuseBufferProvider must always allocate a new buffer with exactly
    // the requested prefix/suffix, copying the body across.
    fn test_expect(body_range: Range<usize>, prefix: usize, suffix: usize) {
        let mut bytes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
        // The buffer that will not be reused.
        let internal_buffer: Buf<&mut [u8]> = Buf::new(&mut bytes[..], body_range);
        let body = internal_buffer.as_ref().to_vec();
        // The newly allocated buffer, note the type is different from
        // internal_buffer.
        let buffer: Buf<Vec<u8>> = BufferProvider::reuse_or_realloc(
            NoReuseBufferProvider(new_buf_vec),
            internal_buffer,
            prefix,
            suffix,
        )
        .unwrap();
        let bytes: &[u8] = buffer.as_ref();
        assert_eq!(bytes, body.as_slice());
        assert_eq!(buffer.prefix_len(), prefix);
        assert_eq!(buffer.suffix_len(), suffix);
    }
    // No prefix or suffix trivially succeeds, reuse opportunity is ignored.
    test_expect(0..10, 0, 0);
    // If we have enough prefix/suffix, reuse opportunity is ignored.
    test_expect(1..9, 1, 1);
    // Prefix and suffix are properly allocated and the body is copied.
    test_expect(0..9, 10, 10);
    test_expect(1..10, 15, 15);
}
/// Simple Vec-backed buffer to test fragmented buffers implementation.
///
/// `ScatterGatherBuf` keeps:
/// - an inner buffer `inner`, which is always part of its body.
/// - extra backing memory in `data`.
///
/// `data` has two "root" regions, marked by the midpoint `mid`. Everything
/// left of `mid` is this buffer's prefix, and after `mid` is this buffer's
/// suffix.
///
/// The `range` field keeps the range in `data` that contains *filled*
/// prefix and suffix information. `range.start` is always less than or
/// equal to `mid` and `range.end` is always greater than or equal to `mid`,
/// such that growing the front of the buffer means decrementing
/// `range.start` and growing the back of the buffer means incrementing
/// `range.end`.
///
/// At any time this buffer's parts are:
/// - Free prefix data in range `0..range.start`.
/// - Used prefix data (now part of body) in range `range.start..mid`.
/// - Inner buffer body in `inner`.
/// - Used suffix data (now part of body) in range `mid..range.end`.
/// - Free suffix data in range `range.end..`
struct ScatterGatherBuf<B> {
// Backing memory holding this buffer's prefix and suffix regions.
data: Vec<u8>,
// Midpoint of `data`: prefix space lies to its left, suffix space to its
// right.
mid: usize,
// Filled portion of `data`; invariant: `range.start <= mid <= range.end`.
range: Range<usize>,
// Inner buffer, always entirely part of the body.
inner: B,
}
impl<B: BufferMut> FragmentedBuffer for ScatterGatherBuf<B> {
    fn len(&self) -> usize {
        // Body length is the inner buffer plus the filled region of `data`.
        let filled = self.range.end - self.range.start;
        self.inner.len() + filled
    }
    fn with_bytes<R, F>(&self, f: F) -> R
    where
        F: for<'a, 'b> FnOnce(FragmentedBytes<'a, 'b>) -> R,
    {
        // The body is three fragments in order: the filled prefix region,
        // the inner buffer, and the filled suffix region.
        let filled_prefix = &self.data[self.range.start..self.mid];
        let filled_suffix = &self.data[self.mid..self.range.end];
        let mut fragments = [filled_prefix, self.inner.as_ref(), filled_suffix];
        f(FragmentedBytes::new(&mut fragments[..]))
    }
}
impl<B: BufferMut> FragmentedBufferMut for ScatterGatherBuf<B> {
    fn with_bytes_mut<R, F>(&mut self, f: F) -> R
    where
        F: for<'a, 'b> FnOnce(FragmentedBytesMut<'a, 'b>) -> R,
    {
        // Carve the filled region out of `data`, then split it at the
        // midpoint into the filled-prefix and filled-suffix fragments.
        let filled = &mut self.data[self.range.start..self.range.end];
        let (filled_prefix, filled_suffix) = filled.split_at_mut(self.mid - self.range.start);
        let mut fragments = [filled_prefix, self.inner.as_mut(), filled_suffix];
        f(FragmentedBytesMut::new(&mut fragments[..]))
    }
}
impl<B: BufferMut> GrowBuffer for ScatterGatherBuf<B> {
    fn with_parts<O, F>(&self, f: F) -> O
    where
        F: for<'a, 'b> FnOnce(&'a [u8], FragmentedBytes<'a, 'b>, &'a [u8]) -> O,
    {
        // Split `data` into: free prefix, filled prefix, filled suffix, and
        // free suffix; the body fragments are (filled prefix, inner buffer,
        // filled suffix).
        let (prefix, rest) = self.data.split_at(self.range.start);
        let (prefix_b, rest) = rest.split_at(self.mid - self.range.start);
        let (suffix_b, suffix) = rest.split_at(self.range.end - self.mid);
        let mut bytes = [prefix_b, self.inner.as_ref(), suffix_b];
        f(prefix, bytes.as_fragmented_byte_slice(), suffix)
    }
    fn prefix_len(&self) -> usize {
        // Free prefix space is everything before the filled range.
        self.range.start
    }
    fn suffix_len(&self) -> usize {
        // Free suffix space is everything after the filled range.
        self.data.len() - self.range.end
    }
    fn grow_front(&mut self, n: usize) {
        // Mirror the bounds check in `grow_back`: without it, this
        // subtraction wraps silently in release builds and produces a bogus
        // `range` instead of a clean panic at the point of misuse.
        assert!(n <= self.range.start);
        self.range.start -= n;
    }
    fn grow_back(&mut self, n: usize) {
        self.range.end += n;
        assert!(self.range.end <= self.data.len());
    }
}
impl<B: BufferMut> GrowBufferMut for ScatterGatherBuf<B> {
    fn with_parts_mut<O, F>(&mut self, f: F) -> O
    where
        F: for<'a, 'b> FnOnce(&'a mut [u8], FragmentedBytesMut<'a, 'b>, &'a mut [u8]) -> O,
    {
        // Decompose `data` into free prefix, filled region, and free suffix,
        // then split the filled region at the midpoint into the two body
        // fragments surrounding the inner buffer.
        let (free_prefix, rest) = self.data.split_at_mut(self.range.start);
        let (filled, free_suffix) = rest.split_at_mut(self.range.end - self.range.start);
        let (filled_prefix, filled_suffix) = filled.split_at_mut(self.mid - self.range.start);
        let mut fragments = [filled_prefix, self.inner.as_mut(), filled_suffix];
        f(free_prefix, fragments.as_fragmented_byte_slice(), free_suffix)
    }
}
struct ScatterGatherProvider;
impl<B: BufferMut> BufferProvider<B, ScatterGatherBuf<B>> for ScatterGatherProvider {
    type Error = Never;
    fn alloc_no_reuse(
        self,
        _prefix: usize,
        _body: usize,
        _suffix: usize,
    ) -> Result<ScatterGatherBuf<B>, Self::Error> {
        unimplemented!("not used in tests")
    }
    fn reuse_or_realloc(
        self,
        buffer: B,
        prefix: usize,
        suffix: usize,
    ) -> Result<ScatterGatherBuf<B>, (Self::Error, B)> {
        // Keep the incoming buffer as the body and allocate fresh zeroed
        // backing memory for the requested prefix and suffix. The midpoint
        // sits between the two regions and the filled range starts out empty
        // at that midpoint.
        Ok(ScatterGatherBuf {
            inner: buffer,
            data: vec![0; prefix + suffix],
            range: prefix..prefix,
            mid: prefix,
        })
    }
}
#[test]
fn test_scatter_gather_serialize() {
    // Assert that a buffer composed of different allocations can be used as
    // a serialization target, while reusing an internal body buffer.
    let body = Buf::new(vec![10, 20, 30, 40, 50], ..);
    let builder = DummyPacketBuilder::new(3, 2, 0, usize::MAX);
    let serialized =
        body.encapsulate(builder).serialize_outer(ScatterGatherProvider {}).unwrap();
    assert_eq!(
        serialized.to_flattened_vec(),
        vec![0xFF, 0xFF, 0xFF, 10, 20, 30, 40, 50, 0xFE, 0xFE]
    );
}
}