use core::iter::FromIterator;
use core::mem::MaybeUninit;
use core::num::NonZeroU16;
use core::ops::Range;

use alloc::vec::Vec;
use net_types::ip::{Ip, IpVersion};
use packet::InnerPacketBuilder;
use static_assertions::const_assert;

use crate::ip::Mms;
use crate::tcp::segment::{Payload, PayloadLen, SegmentOptions};

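/// A TCP control flag (SYN, FIN, or RST) carried by a segment.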
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Control {
    SYN,
    FIN,
    RST,
}

impl Control {
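    /// Returns whether this control flag consumes a byte of sequence number
    /// space: true for SYN and FIN, false for RST.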
    pub fn has_sequence_no(self) -> bool {
        match self {
            Control::SYN | Control::FIN => true,
            Control::RST => false,
        }
    }
}

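/// The length of the fixed, option-less TCP header, in bytes.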
const TCP_HEADER_LEN: u32 = packet_formats::tcp::HDR_PREFIX_LEN as u32;

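/// The Maximum Segment Size (MSS) of a TCP connection: the largest number of
/// payload bytes that may be carried in a single segment, excluding the TCP
/// and IP headers.
///
/// Construction through [`Mss::new`] guarantees the value is at least
/// [`Mss::MIN`].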
#[derive(Clone, Copy, PartialEq, Eq, Debug, PartialOrd, Ord)]
pub struct Mss(u16);

const_assert!(Mss::MIN.get() <= Mss::DEFAULT_IPV4.get());
const_assert!(Mss::MIN.get() <= Mss::DEFAULT_IPV6.get());
const_assert!(Mss::MIN.get() as usize >= packet_formats::tcp::MAX_OPTIONS_LEN);

impl Mss {
    /// The smallest MSS accepted by [`Mss::new`].
    pub const MIN: Mss = Mss(216);

    /// The default MSS for IPv4: the 576-byte minimum reassembly buffer minus
    /// 40 bytes of IPv4 and TCP headers.
    pub const DEFAULT_IPV4: Mss = Mss(536);

    /// The default MSS for IPv6: the 1280-byte minimum MTU minus 60 bytes of
    /// IPv6 and TCP headers.
    pub const DEFAULT_IPV6: Mss = Mss(1220);

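    /// Creates a new [`Mss`], returning `None` if `mss` is smaller than
    /// [`Mss::MIN`].
    ///
    /// For example (illustrative): `Mss::new(536)` yields `Some`, while
    /// `Mss::new(100)` yields `None` because 100 is below `Mss::MIN` (216).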
    pub const fn new(mss: u16) -> Option<Self> {
        if mss < Self::MIN.get() { None } else { Some(Mss(mss)) }
    }

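    /// Derives the MSS from a maximum message size (MMS) by subtracting the
    /// fixed TCP header length, returning `None` if the result is below
    /// [`Mss::MIN`].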
    pub fn from_mms(mms: Mms) -> Option<Self> {
        let mss =
            u16::try_from(mms.get().get().saturating_sub(TCP_HEADER_LEN)).unwrap_or(u16::MAX);
        Self::new(mss)
    }

    /// Returns the default MSS for the IP version `I`.
    pub const fn default<I: Ip>() -> Self {
        match I::VERSION {
            IpVersion::V4 => Self::DEFAULT_IPV4,
            IpVersion::V6 => Self::DEFAULT_IPV6,
        }
    }

    /// Returns the MSS value in bytes.
    pub const fn get(&self) -> u16 {
        let Self(mss) = *self;
        mss
    }
}

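/// An [`Mss`] with the space consumed by fixed-size TCP options (currently
/// only the timestamp option) already accounted for.
///
/// [`EffectiveMss::get`] returns the MSS minus that fixed overhead;
/// [`EffectiveMss::payload_size`] computes the payload budget for a particular
/// segment by subtracting that segment's actual encoded options length from
/// the MSS.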
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct EffectiveMss {
    mss: Mss,
    fixed_tcp_options_size: u16,
}

impl EffectiveMss {
    /// Creates an [`EffectiveMss`] from `mss`, reserving room for the
    /// fixed-size TCP options selected by `size_limits`.
    pub const fn from_mss(mss: Mss, size_limits: MssSizeLimiters) -> Self {
        let MssSizeLimiters { timestamp_enabled } = size_limits;
        let fixed_tcp_options_size = if timestamp_enabled {
            packet_formats::tcp::options::ALIGNED_TIMESTAMP_OPTION_LENGTH as u16
        } else {
            0
        };
        EffectiveMss { mss, fixed_tcp_options_size }
    }

    /// Returns the maximum payload size for a segment carrying `options`.
    pub fn payload_size(&self, options: &SegmentOptions) -> NonZeroU16 {
        let Self { mss, fixed_tcp_options_size: _ } = self;
        // The encoded length of `options` already includes any fixed-size
        // options, so subtract it from the raw MSS rather than from the
        // already-reduced effective value.
        let tcp_options_len = u16::try_from(options.builder().bytes_len()).unwrap();
        NonZeroU16::new(mss.get() - tcp_options_len).unwrap()
    }

    /// Returns the underlying [`Mss`].
    pub fn mss(&self) -> &Mss {
        &self.mss
    }

    /// Replaces the underlying [`Mss`] with `new`.
    pub fn update_mss(&mut self, new: Mss) {
        self.mss = new
    }

    /// Returns the MSS with the fixed TCP options size deducted.
    pub const fn get(&self) -> u16 {
        let Self { mss, fixed_tcp_options_size } = *self;
        mss.get() - fixed_tcp_options_size
    }
}

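/// Connection-level settings that consume a fixed amount of space in every
/// segment and therefore limit the effective MSS.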
pub struct MssSizeLimiters {
    /// Whether the TCP timestamp option is enabled for the connection.
    pub timestamp_enabled: bool,
}

impl From<EffectiveMss> for u32 {
    fn from(mss: EffectiveMss) -> Self {
        u32::from(mss.get())
    }
}

impl From<EffectiveMss> for usize {
    fn from(mss: EffectiveMss) -> Self {
        usize::from(mss.get())
    }
}

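/// A TCP payload backed by up to `N` byte slices that are treated as one
/// logically contiguous sequence of bytes.
///
/// `start..end` delimits the slots in `storage` that make up the payload.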
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct FragmentedPayload<'a, const N: usize> {
    storage: [&'a [u8]; N],
    start: usize,
    end: usize,
}

impl<'a, const N: usize> FromIterator<&'a [u8]> for FragmentedPayload<'a, N> {
    /// Creates a `FragmentedPayload` from an iterator of slices.
    ///
    /// # Panics
    ///
    /// Panics if the iterator yields more than `N` slices.
    fn from_iter<T>(iter: T) -> Self
    where
        T: IntoIterator<Item = &'a [u8]>,
    {
        let Self { storage, start, end } = Self::new_empty();
        let (storage, end) = iter.into_iter().fold((storage, end), |(mut storage, end), sl| {
            storage[end] = sl;
            (storage, end + 1)
        });
        Self { storage, start, end }
    }
}

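// Illustrative usage (mirroring the unit tests below): a payload split across
// slices behaves like its contiguous concatenation, e.g.
// `FragmentedPayload::<2>::new([&b"Hel"[..], &b"lo!"[..]]).to_vec()` yields the
// same bytes as `FragmentedPayload::<2>::new_contiguous(b"Hello!").to_vec()`.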
impl<'a, const N: usize> FragmentedPayload<'a, N> {
    /// Creates a new `FragmentedPayload` with all `N` slots occupied by
    /// `values`.
    pub fn new(values: [&'a [u8]; N]) -> Self {
        Self { storage: values, start: 0, end: N }
    }

    /// Creates a new `FragmentedPayload` with a single contiguous slice.
    pub fn new_contiguous(value: &'a [u8]) -> Self {
        core::iter::once(value).collect()
    }

    /// Copies the payload into a single contiguous [`Vec`].
    pub fn to_vec(self) -> Vec<u8> {
        self.slices().concat()
    }

    /// Returns the occupied slices.
    fn slices(&self) -> &[&'a [u8]] {
        let Self { storage, start, end } = self;
        &storage[*start..*end]
    }

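    /// Calls `apply` on contiguous chunks of the payload, starting at `offset`
    /// bytes into the payload, until `dst` is exhausted.
    ///
    /// # Panics
    ///
    /// Panics if there are not enough bytes after `offset` to fill `dst`.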
    fn apply_copy<T, F: Fn(&[u8], &mut [T])>(
        &self,
        mut offset: usize,
        mut dst: &mut [T],
        apply: F,
    ) {
        let mut slices = self.slices().iter();
        while let Some(sl) = slices.next() {
            let l = sl.len();
            if offset >= l {
                // Skip slices that end before `offset`.
                offset -= l;
                continue;
            }
            let sl = &sl[offset..];
            let cp = sl.len().min(dst.len());
            let (target, new_dst) = dst.split_at_mut(cp);
            apply(&sl[..cp], target);

            if new_dst.is_empty() {
                return;
            }

            dst = new_dst;
            offset = 0;
        }
        assert_eq!(dst.len(), 0, "failed to fill dst");
    }
}

impl<'a, const N: usize> PayloadLen for FragmentedPayload<'a, N> {
    fn len(&self) -> usize {
        self.slices().iter().map(|s| s.len()).sum()
    }
}

impl<'a, const N: usize> Payload for FragmentedPayload<'a, N> {
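    /// Returns the sub-payload covering `byte_range`.
    ///
    /// An empty result is canonicalized to `start == end == 0` so that all
    /// empty payloads compare equal (see the `canon_empty_payload` test).
    ///
    /// # Panics
    ///
    /// Panics if `byte_range` is not contained within the payload.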
    fn slice(self, byte_range: Range<u32>) -> Self {
        let Self { mut storage, start: mut self_start, end: mut self_end } = self;
        let Range { start: byte_start, end: byte_end } = byte_range;
        let byte_start =
            usize::try_from(byte_start).expect("range start index out of range for usize");
        let byte_end = usize::try_from(byte_end).expect("range end index out of range for usize");
        assert!(byte_end >= byte_start);
        let mut storage_iter =
            storage[self_start..self_end].iter_mut().scan(0, |total_len, slice| {
                let slice_len = slice.len();
                let item = Some((*total_len, slice));
                *total_len += slice_len;
                item
            });

        let mut start_offset = None;
        let mut final_len = 0;
        while let Some((sl_offset, sl)) = storage_iter.next() {
            let orig_len = sl.len();

            // Drop slices that are entirely before the requested range.
            if sl_offset + orig_len < byte_start {
                *sl = &[];
                self_start += 1;
                continue;
            }
            // Drop slices that are entirely after the requested range.
            if sl_offset >= byte_end {
                *sl = &[];
                self_end -= 1;
                continue;
            }

            let sl_start = byte_start.saturating_sub(sl_offset);
            let sl_end = sl.len().min(byte_end - sl_offset);
            *sl = &sl[sl_start..sl_end];

            match start_offset {
                Some(_) => (),
                None => {
                    start_offset = Some(sl_offset + sl_start);
                    if sl.is_empty() {
                        self_start += 1;
                    }
                }
            }
            final_len += sl.len();
        }
        assert_eq!(
            start_offset.unwrap_or(0),
            byte_start,
            "range start index out of range {byte_range:?}"
        );
        assert_eq!(byte_start + final_len, byte_end, "range end index out of range {byte_range:?}");

        // Canonicalize an empty payload so all empty payloads compare equal.
        if self_start == self_end {
            self_start = 0;
            self_end = 0;
        }
        Self { storage, start: self_start, end: self_end }
    }

    fn new_empty() -> Self {
        Self { storage: [&[]; N], start: 0, end: 0 }
    }

    fn partial_copy(&self, offset: usize, dst: &mut [u8]) {
        self.apply_copy(offset, dst, |src, dst| {
            dst.copy_from_slice(src);
        });
    }

    fn partial_copy_uninit(&self, offset: usize, dst: &mut [MaybeUninit<u8>]) {
        self.apply_copy(offset, dst, |src, dst| {
            // SAFETY: `MaybeUninit<u8>` has the same size and alignment as
            // `u8`, so reinterpreting an initialized `&[u8]` as
            // `&[MaybeUninit<u8>]` is sound.
            let uninit_src: &[MaybeUninit<u8>] = unsafe { core::mem::transmute(src) };
            dst.copy_from_slice(uninit_src);
        });
    }
}

impl<'a, const N: usize> InnerPacketBuilder for FragmentedPayload<'a, N> {
    fn bytes_len(&self) -> usize {
        self.len()
    }

    fn serialize(&self, buffer: &mut [u8]) {
        self.partial_copy(0, buffer);
    }
}

#[cfg(any(test, feature = "testutils"))]
mod testutil {
    use super::*;

    impl From<Mss> for u32 {
        fn from(Mss(mss): Mss) -> Self {
            u32::from(mss)
        }
    }

    impl From<Mss> for usize {
        fn from(Mss(mss): Mss) -> Self {
            usize::from(mss)
        }
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use alloc::format;

    use packet::Serializer as _;
    use proptest::test_runner::Config;
    use proptest::{prop_assert_eq, proptest};
    use proptest_support::failed_seeds_no_std;
    use test_case::test_case;

    use crate::{SackBlock, SackBlocks, SeqNum, Timestamp, TimestampOption};

    const EXAMPLE_DATA: [u8; 10] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9];

    #[test_case(FragmentedPayload::new([&EXAMPLE_DATA[..]]); "contiguous")]
    #[test_case(FragmentedPayload::new([&EXAMPLE_DATA[0..2], &EXAMPLE_DATA[2..]]); "split once")]
    #[test_case(FragmentedPayload::new([
        &EXAMPLE_DATA[0..2],
        &EXAMPLE_DATA[2..5],
        &EXAMPLE_DATA[5..],
    ]); "split twice")]
    #[test_case(FragmentedPayload::<4>::from_iter([
        &EXAMPLE_DATA[0..2],
        &EXAMPLE_DATA[2..5],
        &EXAMPLE_DATA[5..],
    ]); "partial twice")]
    fn fragmented_payload_serializer_data<const N: usize>(payload: FragmentedPayload<'_, N>) {
        let serialized = payload
            .into_serializer()
            .serialize_vec_outer()
            .expect("should serialize")
            .unwrap_b()
            .into_inner();
        assert_eq!(&serialized[..], EXAMPLE_DATA);
    }

    #[test]
    #[should_panic(expected = "range start index out of range")]
    fn slice_start_out_of_bounds() {
        let len = u32::try_from(EXAMPLE_DATA.len()).unwrap();
        let bad_len = len + 1;
        // Use an empty range so that only the start bound check can fire.
        let _ = FragmentedPayload::<2>::new_contiguous(&EXAMPLE_DATA).slice(bad_len..bad_len);
    }

    #[test]
    #[should_panic(expected = "range end index out of range")]
    fn slice_end_out_of_bounds() {
        let len = u32::try_from(EXAMPLE_DATA.len()).unwrap();
        let bad_len = len + 1;
        let _ = FragmentedPayload::<2>::new_contiguous(&EXAMPLE_DATA).slice(0..bad_len);
    }

    #[test]
    fn canon_empty_payload() {
        let len = u32::try_from(EXAMPLE_DATA.len()).unwrap();
        assert_eq!(
            FragmentedPayload::<1>::new_contiguous(&EXAMPLE_DATA).slice(len..len),
            FragmentedPayload::new_empty()
        );
        assert_eq!(
            FragmentedPayload::<2>::new_contiguous(&EXAMPLE_DATA).slice(len..len),
            FragmentedPayload::new_empty()
        );
        assert_eq!(
            FragmentedPayload::<2>::new_contiguous(&EXAMPLE_DATA).slice(2..2),
            FragmentedPayload::new_empty()
        );
    }

    const TEST_BYTES: &'static [u8] = b"Hello World!";
    proptest! {
        #![proptest_config(Config {
            failure_persistence: failed_seeds_no_std!(),
            ..Config::default()
        })]

        #[test]
        fn fragmented_payload_to_vec(payload in fragmented_payload::with_payload()) {
            prop_assert_eq!(payload.to_vec(), &TEST_BYTES[..]);
        }

        #[test]
        fn fragmented_payload_len(payload in fragmented_payload::with_payload()) {
            prop_assert_eq!(payload.len(), TEST_BYTES.len())
        }

        #[test]
        fn fragmented_payload_slice((payload, (start, end)) in fragmented_payload::with_range()) {
            let want = &TEST_BYTES[start..end];
            let start = u32::try_from(start).unwrap();
            let end = u32::try_from(end).unwrap();
            prop_assert_eq!(payload.clone().slice(start..end).to_vec(), want);
        }

        #[test]
        fn fragmented_payload_partial_copy((payload, (start, end)) in fragmented_payload::with_range()) {
            let mut buffer = [0; TEST_BYTES.len()];
            let buffer = &mut buffer[0..(end - start)];
            payload.partial_copy(start, buffer);
            prop_assert_eq!(buffer, &TEST_BYTES[start..end]);
        }
    }

    mod fragmented_payload {
        use super::*;

        use proptest::strategy::{Just, Strategy};
        use rand::Rng as _;

        const TEST_STORAGE: usize = 5;
        type TestFragmentedPayload = FragmentedPayload<'static, TEST_STORAGE>;

        /// Generates a payload splitting `TEST_BYTES` into up to
        /// `TEST_STORAGE` randomly sized contiguous chunks.
        pub(super) fn with_payload() -> impl Strategy<Value = TestFragmentedPayload> {
            (1..=TEST_STORAGE).prop_perturb(|slices, mut rng| {
                (0..slices)
                    .scan(0, |st, slice| {
                        let len = if slice == slices - 1 {
                            // The last slice takes all of the remaining bytes.
                            TEST_BYTES.len() - *st
                        } else {
                            rng.random_range(0..=(TEST_BYTES.len() - *st))
                        };
                        let start = *st;
                        *st += len;
                        Some(&TEST_BYTES[start..*st])
                    })
                    .collect()
            })
        }

        /// Generates a payload together with a valid `(start, end)` byte range
        /// into it.
        pub(super) fn with_range() -> impl Strategy<Value = (TestFragmentedPayload, (usize, usize))>
        {
            (
                with_payload(),
                (0..TEST_BYTES.len()).prop_flat_map(|start| (Just(start), start..TEST_BYTES.len())),
            )
        }
    }

    #[test_case(true; "timestamp_enabled")]
    #[test_case(false; "timestamp_disabled")]
    fn effective_mss_accounts_for_fixed_size_tcp_options(timestamp_enabled: bool) {
        const SIZE: u16 = 1000;
        let mss =
            EffectiveMss::from_mss(Mss::new(SIZE).unwrap(), MssSizeLimiters { timestamp_enabled });
        if timestamp_enabled {
            assert_eq!(
                mss.get(),
                SIZE - packet_formats::tcp::options::ALIGNED_TIMESTAMP_OPTION_LENGTH as u16
            );
        } else {
            assert_eq!(mss.get(), SIZE);
        }
    }

    #[test_case(SegmentOptions { sack_blocks: SackBlocks::EMPTY, timestamp: None }; "empty")]
    #[test_case(SegmentOptions {
        sack_blocks: SackBlocks::from_iter([
            SackBlock::try_new(SeqNum::new(1), SeqNum::new(2)).unwrap(),
            SackBlock::try_new(SeqNum::new(4), SeqNum::new(6)).unwrap(),
        ]),
        timestamp: None,
    }; "sack_blocks")]
    #[test_case(SegmentOptions {
        sack_blocks: SackBlocks::EMPTY,
        timestamp: Some(TimestampOption {
            ts_val: Timestamp::new(12345), ts_echo_reply: Timestamp::new(54321)
        }),
    }; "timestamp")]
    #[test_case(SegmentOptions {
        sack_blocks: SackBlocks::from_iter([
            SackBlock::try_new(SeqNum::new(1), SeqNum::new(2)).unwrap(),
            SackBlock::try_new(SeqNum::new(4), SeqNum::new(6)).unwrap(),
        ]),
        timestamp: Some(TimestampOption {
            ts_val: Timestamp::new(12345), ts_echo_reply: Timestamp::new(54321)
        }),
    }; "sack_blocks_and_timestamp")]
    fn effective_mss_accounts_for_variable_size_tcp_options(options: SegmentOptions) {
        const SIZE: u16 = 1000;
        let timestamp_enabled = options.timestamp.is_some();
        let mss =
            EffectiveMss::from_mss(Mss::new(SIZE).unwrap(), MssSizeLimiters { timestamp_enabled });
        let options_len = u16::try_from(options.builder().bytes_len()).unwrap();
        assert_eq!(mss.payload_size(&options).get(), SIZE - options_len);
    }
}