use alloc::vec::Vec;
use core::convert::Infallible as Never;

use derivative::Derivative;
use log::trace;
use netstack3_base::sync::Mutex;
use netstack3_base::{Device, DeviceIdContext, ErrorAndSerializer};
use packet::{
    new_buf_vec, Buf, BufferAlloc, ContiguousBuffer, GrowBufferMut, NoReuseBufferProvider,
    ReusableBuffer, Serializer,
};

use crate::internal::base::DeviceSendFrameError;
use crate::internal::queue::{fifo, DequeueState, EnqueueResult, TransmitQueueFrameError};
use crate::internal::socket::{DeviceSocketHandler, ParseSentFrameError, SentFrame};

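/// The transmit queue state held per device.
///
/// Pairs a buffer allocator with an optional FIFO of frames waiting to be
/// sent; a `None` queue means frames are sent directly without queueing.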
#[derive(Derivative)]
#[derivative(Default(bound = "Allocator: Default"))]
pub struct TransmitQueueState<Meta, Buffer, Allocator> {
    pub(super) allocator: Allocator,
    pub(super) queue: Option<fifo::Queue<Meta, Buffer>>,
}

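/// The transmit queue for a device, keeping the dequeueing state and the
/// queue itself behind separate locks.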
#[derive(Derivative)]
#[derivative(Default(bound = "Allocator: Default"))]
pub struct TransmitQueue<Meta, Buffer, Allocator> {
    pub(crate) deque: Mutex<DequeueState<Meta, Buffer>>,
    pub(crate) queue: Mutex<TransmitQueueState<Meta, Buffer, Allocator>>,
}

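/// The bindings context for the transmit queue.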
pub trait TransmitQueueBindingsContext<DeviceId> {
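    /// Signals to bindings that the TX task for `device_id` should wake up
    /// and service the transmit queue.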
    fn wake_tx_task(&mut self, device_id: &DeviceId);
}

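/// The basic types and frame-parsing hook shared by the transmit queue traits
/// below.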
pub trait TransmitQueueCommon<D: Device, C>: DeviceIdContext<D> {
    type Meta;
    type Allocator;
    type Buffer: GrowBufferMut + ContiguousBuffer;
    type DequeueContext;

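    /// Parses an outgoing frame so it can be delivered to packet sockets.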
    fn parse_outgoing_frame<'a, 'b>(
        buf: &'a [u8],
        meta: &'b Self::Meta,
    ) -> Result<SentFrame<&'a [u8]>, ParseSentFrameError>;
}

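/// The core execution context for a device's transmit queue.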
pub trait TransmitQueueContext<D: Device, BC>: TransmitQueueCommon<D, BC> {
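    /// Calls `cb` with mutable access to the transmit queue state.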
    fn with_transmit_queue_mut<
        O,
        F: FnOnce(&mut TransmitQueueState<Self::Meta, Self::Buffer, Self::Allocator>) -> O,
    >(
        &mut self,
        device_id: &Self::DeviceId,
        cb: F,
    ) -> O;

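    /// Calls `cb` with immutable access to the transmit queue state.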
    fn with_transmit_queue<
        O,
        F: FnOnce(&TransmitQueueState<Self::Meta, Self::Buffer, Self::Allocator>) -> O,
    >(
        &mut self,
        device_id: &Self::DeviceId,
        cb: F,
    ) -> O;

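    /// Sends an already-serialized frame out the device.
    ///
    /// `dequeue_context` carries the caller's dequeueing context when the
    /// frame is sent as part of draining the queue.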
    fn send_frame(
        &mut self,
        bindings_ctx: &mut BC,
        device_id: &Self::DeviceId,
        dequeue_context: Option<&mut Self::DequeueContext>,
        meta: Self::Meta,
        buf: Self::Buffer,
    ) -> Result<(), DeviceSendFrameError>;
}

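/// The execution context used when dequeueing frames from the transmit queue.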
pub trait TransmitDequeueContext<D: Device, BC>: TransmitQueueContext<D, BC> {
    type TransmitQueueCtx<'a>: TransmitQueueContext<
            D,
            BC,
            Meta = Self::Meta,
            Buffer = Self::Buffer,
            DequeueContext = Self::DequeueContext,
            DeviceId = Self::DeviceId,
        > + DeviceSocketHandler<D, BC>;

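    /// Calls `cb` with the dequeueing state and a transmit queue context.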
    fn with_dequed_packets_and_tx_queue_ctx<
        O,
        F: FnOnce(&mut DequeueState<Self::Meta, Self::Buffer>, &mut Self::TransmitQueueCtx<'_>) -> O,
    >(
        &mut self,
        device_id: &Self::DeviceId,
        cb: F,
    ) -> O;
}

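/// The configuration of a device's transmit queue.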
pub enum TransmitQueueConfiguration {
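    /// Frames are sent immediately without being queued.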
    None,
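    /// Frames are queued in FIFO order and transmitted when the queue is
    /// drained.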
    Fifo,
}

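/// A handler for queueing outgoing frames on a device.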
pub trait TransmitQueueHandler<D: Device, BC>: TransmitQueueCommon<D, BC> {
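    /// Queues `body` for transmission on `device_id`, sending it immediately
    /// when the device has no queue configured.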
    fn queue_tx_frame<S>(
        &mut self,
        bindings_ctx: &mut BC,
        device_id: &Self::DeviceId,
        meta: Self::Meta,
        body: S,
    ) -> Result<(), TransmitQueueFrameError<S>>
    where
        S: Serializer,
        S::Buffer: ReusableBuffer;
}

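/// Delivers an outgoing frame to any interested packet sockets.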
pub(super) fn deliver_to_device_sockets<
    D: Device,
    BC: TransmitQueueBindingsContext<CC::DeviceId>,
    CC: TransmitQueueCommon<D, BC> + DeviceSocketHandler<D, BC>,
>(
    core_ctx: &mut CC,
    bindings_ctx: &mut BC,
    device_id: &CC::DeviceId,
    buffer: &CC::Buffer,
    meta: &CC::Meta,
) {
    let bytes = buffer.as_ref();
    match CC::parse_outgoing_frame(bytes, meta) {
        Ok(sent_frame) => DeviceSocketHandler::handle_frame(
            core_ctx,
            bindings_ctx,
            device_id,
            sent_frame.into(),
            bytes,
        ),
        Err(ParseSentFrameError) => {
            trace!("failed to parse outgoing frame on {:?} ({} bytes)", device_id, bytes.len())
        }
    }
}

impl EnqueueResult {
    fn maybe_wake_tx<D, BC: TransmitQueueBindingsContext<D>>(
        self,
        bindings_ctx: &mut BC,
        device_id: &D,
    ) {
        match self {
            Self::QueuePreviouslyWasOccupied => (),
            Self::QueueWasPreviouslyEmpty => bindings_ctx.wake_tx_task(device_id),
        }
    }
}

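/// Whether enqueueing was attempted; a frame that was not enqueued is handed
/// back so it can be sent directly.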
enum EnqueueStatus<Meta, Buffer> {
    NotAttempted(Meta, Buffer),
    Attempted,
}

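/// Inserts the frame through `inserter`, waking the TX task if the queue was
/// previously empty, or hands the frame back when there is no queue.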
fn insert_and_notify<
    D: Device,
    BC: TransmitQueueBindingsContext<CC::DeviceId>,
    CC: TransmitQueueContext<D, BC> + DeviceSocketHandler<D, BC>,
>(
    bindings_ctx: &mut BC,
    device_id: &CC::DeviceId,
    inserter: Option<fifo::QueueTxInserter<'_, CC::Meta, CC::Buffer>>,
    meta: CC::Meta,
    body: CC::Buffer,
) -> EnqueueStatus<CC::Meta, CC::Buffer> {
    match inserter {
        None => EnqueueStatus::NotAttempted(meta, body),
        Some(inserter) => {
            inserter.insert(meta, body).maybe_wake_tx(bindings_ctx, device_id);
            EnqueueStatus::Attempted
        }
    }
}

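/// Finishes handling a frame that was not enqueued: it is delivered to packet
/// sockets and then sent directly out the device.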
fn handle_post_enqueue<
    D: Device,
    BC: TransmitQueueBindingsContext<CC::DeviceId>,
    CC: TransmitQueueContext<D, BC> + DeviceSocketHandler<D, BC>,
>(
    core_ctx: &mut CC,
    bindings_ctx: &mut BC,
    device_id: &CC::DeviceId,
    status: EnqueueStatus<CC::Meta, CC::Buffer>,
) -> Result<(), DeviceSendFrameError> {
    match status {
        EnqueueStatus::NotAttempted(meta, body) => {
            deliver_to_device_sockets(core_ctx, bindings_ctx, device_id, &body, &meta);
            core_ctx.send_frame(bindings_ctx, device_id, None, meta, body)
        }
        EnqueueStatus::Attempted => Ok(()),
    }
}

impl<
        D: Device,
        BC: TransmitQueueBindingsContext<CC::DeviceId>,
        CC: TransmitQueueContext<D, BC> + DeviceSocketHandler<D, BC>,
    > TransmitQueueHandler<D, BC> for CC
where
    for<'a> &'a mut CC::Allocator: BufferAlloc<CC::Buffer>,
    CC::Buffer: ReusableBuffer,
{
    fn queue_tx_frame<S>(
        &mut self,
        bindings_ctx: &mut BC,
        device_id: &CC::DeviceId,
        meta: CC::Meta,
        body: S,
    ) -> Result<(), TransmitQueueFrameError<S>>
    where
        S: Serializer,
        S::Buffer: ReusableBuffer,
    {
        let result =
            self.with_transmit_queue_mut(device_id, |TransmitQueueState { allocator, queue }| {
                let inserter = match queue {
                    None => None,
                    Some(q) => match q.tx_inserter() {
                        Some(i) => Some(i),
                        None => return Err(TransmitQueueFrameError::QueueFull(body)),
                    },
                };
                let body = body.serialize_outer(NoReuseBufferProvider(allocator)).map_err(
                    |(e, serializer)| {
                        TransmitQueueFrameError::SerializeError(ErrorAndSerializer {
                            serializer,
                            error: e.map_alloc(|_| ()),
                        })
                    },
                )?;
                Ok(insert_and_notify::<_, _, CC>(bindings_ctx, device_id, inserter, meta, body))
            })?;

        handle_post_enqueue(self, bindings_ctx, device_id, result)
            .map_err(TransmitQueueFrameError::NoQueue)
    }
}

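/// A buffer allocator producing `Buf<Vec<u8>>` buffers.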
#[derive(Default)]
pub struct BufVecU8Allocator;

impl<'a> BufferAlloc<Buf<Vec<u8>>> for &'a mut BufVecU8Allocator {
    type Error = Never;

    fn alloc(self, len: usize) -> Result<Buf<Vec<u8>>, Self::Error> {
        new_buf_vec(len)
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    use alloc::vec;

    use assert_matches::assert_matches;
    use net_declare::net_mac;
    use net_types::ethernet::Mac;
    use netstack3_base::testutil::{
        FakeBindingsCtx, FakeCoreCtx, FakeLinkDevice, FakeLinkDeviceId,
    };
    use netstack3_base::{
        ContextPair, CounterContext, CtxPair, ResourceCounterContext, WorkQueueReport,
    };
    use test_case::test_case;

    use crate::internal::queue::api::TransmitQueueApi;
    use crate::internal::queue::{BatchSize, MAX_TX_QUEUED_LEN};
    use crate::internal::socket::{EthernetFrame, Frame};
    use crate::DeviceCounters;

    #[derive(Default)]
    struct FakeTxQueueState {
        queue: TransmitQueueState<(), Buf<Vec<u8>>, BufVecU8Allocator>,
        transmitted_packets: Vec<(Buf<Vec<u8>>, Option<DequeueContext>)>,
        no_buffers: bool,
        stack_wide_device_counters: DeviceCounters,
        per_device_counters: DeviceCounters,
    }

    #[derive(Default)]
    struct FakeTxQueueBindingsCtxState {
        woken_tx_tasks: Vec<FakeLinkDeviceId>,
        delivered_to_sockets: Vec<Frame<Vec<u8>>>,
    }

    type FakeCoreCtxImpl = FakeCoreCtx<FakeTxQueueState, (), FakeLinkDeviceId>;
    type FakeBindingsCtxImpl = FakeBindingsCtx<(), (), FakeTxQueueBindingsCtxState, ()>;

    impl TransmitQueueBindingsContext<FakeLinkDeviceId> for FakeBindingsCtxImpl {
        fn wake_tx_task(&mut self, device_id: &FakeLinkDeviceId) {
            self.state.woken_tx_tasks.push(device_id.clone())
        }
    }

    const SRC_MAC: Mac = net_mac!("AA:BB:CC:DD:EE:FF");
    const DEST_MAC: Mac = net_mac!("FF:EE:DD:CC:BB:AA");

    #[derive(Copy, Clone, Debug, Eq, PartialEq)]
    struct DequeueContext;

    impl TransmitQueueCommon<FakeLinkDevice, FakeBindingsCtxImpl> for FakeCoreCtxImpl {
        type DequeueContext = DequeueContext;
        type Meta = ();
        type Buffer = Buf<Vec<u8>>;
        type Allocator = BufVecU8Allocator;

        fn parse_outgoing_frame<'a, 'b>(
            buf: &'a [u8],
            (): &'b Self::Meta,
        ) -> Result<SentFrame<&'a [u8]>, ParseSentFrameError> {
            Ok(fake_sent_ethernet_with_body(buf))
        }
    }

    fn fake_sent_ethernet_with_body<B>(body: B) -> SentFrame<B> {
        SentFrame::Ethernet(EthernetFrame {
            src_mac: SRC_MAC,
            dst_mac: DEST_MAC,
            ethertype: None,
            body,
        })
    }

    trait TransmitQueueApiExt: ContextPair + Sized {
        fn transmit_queue_api<D>(&mut self) -> TransmitQueueApi<D, &mut Self> {
            TransmitQueueApi::new(self)
        }
    }

    impl<O> TransmitQueueApiExt for O where O: ContextPair + Sized {}

    impl TransmitQueueContext<FakeLinkDevice, FakeBindingsCtxImpl> for FakeCoreCtxImpl {
        fn with_transmit_queue<
            O,
            F: FnOnce(&TransmitQueueState<(), Buf<Vec<u8>>, BufVecU8Allocator>) -> O,
        >(
            &mut self,
            &FakeLinkDeviceId: &FakeLinkDeviceId,
            cb: F,
        ) -> O {
            cb(&self.state.queue)
        }

        fn with_transmit_queue_mut<
            O,
            F: FnOnce(&mut TransmitQueueState<(), Buf<Vec<u8>>, BufVecU8Allocator>) -> O,
        >(
            &mut self,
            &FakeLinkDeviceId: &FakeLinkDeviceId,
            cb: F,
        ) -> O {
            cb(&mut self.state.queue)
        }

        fn send_frame(
            &mut self,
            _bindings_ctx: &mut FakeBindingsCtxImpl,
            &FakeLinkDeviceId: &FakeLinkDeviceId,
            dequeue_context: Option<&mut DequeueContext>,
            (): (),
            buf: Buf<Vec<u8>>,
        ) -> Result<(), DeviceSendFrameError> {
            let FakeTxQueueState { transmitted_packets, no_buffers, .. } = &mut self.state;
            if *no_buffers {
                Err(DeviceSendFrameError::NoBuffers)
            } else {
                Ok(transmitted_packets.push((buf, dequeue_context.map(|c| *c))))
            }
        }
    }

    impl ResourceCounterContext<FakeLinkDeviceId, DeviceCounters> for FakeCoreCtxImpl {
        fn per_resource_counters<'a>(
            &'a self,
            _resource: &'a FakeLinkDeviceId,
        ) -> &'a DeviceCounters {
            &self.state.per_device_counters
        }
    }

    impl CounterContext<DeviceCounters> for FakeCoreCtxImpl {
        fn counters(&self) -> &DeviceCounters {
            &self.state.stack_wide_device_counters
        }
    }

    impl TransmitDequeueContext<FakeLinkDevice, FakeBindingsCtxImpl> for FakeCoreCtxImpl {
        type TransmitQueueCtx<'a> = Self;

        fn with_dequed_packets_and_tx_queue_ctx<
            O,
            F: FnOnce(
                &mut DequeueState<Self::Meta, Self::Buffer>,
                &mut Self::TransmitQueueCtx<'_>,
            ) -> O,
        >(
            &mut self,
            &FakeLinkDeviceId: &FakeLinkDeviceId,
            cb: F,
        ) -> O {
            cb(&mut DequeueState::default(), self)
        }
    }

    impl DeviceSocketHandler<FakeLinkDevice, FakeBindingsCtxImpl> for FakeCoreCtxImpl {
        fn handle_frame(
            &mut self,
            bindings_ctx: &mut FakeBindingsCtxImpl,
            _device: &Self::DeviceId,
            frame: Frame<&[u8]>,
            _whole_frame: &[u8],
        ) {
            bindings_ctx.state.delivered_to_sockets.push(frame.cloned())
        }
    }

    #[test]
    fn noqueue() {
        let mut ctx = CtxPair::with_core_ctx(FakeCoreCtxImpl::default());

        let body = Buf::new(vec![0], ..);

        let CtxPair { core_ctx, bindings_ctx } = &mut ctx;
        assert_eq!(
            TransmitQueueHandler::queue_tx_frame(
                core_ctx,
                bindings_ctx,
                &FakeLinkDeviceId,
                (),
                body.clone(),
            ),
            Ok(())
        );
        let FakeTxQueueBindingsCtxState { woken_tx_tasks, delivered_to_sockets } =
            &bindings_ctx.state;
        assert_matches!(&woken_tx_tasks[..], &[]);
        assert_eq!(
            delivered_to_sockets,
            &[Frame::Sent(fake_sent_ethernet_with_body(body.as_ref().into()))]
        );
        assert_eq!(core::mem::take(&mut core_ctx.state.transmitted_packets), [(body, None)]);

        assert_eq!(
            ctx.transmit_queue_api().transmit_queued_frames(
                &FakeLinkDeviceId,
                BatchSize::default(),
                &mut DequeueContext,
            ),
            Ok(WorkQueueReport::AllDone),
        );

        let CtxPair { core_ctx, bindings_ctx } = &mut ctx;
        assert_matches!(&bindings_ctx.state.woken_tx_tasks[..], &[]);
        assert_eq!(core::mem::take(&mut core_ctx.state.transmitted_packets), []);
    }

    #[test_case(BatchSize::MAX)]
    #[test_case(BatchSize::MAX/2)]
    fn fifo_queue_and_dequeue(batch_size: usize) {
        let mut ctx = CtxPair::with_core_ctx(FakeCoreCtxImpl::default());

        ctx.transmit_queue_api()
            .set_configuration(&FakeLinkDeviceId, TransmitQueueConfiguration::Fifo);

        for _ in 0..2 {
            let CtxPair { core_ctx, bindings_ctx } = &mut ctx;
            for i in 0..MAX_TX_QUEUED_LEN {
                let body = Buf::new(vec![i as u8], ..);
                assert_eq!(
                    TransmitQueueHandler::queue_tx_frame(
                        core_ctx,
                        bindings_ctx,
                        &FakeLinkDeviceId,
                        (),
                        body
                    ),
                    Ok(())
                );
                assert_eq!(bindings_ctx.state.woken_tx_tasks, [FakeLinkDeviceId]);
            }

            let body = Buf::new(vec![131], ..);
            assert_eq!(
                TransmitQueueHandler::queue_tx_frame(
                    core_ctx,
                    bindings_ctx,
                    &FakeLinkDeviceId,
                    (),
                    body.clone(),
                ),
                Err(TransmitQueueFrameError::QueueFull(body))
            );

            let FakeTxQueueBindingsCtxState { woken_tx_tasks, delivered_to_sockets } =
                &mut bindings_ctx.state;
            assert_eq!(core::mem::take(woken_tx_tasks), [FakeLinkDeviceId]);
            assert_eq!(core::mem::take(delivered_to_sockets), &[]);

            assert!(MAX_TX_QUEUED_LEN > batch_size);
            for i in (0..(MAX_TX_QUEUED_LEN - batch_size)).step_by(batch_size) {
                assert_eq!(
                    ctx.transmit_queue_api().transmit_queued_frames(
                        &FakeLinkDeviceId,
                        BatchSize::new_saturating(batch_size),
                        &mut DequeueContext
                    ),
                    Ok(WorkQueueReport::Pending),
                );
                assert_eq!(
                    core::mem::take(&mut ctx.core_ctx.state.transmitted_packets),
                    (i..i + batch_size)
                        .map(|i| (Buf::new(vec![i as u8], ..), Some(DequeueContext)))
                        .collect::<Vec<_>>()
                );
            }

            assert_eq!(
                ctx.transmit_queue_api().transmit_queued_frames(
                    &FakeLinkDeviceId,
                    BatchSize::new_saturating(batch_size),
                    &mut DequeueContext
                ),
                Ok(WorkQueueReport::AllDone),
            );

            let CtxPair { core_ctx, bindings_ctx } = &mut ctx;
            assert_eq!(
                core::mem::take(&mut core_ctx.state.transmitted_packets),
                (batch_size * (MAX_TX_QUEUED_LEN / batch_size - 1)..MAX_TX_QUEUED_LEN)
                    .map(|i| (Buf::new(vec![i as u8], ..), Some(DequeueContext)))
                    .collect::<Vec<_>>()
            );
            let FakeTxQueueBindingsCtxState { woken_tx_tasks, delivered_to_sockets } =
                &mut bindings_ctx.state;
            assert_matches!(&core::mem::take(woken_tx_tasks)[..], &[]);

            assert_eq!(
                core::mem::take(delivered_to_sockets),
                (0..MAX_TX_QUEUED_LEN)
                    .map(|i| Frame::Sent(fake_sent_ethernet_with_body(vec![i as u8])))
                    .collect::<Vec<_>>()
            );
        }
    }

    #[test]
    fn dequeue_error() {
        let mut ctx = CtxPair::with_core_ctx(FakeCoreCtxImpl::default());

        ctx.transmit_queue_api()
            .set_configuration(&FakeLinkDeviceId, TransmitQueueConfiguration::Fifo);

        let CtxPair { core_ctx, bindings_ctx } = &mut ctx;
        let body = Buf::new(vec![0], ..);
        assert_eq!(
            TransmitQueueHandler::queue_tx_frame(
                core_ctx,
                bindings_ctx,
                &FakeLinkDeviceId,
                (),
                body.clone(),
            ),
            Ok(())
        );
        assert_eq!(core::mem::take(&mut bindings_ctx.state.woken_tx_tasks), [FakeLinkDeviceId]);
        assert_eq!(core_ctx.state.transmitted_packets, []);

        core_ctx.state.no_buffers = true;
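        // With no device buffers available, draining the queue reports the
        // error to the caller and the dequeued frame is dropped.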
        assert_eq!(
            ctx.transmit_queue_api().transmit_queued_frames(
                &FakeLinkDeviceId,
                BatchSize::default(),
                &mut DequeueContext
            ),
            Err(DeviceSendFrameError::NoBuffers),
        );
        let CtxPair { core_ctx, bindings_ctx } = &mut ctx;
        assert_eq!(core_ctx.state.transmitted_packets, []);
        let FakeTxQueueBindingsCtxState { woken_tx_tasks, delivered_to_sockets } =
            &bindings_ctx.state;
        assert_matches!(&woken_tx_tasks[..], &[]);
        assert_eq!(
            delivered_to_sockets,
            &[Frame::Sent(fake_sent_ethernet_with_body(body.as_ref().into()))]
        );

        core_ctx.state.no_buffers = false;
        assert_eq!(
            ctx.transmit_queue_api().transmit_queued_frames(
                &FakeLinkDeviceId,
                BatchSize::default(),
                &mut DequeueContext
            ),
            Ok(WorkQueueReport::AllDone),
        );
        let CtxPair { core_ctx, bindings_ctx } = &mut ctx;
        assert_matches!(&bindings_ctx.state.woken_tx_tasks[..], &[]);
        assert_eq!(core::mem::take(&mut core_ctx.state.transmitted_packets), []);
    }

    #[test_case(true; "device no buffers")]
    #[test_case(false; "device has buffers")]
    fn drain_before_noqueue(no_buffers: bool) {
        let mut ctx = CtxPair::with_core_ctx(FakeCoreCtxImpl::default());

        ctx.transmit_queue_api()
            .set_configuration(&FakeLinkDeviceId, TransmitQueueConfiguration::Fifo);

        let CtxPair { core_ctx, bindings_ctx } = &mut ctx;
        let body = Buf::new(vec![0], ..);
        assert_eq!(
            TransmitQueueHandler::queue_tx_frame(
                core_ctx,
                bindings_ctx,
                &FakeLinkDeviceId,
                (),
                body.clone(),
            ),
            Ok(())
        );
        assert_eq!(core::mem::take(&mut bindings_ctx.state.woken_tx_tasks), [FakeLinkDeviceId]);
        assert_eq!(core_ctx.state.transmitted_packets, []);

        core_ctx.state.no_buffers = no_buffers;
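        // Removing the queue drains it: the frame is delivered to packet
        // sockets and is transmitted only if the device has buffers available.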
        ctx.transmit_queue_api()
            .set_configuration(&FakeLinkDeviceId, TransmitQueueConfiguration::None);

        let CtxPair { core_ctx, bindings_ctx } = &mut ctx;
        let FakeTxQueueBindingsCtxState { woken_tx_tasks, delivered_to_sockets } =
            &bindings_ctx.state;
        assert_matches!(&woken_tx_tasks[..], &[]);
        assert_eq!(
            delivered_to_sockets,
            &[Frame::Sent(fake_sent_ethernet_with_body(body.as_ref().into()))]
        );
        if no_buffers {
            assert_eq!(core_ctx.state.transmitted_packets, []);
        } else {
            assert_eq!(core::mem::take(&mut core_ctx.state.transmitted_packets), [(body, None)]);
        }
    }

    #[test]
    fn count() {
        let mut ctx = CtxPair::with_core_ctx(FakeCoreCtxImpl::default());
        assert_eq!(ctx.transmit_queue_api().count(&FakeLinkDeviceId), None);

        ctx.transmit_queue_api()
            .set_configuration(&FakeLinkDeviceId, TransmitQueueConfiguration::Fifo);

        assert_eq!(ctx.transmit_queue_api().count(&FakeLinkDeviceId), Some(0));

        let CtxPair { core_ctx, bindings_ctx } = &mut ctx;
        let body = Buf::new(vec![0], ..);
        assert_eq!(
            TransmitQueueHandler::queue_tx_frame(
                core_ctx,
                bindings_ctx,
                &FakeLinkDeviceId,
                (),
                body,
            ),
            Ok(())
        );

        assert_eq!(ctx.transmit_queue_api().count(&FakeLinkDeviceId), Some(1));
    }
}