use lock_order::lock::{DelegatedOrderedLockAccess, LockLevelFor};
use lock_order::relation::LockBefore;
use net_types::ip::{Ip, Ipv4, Ipv6};
use netstack3_base::socket::MaybeDualStack;
use netstack3_base::{
    CoreTimerContext, CounterContext, ResourceCounterContext, Uninstantiable,
    UninstantiableWrapper, WeakDeviceIdentifier,
};
use netstack3_datagram as datagram;
use netstack3_device::WeakDeviceId;
use netstack3_tcp::{
    self as tcp, IsnGenerator, TcpContext, TcpCountersWithSocket, TcpCountersWithoutSocket,
    TcpDemuxContext, TcpDualStackContext, TcpSocketId, TcpSocketSet, TcpSocketState, TcpState,
    TimestampOffsetGenerator, WeakTcpSocketId,
};
use netstack3_udp::{
    self as udp, UdpCountersWithSocket, UdpCountersWithoutSocket, UdpSocketId, UdpSocketSet,
    UdpSocketState,
};

use crate::context::WrapLockLevel;
use crate::context::prelude::*;
use crate::transport::TransportLayerTimerId;
use crate::{BindingsContext, BindingsTypes, CoreCtx, StackState};

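// Converts TCP socket timers into the transport-layer timer dispatch ID used by bindings.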
impl<I, BC, L> CoreTimerContext<WeakTcpSocketId<I, WeakDeviceId<BC>, BC>, BC> for CoreCtx<'_, BC, L>
where
    I: tcp::DualStackIpExt,
    BC: BindingsContext,
{
    fn convert_timer(dispatch_id: WeakTcpSocketId<I, WeakDeviceId<BC>, BC>) -> BC::DispatchId {
        TransportLayerTimerId::Tcp(dispatch_id.into()).into()
    }
}

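// Grants TCP read/write access to the per-IP-version demux state, available to any lock
// level ordered before `TcpDemux<I>`.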
#[netstack3_macros::instantiate_ip_impl_block(I)]
impl<I, L, BC> TcpDemuxContext<I, WeakDeviceId<BC>, BC> for CoreCtx<'_, BC, L>
where
    I: Ip,
    BC: BindingsContext,
    L: LockBefore<crate::lock_ordering::TcpDemux<I>>,
{
    type IpTransportCtx<'a> = CoreCtx<'a, BC, WrapLockLevel<crate::lock_ordering::TcpDemux<I>>>;
    fn with_demux<O, F: FnOnce(&tcp::DemuxState<I, WeakDeviceId<BC>, BC>) -> O>(
        &mut self,
        cb: F,
    ) -> O {
        cb(&self.read_lock::<crate::lock_ordering::TcpDemux<I>>())
    }

    fn with_demux_mut<O, F: FnOnce(&mut tcp::DemuxState<I, WeakDeviceId<BC>, BC>) -> O>(
        &mut self,
        cb: F,
    ) -> O {
        cb(&mut self.write_lock::<crate::lock_ordering::TcpDemux<I>>())
    }
}

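// TCP socket access for IPv4. IPv4 TCP sockets are never dual-stack, so the dual-stack
// transport/demux context and converter are uninstantiable.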
impl<L, BC> TcpContext<Ipv4, BC> for CoreCtx<'_, BC, L>
where
    BC: BindingsContext,
    L: LockBefore<crate::lock_ordering::TcpAllSocketsSet<Ipv4>>,
{
    type ThisStackIpTransportAndDemuxCtx<'a> =
        CoreCtx<'a, BC, WrapLockLevel<crate::lock_ordering::TcpSocketState<Ipv4>>>;
    type SingleStackIpTransportAndDemuxCtx<'a> =
        CoreCtx<'a, BC, WrapLockLevel<crate::lock_ordering::TcpSocketState<Ipv4>>>;

    type DualStackIpTransportAndDemuxCtx<'a> = UninstantiableWrapper<
        CoreCtx<'a, BC, WrapLockLevel<crate::lock_ordering::TcpSocketState<Ipv4>>>,
    >;

    type SingleStackConverter = ();
    type DualStackConverter = Uninstantiable;

    fn with_all_sockets_mut<O, F: FnOnce(&mut TcpSocketSet<Ipv4, Self::WeakDeviceId, BC>) -> O>(
        &mut self,
        cb: F,
    ) -> O {
        let mut all_sockets = self.write_lock::<crate::lock_ordering::TcpAllSocketsSet<Ipv4>>();
        cb(&mut *all_sockets)
    }

    fn for_each_socket<
        F: FnMut(
            &TcpSocketId<Ipv4, Self::WeakDeviceId, BC>,
            &TcpSocketState<Ipv4, Self::WeakDeviceId, BC>,
        ),
    >(
        &mut self,
        mut cb: F,
    ) {
        let (all_sockets, mut locked) =
            self.read_lock_and::<crate::lock_ordering::TcpAllSocketsSet<Ipv4>>();
        all_sockets.keys().for_each(|id| {
            let mut locked = locked.adopt(id);
            let guard = locked
                .read_lock_with::<crate::lock_ordering::TcpSocketState<Ipv4>, _>(|c| c.right());
            cb(id, &*guard);
        });
    }

    fn with_socket_mut_generators_transport_demux<
        O,
        F: for<'a> FnOnce(
            MaybeDualStack<
                (&'a mut Self::DualStackIpTransportAndDemuxCtx<'a>, Self::DualStackConverter),
                (&'a mut Self::SingleStackIpTransportAndDemuxCtx<'a>, Self::SingleStackConverter),
            >,
            &mut TcpSocketState<Ipv4, Self::WeakDeviceId, BC>,
            &IsnGenerator<BC::Instant>,
            &TimestampOffsetGenerator<BC::Instant>,
        ) -> O,
    >(
        &mut self,
        id: &TcpSocketId<Ipv4, Self::WeakDeviceId, BC>,
        cb: F,
    ) -> O {
        let TcpState { isn_generator, timestamp_offset_generator, .. } = &self
            .unlocked_access::<crate::lock_ordering::UnlockedState>()
            .transport
            .tcp_state::<Ipv4>();
        let mut locked = self.adopt(id);
        let (mut socket_state, mut restricted) = locked
            .write_lock_with_and::<crate::lock_ordering::TcpSocketState<Ipv4>, _>(|c| c.right());
        let mut restricted = restricted.cast_core_ctx();
        let maybe_dual_stack = MaybeDualStack::NotDualStack((&mut restricted, ()));
        cb(maybe_dual_stack, &mut socket_state, isn_generator, timestamp_offset_generator)
    }

    fn with_socket_and_converter<
        O,
        F: FnOnce(
            &TcpSocketState<Ipv4, Self::WeakDeviceId, BC>,
            MaybeDualStack<Self::DualStackConverter, Self::SingleStackConverter>,
        ) -> O,
    >(
        &mut self,
        id: &TcpSocketId<Ipv4, Self::WeakDeviceId, BC>,
        cb: F,
    ) -> O {
        let mut locked = self.adopt(id);
        let socket_state =
            locked.read_lock_with::<crate::lock_ordering::TcpSocketState<Ipv4>, _>(|c| c.right());
        cb(&socket_state, MaybeDualStack::NotDualStack(()))
    }
}

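// TCP socket access for IPv6. IPv6 TCP sockets may be dual-stack, so here the
// single-stack context and converter are the uninstantiable ones.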
impl<L, BC> TcpContext<Ipv6, BC> for CoreCtx<'_, BC, L>
where
    BC: BindingsContext,
    L: LockBefore<crate::lock_ordering::TcpAllSocketsSet<Ipv6>>,
{
    type ThisStackIpTransportAndDemuxCtx<'a> =
        CoreCtx<'a, BC, WrapLockLevel<crate::lock_ordering::TcpSocketState<Ipv6>>>;
    type SingleStackIpTransportAndDemuxCtx<'a> = UninstantiableWrapper<
        CoreCtx<'a, BC, WrapLockLevel<crate::lock_ordering::TcpSocketState<Ipv6>>>,
    >;

    type DualStackIpTransportAndDemuxCtx<'a> =
        CoreCtx<'a, BC, WrapLockLevel<crate::lock_ordering::TcpSocketState<Ipv6>>>;

    type SingleStackConverter = Uninstantiable;
    type DualStackConverter = ();

    fn with_all_sockets_mut<O, F: FnOnce(&mut TcpSocketSet<Ipv6, Self::WeakDeviceId, BC>) -> O>(
        &mut self,
        cb: F,
    ) -> O {
        let mut all_sockets = self.write_lock::<crate::lock_ordering::TcpAllSocketsSet<Ipv6>>();
        cb(&mut *all_sockets)
    }

    fn for_each_socket<
        F: FnMut(
            &TcpSocketId<Ipv6, Self::WeakDeviceId, BC>,
            &TcpSocketState<Ipv6, Self::WeakDeviceId, BC>,
        ),
    >(
        &mut self,
        mut cb: F,
    ) {
        let (all_sockets, mut locked) =
            self.read_lock_and::<crate::lock_ordering::TcpAllSocketsSet<Ipv6>>();
        all_sockets.keys().for_each(|id| {
            let mut locked = locked.adopt(id);
            let guard = locked
                .read_lock_with::<crate::lock_ordering::TcpSocketState<Ipv6>, _>(|c| c.right());
            cb(id, &*guard);
        });
    }

    fn with_socket_mut_generators_transport_demux<
        O,
        F: for<'a> FnOnce(
            MaybeDualStack<
                (&'a mut Self::DualStackIpTransportAndDemuxCtx<'a>, Self::DualStackConverter),
                (&'a mut Self::SingleStackIpTransportAndDemuxCtx<'a>, Self::SingleStackConverter),
            >,
            &mut TcpSocketState<Ipv6, Self::WeakDeviceId, BC>,
            &IsnGenerator<BC::Instant>,
            &TimestampOffsetGenerator<BC::Instant>,
        ) -> O,
    >(
        &mut self,
        id: &TcpSocketId<Ipv6, Self::WeakDeviceId, BC>,
        cb: F,
    ) -> O {
        let TcpState { isn_generator, timestamp_offset_generator, .. } = &self
            .unlocked_access::<crate::lock_ordering::UnlockedState>()
            .transport
            .tcp_state::<Ipv6>();
        let mut locked = self.adopt(id);
        let (mut socket_state, mut restricted) = locked
            .write_lock_with_and::<crate::lock_ordering::TcpSocketState<Ipv6>, _>(|c| c.right());
        let mut restricted = restricted.cast_core_ctx();
        let maybe_dual_stack = MaybeDualStack::DualStack((&mut restricted, ()));
        cb(maybe_dual_stack, &mut socket_state, isn_generator, timestamp_offset_generator)
    }

    fn with_socket_and_converter<
        O,
        F: FnOnce(
            &TcpSocketState<Ipv6, Self::WeakDeviceId, BC>,
            MaybeDualStack<Self::DualStackConverter, Self::SingleStackConverter>,
        ) -> O,
    >(
        &mut self,
        id: &TcpSocketId<Ipv6, Self::WeakDeviceId, BC>,
        cb: F,
    ) -> O {
        let mut locked = self.adopt(id);
        let socket_state =
            locked.read_lock_with::<crate::lock_ordering::TcpSocketState<Ipv6>, _>(|c| c.right());
        cb(&socket_state, MaybeDualStack::DualStack(()))
    }
}

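// Dual-stack support for IPv6 TCP sockets. Note the lock order in `with_both_demux_mut`:
// the IPv4 demux is acquired before the IPv6 demux.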
impl<L: LockBefore<crate::lock_ordering::TcpDemux<Ipv4>>, BC: BindingsContext>
    TcpDualStackContext<Ipv6, WeakDeviceId<BC>, BC> for CoreCtx<'_, BC, L>
{
    type DualStackIpTransportCtx<'a> =
        CoreCtx<'a, BC, WrapLockLevel<crate::lock_ordering::TcpDemux<Ipv6>>>;
    fn other_demux_id_converter(&self) -> impl tcp::DualStackDemuxIdConverter<Ipv6> {
        tcp::Ipv6SocketIdToIpv4DemuxIdConverter
    }

    fn dual_stack_enabled(&self, ip_options: &tcp::Ipv6Options) -> bool {
        ip_options.dual_stack_enabled
    }

    fn set_dual_stack_enabled(&self, ip_options: &mut tcp::Ipv6Options, value: bool) {
        ip_options.dual_stack_enabled = value;
    }

    fn with_both_demux_mut<
        O,
        F: FnOnce(
            &mut tcp::DemuxState<Ipv6, WeakDeviceId<BC>, BC>,
            &mut tcp::DemuxState<Ipv4, WeakDeviceId<BC>, BC>,
        ) -> O,
    >(
        &mut self,
        cb: F,
    ) -> O {
        let (mut demux_v4, mut locked) =
            self.write_lock_and::<crate::lock_ordering::TcpDemux<Ipv4>>();
        let mut demux_v6 = locked.write_lock::<crate::lock_ordering::TcpDemux<Ipv6>>();
        cb(&mut demux_v6, &mut demux_v4)
    }
}

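// UDP socket-state access, instantiated for both IP versions via `instantiate_ip_impl_block`.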
#[netstack3_macros::instantiate_ip_impl_block(I)]
impl<I: Ip, BC: BindingsContext, L: LockBefore<crate::lock_ordering::UdpAllSocketsSet<I>>>
    udp::StateContext<I, BC> for CoreCtx<'_, BC, L>
{
    type SocketStateCtx<'a> =
        CoreCtx<'a, BC, WrapLockLevel<crate::lock_ordering::UdpSocketState<I>>>;

    fn with_bound_state_context<O, F: FnOnce(&mut Self::SocketStateCtx<'_>) -> O>(
        &mut self,
        cb: F,
    ) -> O {
        cb(&mut self.cast_locked::<crate::lock_ordering::UdpSocketState<I>>())
    }

    fn with_all_sockets_mut<O, F: FnOnce(&mut UdpSocketSet<I, Self::WeakDeviceId, BC>) -> O>(
        &mut self,
        cb: F,
    ) -> O {
        cb(&mut self.write_lock::<crate::lock_ordering::UdpAllSocketsSet<I>>())
    }

    fn with_all_sockets<O, F: FnOnce(&UdpSocketSet<I, Self::WeakDeviceId, BC>) -> O>(
        &mut self,
        cb: F,
    ) -> O {
        cb(&self.read_lock::<crate::lock_ordering::UdpAllSocketsSet<I>>())
    }

    fn with_socket_state<
        O,
        F: FnOnce(&mut Self::SocketStateCtx<'_>, &UdpSocketState<I, Self::WeakDeviceId, BC>) -> O,
    >(
        &mut self,
        id: &UdpSocketId<I, Self::WeakDeviceId, BC>,
        cb: F,
    ) -> O {
        let mut locked = self.adopt(id);
        let (socket_state, mut restricted) =
            locked.read_lock_with_and::<crate::lock_ordering::UdpSocketState<I>, _>(|c| c.right());
        let mut restricted = restricted.cast_core_ctx();
        cb(&mut restricted, &socket_state)
    }

    fn with_socket_state_mut<
        O,
        F: FnOnce(&mut Self::SocketStateCtx<'_>, &mut UdpSocketState<I, Self::WeakDeviceId, BC>) -> O,
    >(
        &mut self,
        id: &UdpSocketId<I, Self::WeakDeviceId, BC>,
        cb: F,
    ) -> O {
        let mut locked = self.adopt(id);
        let (mut socket_state, mut restricted) =
            locked.write_lock_with_and::<crate::lock_ordering::UdpSocketState<I>, _>(|c| c.right());
        let mut restricted = restricted.cast_core_ctx();
        cb(&mut restricted, &mut socket_state)
    }

    fn for_each_socket<
        F: FnMut(
            &mut Self::SocketStateCtx<'_>,
            &UdpSocketId<I, Self::WeakDeviceId, BC>,
            &UdpSocketState<I, Self::WeakDeviceId, BC>,
        ),
    >(
        &mut self,
        mut cb: F,
    ) {
        let (all_sockets, mut locked) =
            self.read_lock_and::<crate::lock_ordering::UdpAllSocketsSet<I>>();
        all_sockets.keys().for_each(|id| {
            let id = UdpSocketId::from(id.clone());
            let mut locked = locked.adopt(&id);
            let (socket_state, mut restricted) = locked
                .read_lock_with_and::<crate::lock_ordering::UdpSocketState<I>, _>(|c| c.right());
            let mut restricted = restricted.cast_core_ctx();
            cb(&mut restricted, &id, &socket_state);
        });
    }
}

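// Bound-socket map access for IPv4 UDP sockets (the non-dual-stack variant).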
impl<BC: BindingsContext, L: LockBefore<crate::lock_ordering::UdpBoundMap<Ipv4>>>
    udp::BoundStateContext<Ipv4, BC> for CoreCtx<'_, BC, L>
{
    type IpSocketsCtx<'a> = CoreCtx<'a, BC, WrapLockLevel<crate::lock_ordering::UdpBoundMap<Ipv4>>>;
    type DualStackContext = UninstantiableWrapper<Self>;
    type NonDualStackContext = Self;

    fn with_bound_sockets<
        O,
        F: FnOnce(&mut Self::IpSocketsCtx<'_>, &udp::BoundSockets<Ipv4, Self::WeakDeviceId, BC>) -> O,
    >(
        &mut self,
        cb: F,
    ) -> O {
        let (bound_sockets, mut locked) =
            self.read_lock_and::<crate::lock_ordering::UdpBoundMap<Ipv4>>();
        cb(&mut locked, &bound_sockets)
    }

    fn with_bound_sockets_mut<
        O,
        F: FnOnce(
            &mut Self::IpSocketsCtx<'_>,
            &mut udp::BoundSockets<Ipv4, Self::WeakDeviceId, BC>,
        ) -> O,
    >(
        &mut self,
        cb: F,
    ) -> O {
        let (mut bound_sockets, mut locked) =
            self.write_lock_and::<crate::lock_ordering::UdpBoundMap<Ipv4>>();
        cb(&mut locked, &mut bound_sockets)
    }

    fn dual_stack_context(
        &self,
    ) -> MaybeDualStack<&Self::DualStackContext, &Self::NonDualStackContext> {
        MaybeDualStack::NotDualStack(self)
    }

    fn dual_stack_context_mut(
        &mut self,
    ) -> MaybeDualStack<&mut Self::DualStackContext, &mut Self::NonDualStackContext> {
        MaybeDualStack::NotDualStack(self)
    }

    fn with_transport_context<O, F: FnOnce(&mut Self::IpSocketsCtx<'_>) -> O>(
        &mut self,
        cb: F,
    ) -> O {
        cb(&mut self.cast_locked())
    }
}

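// Bound-socket map access for IPv6 UDP sockets. The `UdpBoundMap<Ipv4>` bound keeps the
// IPv4 map lockable for the dual-stack operations implemented further below.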
impl<BC: BindingsContext, L: LockBefore<crate::lock_ordering::UdpBoundMap<Ipv4>>>
    udp::BoundStateContext<Ipv6, BC> for CoreCtx<'_, BC, L>
{
    type IpSocketsCtx<'a> = CoreCtx<'a, BC, WrapLockLevel<crate::lock_ordering::UdpBoundMap<Ipv6>>>;
    type DualStackContext = Self;
    type NonDualStackContext = UninstantiableWrapper<Self>;

    fn with_bound_sockets<
        O,
        F: FnOnce(&mut Self::IpSocketsCtx<'_>, &udp::BoundSockets<Ipv6, Self::WeakDeviceId, BC>) -> O,
    >(
        &mut self,
        cb: F,
    ) -> O {
        let (bound_sockets, mut locked) =
            self.read_lock_and::<crate::lock_ordering::UdpBoundMap<Ipv6>>();
        cb(&mut locked, &bound_sockets)
    }

    fn with_bound_sockets_mut<
        O,
        F: FnOnce(
            &mut Self::IpSocketsCtx<'_>,
            &mut udp::BoundSockets<Ipv6, Self::WeakDeviceId, BC>,
        ) -> O,
    >(
        &mut self,
        cb: F,
    ) -> O {
        let (mut bound_sockets, mut locked) =
            self.write_lock_and::<crate::lock_ordering::UdpBoundMap<Ipv6>>();
        cb(&mut locked, &mut bound_sockets)
    }

    fn dual_stack_context(
        &self,
    ) -> MaybeDualStack<&Self::DualStackContext, &Self::NonDualStackContext> {
        MaybeDualStack::DualStack(self)
    }

    fn dual_stack_context_mut(
        &mut self,
    ) -> MaybeDualStack<&mut Self::DualStackContext, &mut Self::NonDualStackContext> {
        MaybeDualStack::DualStack(self)
    }

    fn with_transport_context<O, F: FnOnce(&mut Self::IpSocketsCtx<'_>) -> O>(
        &mut self,
        cb: F,
    ) -> O {
        cb(&mut self.cast_locked::<crate::lock_ordering::UdpBoundMap<Ipv6>>())
    }
}

impl<L, BC: BindingsContext> udp::UdpStateContext for CoreCtx<'_, BC, L> {}

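// Gives dual-stack (IPv6) UDP sockets access to both bound-socket maps; the IPv4 map is
// always locked before the IPv6 map.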
impl<BC: BindingsContext, L: LockBefore<crate::lock_ordering::UdpBoundMap<Ipv4>>>
    udp::DualStackBoundStateContext<Ipv6, BC> for CoreCtx<'_, BC, L>
{
    type IpSocketsCtx<'a> = CoreCtx<'a, BC, WrapLockLevel<crate::lock_ordering::UdpBoundMap<Ipv6>>>;

    fn with_both_bound_sockets_mut<
        O,
        F: FnOnce(
            &mut Self::IpSocketsCtx<'_>,
            &mut udp::BoundSockets<Ipv6, Self::WeakDeviceId, BC>,
            &mut udp::BoundSockets<Ipv4, Self::WeakDeviceId, BC>,
        ) -> O,
    >(
        &mut self,
        cb: F,
    ) -> O {
        let (mut bound_v4, mut locked) =
            self.write_lock_and::<crate::lock_ordering::UdpBoundMap<Ipv4>>();
        let (mut bound_v6, mut locked) =
            locked.write_lock_and::<crate::lock_ordering::UdpBoundMap<Ipv6>>();
        cb(&mut locked, &mut bound_v6, &mut bound_v4)
    }

    fn with_other_bound_sockets_mut<
        O,
        F: FnOnce(
            &mut Self::IpSocketsCtx<'_>,
            &mut udp::BoundSockets<Ipv4, Self::WeakDeviceId, BC>,
        ) -> O,
    >(
        &mut self,
        cb: F,
    ) -> O {
        let (mut bound_v4, mut locked) =
            self.write_lock_and::<crate::lock_ordering::UdpBoundMap<Ipv4>>();
        cb(&mut locked.cast_locked::<crate::lock_ordering::UdpBoundMap<Ipv6>>(), &mut bound_v4)
    }

    fn with_transport_context<O, F: FnOnce(&mut Self::IpSocketsCtx<'_>) -> O>(
        &mut self,
        cb: F,
    ) -> O {
        cb(&mut self.cast_locked::<crate::lock_ordering::UdpBoundMap<Ipv6>>())
    }
}

impl<BC: BindingsContext, L: LockBefore<crate::lock_ordering::UdpBoundMap<Ipv4>>>
    udp::NonDualStackBoundStateContext<Ipv4, BC> for CoreCtx<'_, BC, L>
{
}

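// Lock-ordering plumbing: map each transport lock level to the data it guards, and
// delegate ordered lock access from `StackState` to the corresponding socket state.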
impl<I: tcp::DualStackIpExt, BT: BindingsTypes> LockLevelFor<StackState<BT>>
    for crate::lock_ordering::TcpAllSocketsSet<I>
{
    type Data = TcpSocketSet<I, WeakDeviceId<BT>, BT>;
}

impl<I: tcp::DualStackIpExt, BT: BindingsTypes>
    DelegatedOrderedLockAccess<TcpSocketSet<I, WeakDeviceId<BT>, BT>> for StackState<BT>
{
    type Inner = tcp::Sockets<I, WeakDeviceId<BT>, BT>;
    fn delegate_ordered_lock_access(&self) -> &Self::Inner {
        &self.transport.tcp_state::<I>().sockets
    }
}

impl<I: tcp::DualStackIpExt, BT: BindingsTypes> LockLevelFor<StackState<BT>>
    for crate::lock_ordering::TcpDemux<I>
{
    type Data = tcp::DemuxState<I, WeakDeviceId<BT>, BT>;
}

impl<I: tcp::DualStackIpExt, BT: BindingsTypes>
    DelegatedOrderedLockAccess<tcp::DemuxState<I, WeakDeviceId<BT>, BT>> for StackState<BT>
{
    type Inner = tcp::Sockets<I, WeakDeviceId<BT>, BT>;
    fn delegate_ordered_lock_access(&self) -> &Self::Inner {
        &self.transport.tcp_state::<I>().sockets
    }
}

impl<I: crate::transport::tcp::DualStackIpExt, D: WeakDeviceIdentifier, BT: BindingsTypes>
    LockLevelFor<TcpSocketId<I, D, BT>> for crate::lock_ordering::TcpSocketState<I>
{
    type Data = TcpSocketState<I, D, BT>;
}

impl<I: datagram::IpExt, D: WeakDeviceIdentifier, BT: BindingsTypes>
    LockLevelFor<UdpSocketId<I, D, BT>> for crate::lock_ordering::UdpSocketState<I>
{
    type Data = UdpSocketState<I, D, BT>;
}

impl<I: datagram::IpExt, BT: BindingsTypes> LockLevelFor<StackState<BT>>
    for crate::lock_ordering::UdpBoundMap<I>
{
    type Data = udp::BoundSockets<I, WeakDeviceId<BT>, BT>;
}

impl<I: datagram::IpExt, BT: BindingsTypes>
    DelegatedOrderedLockAccess<udp::BoundSockets<I, WeakDeviceId<BT>, BT>> for StackState<BT>
{
    type Inner = udp::Sockets<I, WeakDeviceId<BT>, BT>;
    fn delegate_ordered_lock_access(&self) -> &Self::Inner {
        &self.transport.udp_state::<I>().sockets
    }
}

impl<I: datagram::IpExt, BT: BindingsTypes> LockLevelFor<StackState<BT>>
    for crate::lock_ordering::UdpAllSocketsSet<I>
{
    type Data = UdpSocketSet<I, WeakDeviceId<BT>, BT>;
}

impl<I: datagram::IpExt, BT: BindingsTypes>
    DelegatedOrderedLockAccess<UdpSocketSet<I, WeakDeviceId<BT>, BT>> for StackState<BT>
{
    type Inner = udp::Sockets<I, WeakDeviceId<BT>, BT>;
    fn delegate_ordered_lock_access(&self) -> &Self::Inner {
        &self.transport.udp_state::<I>().sockets
    }
}

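// Counter access: stack-wide TCP/UDP counters live in unlocked `StackState`, while
// per-socket counters are read directly from the socket ID.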
impl<BC: BindingsContext, I: Ip, L> CounterContext<TcpCountersWithSocket<I>>
    for CoreCtx<'_, BC, L>
{
    fn counters(&self) -> &TcpCountersWithSocket<I> {
        self.unlocked_access::<crate::lock_ordering::UnlockedState>()
            .transport
            .tcp_counters_with_socket::<I>()
    }
}

impl<BC: BindingsContext, I: Ip, L> CounterContext<TcpCountersWithoutSocket<I>>
    for CoreCtx<'_, BC, L>
{
    fn counters(&self) -> &TcpCountersWithoutSocket<I> {
        self.unlocked_access::<crate::lock_ordering::UnlockedState>()
            .transport
            .tcp_counters_without_socket::<I>()
    }
}

impl<BC: BindingsContext, I: netstack3_tcp::DualStackIpExt, L>
    ResourceCounterContext<TcpSocketId<I, WeakDeviceId<BC>, BC>, TcpCountersWithSocket<I>>
    for CoreCtx<'_, BC, L>
{
    fn per_resource_counters<'a>(
        &'a self,
        resource: &'a TcpSocketId<I, WeakDeviceId<BC>, BC>,
    ) -> &'a TcpCountersWithSocket<I> {
        resource.counters()
    }
}

impl<BC: BindingsContext, I: Ip, L> CounterContext<UdpCountersWithSocket<I>>
    for CoreCtx<'_, BC, L>
{
    fn counters(&self) -> &UdpCountersWithSocket<I> {
        self.unlocked_access::<crate::lock_ordering::UnlockedState>()
            .transport
            .udp_counters_with_socket::<I>()
    }
}

impl<BC: BindingsContext, I: Ip, L> CounterContext<UdpCountersWithoutSocket<I>>
    for CoreCtx<'_, BC, L>
{
    fn counters(&self) -> &UdpCountersWithoutSocket<I> {
        self.unlocked_access::<crate::lock_ordering::UnlockedState>()
            .transport
            .udp_counters_without_socket::<I>()
    }
}

impl<BC: BindingsContext, I: netstack3_datagram::IpExt, L>
    ResourceCounterContext<UdpSocketId<I, WeakDeviceId<BC>, BC>, UdpCountersWithSocket<I>>
    for CoreCtx<'_, BC, L>
{
    fn per_resource_counters<'a>(
        &'a self,
        resource: &'a UdpSocketId<I, WeakDeviceId<BC>, BC>,
    ) -> &'a UdpCountersWithSocket<I> {
        resource.counters()
    }
}