use lock_order::lock::{DelegatedOrderedLockAccess, LockLevelFor};
use lock_order::relation::LockBefore;
use net_types::ip::{Ip, IpVersion, Ipv4, Ipv6};
use netstack3_base::socket::MaybeDualStack;
use netstack3_base::{
    CoreTimerContext, CounterContext, ResourceCounterContext, Uninstantiable,
    UninstantiableWrapper, WeakDeviceIdentifier,
};
use netstack3_datagram as datagram;
use netstack3_device::WeakDeviceId;
use netstack3_tcp::{
    self as tcp, IsnGenerator, TcpContext, TcpCountersWithSocket, TcpCountersWithoutSocket,
    TcpDemuxContext, TcpDualStackContext, TcpSocketId, TcpSocketSet, TcpSocketState,
    WeakTcpSocketId,
};
use netstack3_udp::{self as udp, UdpCounters, UdpSocketId, UdpSocketSet, UdpSocketState};

use crate::context::prelude::*;
use crate::context::WrapLockLevel;
use crate::transport::TransportLayerTimerId;
use crate::{BindingsContext, BindingsTypes, CoreCtx, StackState};

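// Converts weak TCP socket identifiers into the bindings' timer dispatch ID by
// wrapping them in `TransportLayerTimerId::Tcp`, so TCP timers can be routed
// back to the owning socket when they fire.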
impl<I, BC, L> CoreTimerContext<WeakTcpSocketId<I, WeakDeviceId<BC>, BC>, BC> for CoreCtx<'_, BC, L>
where
    I: tcp::DualStackIpExt,
    BC: BindingsContext,
{
    fn convert_timer(dispatch_id: WeakTcpSocketId<I, WeakDeviceId<BC>, BC>) -> BC::DispatchId {
        TransportLayerTimerId::Tcp(dispatch_id.into()).into()
    }
}

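// Read/write access to the per-IP-version TCP demultiplexing state, guarded by
// the `TcpDemux` lock level. Instantiated for both IPv4 and IPv6 by the
// `instantiate_ip_impl_block` macro.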
#[netstack3_macros::instantiate_ip_impl_block(I)]
impl<I, L, BC> TcpDemuxContext<I, WeakDeviceId<BC>, BC> for CoreCtx<'_, BC, L>
where
    I: Ip,
    BC: BindingsContext,
    L: LockBefore<crate::lock_ordering::TcpDemux<I>>,
{
    type IpTransportCtx<'a> = CoreCtx<'a, BC, WrapLockLevel<crate::lock_ordering::TcpDemux<I>>>;
    fn with_demux<O, F: FnOnce(&tcp::DemuxState<I, WeakDeviceId<BC>, BC>) -> O>(
        &mut self,
        cb: F,
    ) -> O {
        cb(&self.read_lock::<crate::lock_ordering::TcpDemux<I>>())
    }

    fn with_demux_mut<O, F: FnOnce(&mut tcp::DemuxState<I, WeakDeviceId<BC>, BC>) -> O>(
        &mut self,
        cb: F,
    ) -> O {
        cb(&mut self.write_lock::<crate::lock_ordering::TcpDemux<I>>())
    }
}

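// `TcpContext` for IPv4. IPv4 TCP sockets never operate in dual-stack mode, so
// the dual-stack transport/demux context and converter are uninstantiable and
// every socket is handed to callers through the single-stack arm of
// `MaybeDualStack`.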
impl<L, BC> TcpContext<Ipv4, BC> for CoreCtx<'_, BC, L>
where
    BC: BindingsContext,
    L: LockBefore<crate::lock_ordering::TcpAllSocketsSet<Ipv4>>,
{
    type ThisStackIpTransportAndDemuxCtx<'a> =
        CoreCtx<'a, BC, WrapLockLevel<crate::lock_ordering::TcpSocketState<Ipv4>>>;
    type SingleStackIpTransportAndDemuxCtx<'a> =
        CoreCtx<'a, BC, WrapLockLevel<crate::lock_ordering::TcpSocketState<Ipv4>>>;

    type DualStackIpTransportAndDemuxCtx<'a> = UninstantiableWrapper<
        CoreCtx<'a, BC, WrapLockLevel<crate::lock_ordering::TcpSocketState<Ipv4>>>,
    >;

    type SingleStackConverter = ();
    type DualStackConverter = Uninstantiable;

    fn with_all_sockets_mut<O, F: FnOnce(&mut TcpSocketSet<Ipv4, Self::WeakDeviceId, BC>) -> O>(
        &mut self,
        cb: F,
    ) -> O {
        let mut all_sockets = self.write_lock::<crate::lock_ordering::TcpAllSocketsSet<Ipv4>>();
        cb(&mut *all_sockets)
    }

    fn for_each_socket<
        F: FnMut(
            &TcpSocketId<Ipv4, Self::WeakDeviceId, BC>,
            &TcpSocketState<Ipv4, Self::WeakDeviceId, BC>,
        ),
    >(
        &mut self,
        mut cb: F,
    ) {
        let (all_sockets, mut locked) =
            self.read_lock_and::<crate::lock_ordering::TcpAllSocketsSet<Ipv4>>();
        all_sockets.keys().for_each(|id| {
            let mut locked = locked.adopt(id);
            let guard = locked
                .read_lock_with::<crate::lock_ordering::TcpSocketState<Ipv4>, _>(|c| c.right());
            cb(id, &*guard);
        });
    }

    fn with_socket_mut_isn_transport_demux<
        O,
        F: for<'a> FnOnce(
            MaybeDualStack<
                (&'a mut Self::DualStackIpTransportAndDemuxCtx<'a>, Self::DualStackConverter),
                (&'a mut Self::SingleStackIpTransportAndDemuxCtx<'a>, Self::SingleStackConverter),
            >,
            &mut TcpSocketState<Ipv4, Self::WeakDeviceId, BC>,
            &IsnGenerator<BC::Instant>,
        ) -> O,
    >(
        &mut self,
        id: &TcpSocketId<Ipv4, Self::WeakDeviceId, BC>,
        cb: F,
    ) -> O {
        let isn = &self
            .unlocked_access::<crate::lock_ordering::UnlockedState>()
            .transport
            .tcp_state::<Ipv4>()
            .isn_generator;
        let mut locked = self.adopt(id);
        let (mut socket_state, mut restricted) = locked
            .write_lock_with_and::<crate::lock_ordering::TcpSocketState<Ipv4>, _>(|c| c.right());
        let mut restricted = restricted.cast_core_ctx();
        let maybe_dual_stack = MaybeDualStack::NotDualStack((&mut restricted, ()));
        cb(maybe_dual_stack, &mut socket_state, isn)
    }

    fn with_socket_and_converter<
        O,
        F: FnOnce(
            &TcpSocketState<Ipv4, Self::WeakDeviceId, BC>,
            MaybeDualStack<Self::DualStackConverter, Self::SingleStackConverter>,
        ) -> O,
    >(
        &mut self,
        id: &TcpSocketId<Ipv4, Self::WeakDeviceId, BC>,
        cb: F,
    ) -> O {
        let mut locked = self.adopt(id);
        let socket_state =
            locked.read_lock_with::<crate::lock_ordering::TcpSocketState<Ipv4>, _>(|c| c.right());
        cb(&socket_state, MaybeDualStack::NotDualStack(()))
    }
}

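// `TcpContext` for IPv6 mirrors the IPv4 implementation above, with the roles
// reversed: the single-stack context and converter are uninstantiable, and
// sockets are handed to callers through the dual-stack arm of `MaybeDualStack`.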
impl<L, BC> TcpContext<Ipv6, BC> for CoreCtx<'_, BC, L>
where
    BC: BindingsContext,
    L: LockBefore<crate::lock_ordering::TcpAllSocketsSet<Ipv6>>,
{
    type ThisStackIpTransportAndDemuxCtx<'a> =
        CoreCtx<'a, BC, WrapLockLevel<crate::lock_ordering::TcpSocketState<Ipv6>>>;
    type SingleStackIpTransportAndDemuxCtx<'a> = UninstantiableWrapper<
        CoreCtx<'a, BC, WrapLockLevel<crate::lock_ordering::TcpSocketState<Ipv6>>>,
    >;

    type DualStackIpTransportAndDemuxCtx<'a> =
        CoreCtx<'a, BC, WrapLockLevel<crate::lock_ordering::TcpSocketState<Ipv6>>>;

    type SingleStackConverter = Uninstantiable;
    type DualStackConverter = ();

    fn with_all_sockets_mut<O, F: FnOnce(&mut TcpSocketSet<Ipv6, Self::WeakDeviceId, BC>) -> O>(
        &mut self,
        cb: F,
    ) -> O {
        let mut all_sockets = self.write_lock::<crate::lock_ordering::TcpAllSocketsSet<Ipv6>>();
        cb(&mut *all_sockets)
    }

    fn for_each_socket<
        F: FnMut(
            &TcpSocketId<Ipv6, Self::WeakDeviceId, BC>,
            &TcpSocketState<Ipv6, Self::WeakDeviceId, BC>,
        ),
    >(
        &mut self,
        mut cb: F,
    ) {
        let (all_sockets, mut locked) =
            self.read_lock_and::<crate::lock_ordering::TcpAllSocketsSet<Ipv6>>();
        all_sockets.keys().for_each(|id| {
            let mut locked = locked.adopt(id);
            let guard = locked
                .read_lock_with::<crate::lock_ordering::TcpSocketState<Ipv6>, _>(|c| c.right());
            cb(id, &*guard);
        });
    }

    fn with_socket_mut_isn_transport_demux<
        O,
        F: for<'a> FnOnce(
            MaybeDualStack<
                (&'a mut Self::DualStackIpTransportAndDemuxCtx<'a>, Self::DualStackConverter),
                (&'a mut Self::SingleStackIpTransportAndDemuxCtx<'a>, Self::SingleStackConverter),
            >,
            &mut TcpSocketState<Ipv6, Self::WeakDeviceId, BC>,
            &IsnGenerator<BC::Instant>,
        ) -> O,
    >(
        &mut self,
        id: &TcpSocketId<Ipv6, Self::WeakDeviceId, BC>,
        cb: F,
    ) -> O {
        let isn = &self
            .unlocked_access::<crate::lock_ordering::UnlockedState>()
            .transport
            .tcp_state::<Ipv6>()
            .isn_generator;
        let mut locked = self.adopt(id);
        let (mut socket_state, mut restricted) = locked
            .write_lock_with_and::<crate::lock_ordering::TcpSocketState<Ipv6>, _>(|c| c.right());
        let mut restricted = restricted.cast_core_ctx();
        let maybe_dual_stack = MaybeDualStack::DualStack((&mut restricted, ()));
        cb(maybe_dual_stack, &mut socket_state, isn)
    }

    fn with_socket_and_converter<
        O,
        F: FnOnce(
            &TcpSocketState<Ipv6, Self::WeakDeviceId, BC>,
            MaybeDualStack<Self::DualStackConverter, Self::SingleStackConverter>,
        ) -> O,
    >(
        &mut self,
        id: &TcpSocketId<Ipv6, Self::WeakDeviceId, BC>,
        cb: F,
    ) -> O {
        let mut locked = self.adopt(id);
        let socket_state =
            locked.read_lock_with::<crate::lock_ordering::TcpSocketState<Ipv6>, _>(|c| c.right());
        cb(&socket_state, MaybeDualStack::DualStack(()))
    }
}

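// Dual-stack TCP support for IPv6 sockets: exposes the IPv4 demux alongside the
// IPv6 one and the per-socket dual-stack toggle. Note the acquisition order in
// `with_both_demux_mut`: the IPv4 demux lock is taken before the IPv6 demux
// lock, matching the crate's lock ordering.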
impl<L: LockBefore<crate::lock_ordering::TcpDemux<Ipv4>>, BC: BindingsContext>
    TcpDualStackContext<Ipv6, WeakDeviceId<BC>, BC> for CoreCtx<'_, BC, L>
{
    type DualStackIpTransportCtx<'a> =
        CoreCtx<'a, BC, WrapLockLevel<crate::lock_ordering::TcpDemux<Ipv6>>>;
    fn other_demux_id_converter(&self) -> impl tcp::DualStackDemuxIdConverter<Ipv6> {
        tcp::Ipv6SocketIdToIpv4DemuxIdConverter
    }

    fn dual_stack_enabled(&self, ip_options: &tcp::Ipv6Options) -> bool {
        ip_options.dual_stack_enabled
    }

    fn set_dual_stack_enabled(&self, ip_options: &mut tcp::Ipv6Options, value: bool) {
        ip_options.dual_stack_enabled = value;
    }

    fn with_both_demux_mut<
        O,
        F: FnOnce(
            &mut tcp::DemuxState<Ipv6, WeakDeviceId<BC>, BC>,
            &mut tcp::DemuxState<Ipv4, WeakDeviceId<BC>, BC>,
        ) -> O,
    >(
        &mut self,
        cb: F,
    ) -> O {
        let (mut demux_v4, mut locked) =
            self.write_lock_and::<crate::lock_ordering::TcpDemux<Ipv4>>();
        let mut demux_v6 = locked.write_lock::<crate::lock_ordering::TcpDemux<Ipv6>>();
        cb(&mut demux_v6, &mut demux_v4)
    }
}

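// UDP socket-state access for both IP versions (instantiated per version by the
// `instantiate_ip_impl_block` macro): per-socket state is reached by adopting
// the socket ID into the lock order and locking `UdpSocketState` on it.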
#[netstack3_macros::instantiate_ip_impl_block(I)]
impl<I: Ip, BC: BindingsContext, L: LockBefore<crate::lock_ordering::UdpAllSocketsSet<I>>>
    udp::StateContext<I, BC> for CoreCtx<'_, BC, L>
{
    type SocketStateCtx<'a> =
        CoreCtx<'a, BC, WrapLockLevel<crate::lock_ordering::UdpSocketState<I>>>;

    fn with_bound_state_context<O, F: FnOnce(&mut Self::SocketStateCtx<'_>) -> O>(
        &mut self,
        cb: F,
    ) -> O {
        cb(&mut self.cast_locked::<crate::lock_ordering::UdpSocketState<I>>())
    }

    fn with_all_sockets_mut<O, F: FnOnce(&mut UdpSocketSet<I, Self::WeakDeviceId, BC>) -> O>(
        &mut self,
        cb: F,
    ) -> O {
        cb(&mut self.write_lock::<crate::lock_ordering::UdpAllSocketsSet<I>>())
    }

    fn with_all_sockets<O, F: FnOnce(&UdpSocketSet<I, Self::WeakDeviceId, BC>) -> O>(
        &mut self,
        cb: F,
    ) -> O {
        cb(&self.read_lock::<crate::lock_ordering::UdpAllSocketsSet<I>>())
    }

    fn with_socket_state<
        O,
        F: FnOnce(&mut Self::SocketStateCtx<'_>, &UdpSocketState<I, Self::WeakDeviceId, BC>) -> O,
    >(
        &mut self,
        id: &UdpSocketId<I, Self::WeakDeviceId, BC>,
        cb: F,
    ) -> O {
        let mut locked = self.adopt(id);
        let (socket_state, mut restricted) =
            locked.read_lock_with_and::<crate::lock_ordering::UdpSocketState<I>, _>(|c| c.right());
        let mut restricted = restricted.cast_core_ctx();
        cb(&mut restricted, &socket_state)
    }

    fn with_socket_state_mut<
        O,
        F: FnOnce(&mut Self::SocketStateCtx<'_>, &mut UdpSocketState<I, Self::WeakDeviceId, BC>) -> O,
    >(
        &mut self,
        id: &UdpSocketId<I, Self::WeakDeviceId, BC>,
        cb: F,
    ) -> O {
        let mut locked = self.adopt(id);
        let (mut socket_state, mut restricted) =
            locked.write_lock_with_and::<crate::lock_ordering::UdpSocketState<I>, _>(|c| c.right());
        let mut restricted = restricted.cast_core_ctx();
        cb(&mut restricted, &mut socket_state)
    }

    fn should_send_port_unreachable(&mut self) -> bool {
        self.cast_with(|s| match I::VERSION {
            IpVersion::V4 => &s.transport.udpv4.send_port_unreachable,
            IpVersion::V6 => &s.transport.udpv6.send_port_unreachable,
        })
        .copied()
    }

    fn for_each_socket<
        F: FnMut(
            &mut Self::SocketStateCtx<'_>,
            &UdpSocketId<I, Self::WeakDeviceId, BC>,
            &UdpSocketState<I, Self::WeakDeviceId, BC>,
        ),
    >(
        &mut self,
        mut cb: F,
    ) {
        let (all_sockets, mut locked) =
            self.read_lock_and::<crate::lock_ordering::UdpAllSocketsSet<I>>();
        all_sockets.keys().for_each(|id| {
            let id = UdpSocketId::from(id.clone());
            let mut locked = locked.adopt(&id);
            let (socket_state, mut restricted) = locked
                .read_lock_with_and::<crate::lock_ordering::UdpSocketState<I>, _>(|c| c.right());
            let mut restricted = restricted.cast_core_ctx();
            cb(&mut restricted, &id, &socket_state);
        });
    }
}

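// `BoundStateContext` for IPv4 UDP: IPv4 sockets are never dual-stack, so the
// dual-stack context is uninstantiable and `dual_stack_context` always returns
// the non-dual-stack arm.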
impl<BC: BindingsContext, L: LockBefore<crate::lock_ordering::UdpBoundMap<Ipv4>>>
    udp::BoundStateContext<Ipv4, BC> for CoreCtx<'_, BC, L>
{
    type IpSocketsCtx<'a> = CoreCtx<'a, BC, WrapLockLevel<crate::lock_ordering::UdpBoundMap<Ipv4>>>;
    type DualStackContext = UninstantiableWrapper<Self>;
    type NonDualStackContext = Self;

    fn with_bound_sockets<
        O,
        F: FnOnce(&mut Self::IpSocketsCtx<'_>, &udp::BoundSockets<Ipv4, Self::WeakDeviceId, BC>) -> O,
    >(
        &mut self,
        cb: F,
    ) -> O {
        let (bound_sockets, mut locked) =
            self.read_lock_and::<crate::lock_ordering::UdpBoundMap<Ipv4>>();
        cb(&mut locked, &bound_sockets)
    }

    fn with_bound_sockets_mut<
        O,
        F: FnOnce(
            &mut Self::IpSocketsCtx<'_>,
            &mut udp::BoundSockets<Ipv4, Self::WeakDeviceId, BC>,
        ) -> O,
    >(
        &mut self,
        cb: F,
    ) -> O {
        let (mut bound_sockets, mut locked) =
            self.write_lock_and::<crate::lock_ordering::UdpBoundMap<Ipv4>>();
        cb(&mut locked, &mut bound_sockets)
    }

    fn dual_stack_context(
        &mut self,
    ) -> MaybeDualStack<&mut Self::DualStackContext, &mut Self::NonDualStackContext> {
        MaybeDualStack::NotDualStack(self)
    }

    fn with_transport_context<O, F: FnOnce(&mut Self::IpSocketsCtx<'_>) -> O>(
        &mut self,
        cb: F,
    ) -> O {
        cb(&mut self.cast_locked())
    }
}

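// `BoundStateContext` for IPv6 UDP: the dual-stack-capable side. The lock bound
// is on the IPv4 bound map, which allows dual-stack operations to lock both
// maps, IPv4 first (see `DualStackBoundStateContext` below).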
impl<BC: BindingsContext, L: LockBefore<crate::lock_ordering::UdpBoundMap<Ipv4>>>
    udp::BoundStateContext<Ipv6, BC> for CoreCtx<'_, BC, L>
{
    type IpSocketsCtx<'a> = CoreCtx<'a, BC, WrapLockLevel<crate::lock_ordering::UdpBoundMap<Ipv6>>>;
    type DualStackContext = Self;
    type NonDualStackContext = UninstantiableWrapper<Self>;

    fn with_bound_sockets<
        O,
        F: FnOnce(&mut Self::IpSocketsCtx<'_>, &udp::BoundSockets<Ipv6, Self::WeakDeviceId, BC>) -> O,
    >(
        &mut self,
        cb: F,
    ) -> O {
        let (bound_sockets, mut locked) =
            self.read_lock_and::<crate::lock_ordering::UdpBoundMap<Ipv6>>();
        cb(&mut locked, &bound_sockets)
    }

    fn with_bound_sockets_mut<
        O,
        F: FnOnce(
            &mut Self::IpSocketsCtx<'_>,
            &mut udp::BoundSockets<Ipv6, Self::WeakDeviceId, BC>,
        ) -> O,
    >(
        &mut self,
        cb: F,
    ) -> O {
        let (mut bound_sockets, mut locked) =
            self.write_lock_and::<crate::lock_ordering::UdpBoundMap<Ipv6>>();
        cb(&mut locked, &mut bound_sockets)
    }

    fn dual_stack_context(
        &mut self,
    ) -> MaybeDualStack<&mut Self::DualStackContext, &mut Self::NonDualStackContext> {
        MaybeDualStack::DualStack(self)
    }

    fn with_transport_context<O, F: FnOnce(&mut Self::IpSocketsCtx<'_>) -> O>(
        &mut self,
        cb: F,
    ) -> O {
        cb(&mut self.cast_locked::<crate::lock_ordering::UdpBoundMap<Ipv6>>())
    }
}

impl<L, BC: BindingsContext> udp::UdpStateContext for CoreCtx<'_, BC, L> {}

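// Dual-stack UDP bound-state access for IPv6 sockets. `with_both_bound_sockets_mut`
// locks the IPv4 bound map before the IPv6 one, per the crate's lock ordering;
// `with_other_bound_sockets_mut` locks only the IPv4 map and hands back a context
// already past the IPv6 lock level.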
impl<BC: BindingsContext, L: LockBefore<crate::lock_ordering::UdpBoundMap<Ipv4>>>
    udp::DualStackBoundStateContext<Ipv6, BC> for CoreCtx<'_, BC, L>
{
    type IpSocketsCtx<'a> = CoreCtx<'a, BC, WrapLockLevel<crate::lock_ordering::UdpBoundMap<Ipv6>>>;

    fn with_both_bound_sockets_mut<
        O,
        F: FnOnce(
            &mut Self::IpSocketsCtx<'_>,
            &mut udp::BoundSockets<Ipv6, Self::WeakDeviceId, BC>,
            &mut udp::BoundSockets<Ipv4, Self::WeakDeviceId, BC>,
        ) -> O,
    >(
        &mut self,
        cb: F,
    ) -> O {
        let (mut bound_v4, mut locked) =
            self.write_lock_and::<crate::lock_ordering::UdpBoundMap<Ipv4>>();
        let (mut bound_v6, mut locked) =
            locked.write_lock_and::<crate::lock_ordering::UdpBoundMap<Ipv6>>();
        cb(&mut locked, &mut bound_v6, &mut bound_v4)
    }

    fn with_other_bound_sockets_mut<
        O,
        F: FnOnce(
            &mut Self::IpSocketsCtx<'_>,
            &mut udp::BoundSockets<Ipv4, Self::WeakDeviceId, BC>,
        ) -> O,
    >(
        &mut self,
        cb: F,
    ) -> O {
        let (mut bound_v4, mut locked) =
            self.write_lock_and::<crate::lock_ordering::UdpBoundMap<Ipv4>>();
        cb(&mut locked.cast_locked::<crate::lock_ordering::UdpBoundMap<Ipv6>>(), &mut bound_v4)
    }

    fn with_transport_context<O, F: FnOnce(&mut Self::IpSocketsCtx<'_>) -> O>(
        &mut self,
        cb: F,
    ) -> O {
        cb(&mut self.cast_locked::<crate::lock_ordering::UdpBoundMap<Ipv6>>())
    }
}

impl<BC: BindingsContext, L: LockBefore<crate::lock_ordering::UdpBoundMap<Ipv4>>>
    udp::NonDualStackBoundStateContext<Ipv4, BC> for CoreCtx<'_, BC, L>
{
}

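// The impls below wire the transport-layer lock levels into the lock-ordering
// machinery: each `LockLevelFor` names the data guarded at that level, and each
// `DelegatedOrderedLockAccess` points the lock at where that data actually
// lives inside `StackState`.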
impl<I: tcp::DualStackIpExt, BT: BindingsTypes> LockLevelFor<StackState<BT>>
    for crate::lock_ordering::TcpAllSocketsSet<I>
{
    type Data = TcpSocketSet<I, WeakDeviceId<BT>, BT>;
}

impl<I: tcp::DualStackIpExt, BT: BindingsTypes>
    DelegatedOrderedLockAccess<TcpSocketSet<I, WeakDeviceId<BT>, BT>> for StackState<BT>
{
    type Inner = tcp::Sockets<I, WeakDeviceId<BT>, BT>;
    fn delegate_ordered_lock_access(&self) -> &Self::Inner {
        &self.transport.tcp_state::<I>().sockets
    }
}

impl<I: tcp::DualStackIpExt, BT: BindingsTypes> LockLevelFor<StackState<BT>>
    for crate::lock_ordering::TcpDemux<I>
{
    type Data = tcp::DemuxState<I, WeakDeviceId<BT>, BT>;
}

impl<I: tcp::DualStackIpExt, BT: BindingsTypes>
    DelegatedOrderedLockAccess<tcp::DemuxState<I, WeakDeviceId<BT>, BT>> for StackState<BT>
{
    type Inner = tcp::Sockets<I, WeakDeviceId<BT>, BT>;
    fn delegate_ordered_lock_access(&self) -> &Self::Inner {
        &self.transport.tcp_state::<I>().sockets
    }
}

impl<I: crate::transport::tcp::DualStackIpExt, D: WeakDeviceIdentifier, BT: BindingsTypes>
    LockLevelFor<TcpSocketId<I, D, BT>> for crate::lock_ordering::TcpSocketState<I>
{
    type Data = TcpSocketState<I, D, BT>;
}

impl<I: datagram::IpExt, D: WeakDeviceIdentifier, BT: BindingsTypes>
    LockLevelFor<UdpSocketId<I, D, BT>> for crate::lock_ordering::UdpSocketState<I>
{
    type Data = UdpSocketState<I, D, BT>;
}

impl<I: datagram::IpExt, BT: BindingsTypes> LockLevelFor<StackState<BT>>
    for crate::lock_ordering::UdpBoundMap<I>
{
    type Data = udp::BoundSockets<I, WeakDeviceId<BT>, BT>;
}

impl<I: datagram::IpExt, BT: BindingsTypes>
    DelegatedOrderedLockAccess<udp::BoundSockets<I, WeakDeviceId<BT>, BT>> for StackState<BT>
{
    type Inner = udp::Sockets<I, WeakDeviceId<BT>, BT>;
    fn delegate_ordered_lock_access(&self) -> &Self::Inner {
        &self.transport.udp_state::<I>().sockets
    }
}

impl<I: datagram::IpExt, BT: BindingsTypes> LockLevelFor<StackState<BT>>
    for crate::lock_ordering::UdpAllSocketsSet<I>
{
    type Data = UdpSocketSet<I, WeakDeviceId<BT>, BT>;
}

impl<I: datagram::IpExt, BT: BindingsTypes>
    DelegatedOrderedLockAccess<UdpSocketSet<I, WeakDeviceId<BT>, BT>> for StackState<BT>
{
    type Inner = udp::Sockets<I, WeakDeviceId<BT>, BT>;
    fn delegate_ordered_lock_access(&self) -> &Self::Inner {
        &self.transport.udp_state::<I>().sockets
    }
}

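// Counter contexts: stack-wide TCP and UDP counters live in unlocked stack
// state, so they can be read without taking any locks, while per-socket TCP
// counters are read straight off the socket ID.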
impl<BC: BindingsContext, I: Ip, L> CounterContext<TcpCountersWithSocket<I>>
    for CoreCtx<'_, BC, L>
{
    fn counters(&self) -> &TcpCountersWithSocket<I> {
        self.unlocked_access::<crate::lock_ordering::UnlockedState>()
            .transport
            .tcp_counters_with_socket::<I>()
    }
}

impl<BC: BindingsContext, I: Ip, L> CounterContext<TcpCountersWithoutSocket<I>>
    for CoreCtx<'_, BC, L>
{
    fn counters(&self) -> &TcpCountersWithoutSocket<I> {
        self.unlocked_access::<crate::lock_ordering::UnlockedState>()
            .transport
            .tcp_counters_without_socket::<I>()
    }
}

impl<BC: BindingsContext, I: netstack3_tcp::DualStackIpExt, L>
    ResourceCounterContext<TcpSocketId<I, WeakDeviceId<BC>, BC>, TcpCountersWithSocket<I>>
    for CoreCtx<'_, BC, L>
{
    fn per_resource_counters<'a>(
        &'a self,
        resource: &'a TcpSocketId<I, WeakDeviceId<BC>, BC>,
    ) -> &'a TcpCountersWithSocket<I> {
        resource.counters()
    }
}

impl<BC: BindingsContext, I: Ip, L> CounterContext<UdpCounters<I>> for CoreCtx<'_, BC, L> {
    fn counters(&self) -> &UdpCounters<I> {
        self.unlocked_access::<crate::lock_ordering::UnlockedState>().transport.udp_counters::<I>()
    }
}