use alloc::collections::HashMap;

use lock_order::lock::{DelegatedOrderedLockAccess, LockLevelFor};
use lock_order::relation::LockBefore;
use log::trace;
use net_types::ip::{Ip, IpMarked, Ipv4, Ipv4Addr, Ipv6, Ipv6Addr, Ipv6SourceAddr};
use net_types::{MulticastAddr, SpecifiedAddr};
use netstack3_base::socket::SocketIpAddr;
use netstack3_base::{
    CounterContext, Icmpv4ErrorCode, Icmpv6ErrorCode, Marks, ResourceCounterContext, TokenBucket,
    WeakDeviceIdentifier,
};
use netstack3_datagram as datagram;
use netstack3_device::{for_any_device_id, BaseDeviceId, DeviceId, DeviceStateSpec, WeakDeviceId};
use netstack3_icmp_echo::{
    self as icmp_echo, IcmpEchoBoundStateContext, IcmpEchoContextMarker,
    IcmpEchoIpTransportContext, IcmpEchoStateContext, IcmpSocketId, IcmpSocketSet, IcmpSocketState,
    IcmpSockets,
};
use netstack3_ip::device::{self, IpDeviceBindingsContext, IpDeviceIpExt};
use netstack3_ip::gmp::{IgmpCounters, MldCounters};
use netstack3_ip::icmp::{
    self, IcmpIpTransportContext, IcmpRxCounters, IcmpState, IcmpTxCounters, InnerIcmpContext,
    InnerIcmpv4Context, NdpCounters,
};
use netstack3_ip::multicast_forwarding::MulticastForwardingState;
use netstack3_ip::raw::RawIpSocketMap;
use netstack3_ip::{
    self as ip, FragmentContext, IpCounters, IpDeviceContext, IpHeaderInfo, IpLayerBindingsContext,
    IpLayerIpExt, IpPacketFragmentCache, IpRouteTableContext, IpRouteTablesContext, IpStateContext,
    IpStateInner, IpTransportContext, IpTransportDispatchContext, LocalDeliveryPacketInfo,
    MulticastMembershipHandler, PmtuCache, PmtuContext, ResolveRouteError, ResolvedRoute,
    RoutingTable, RoutingTableId, RulesTable, TransportReceiveError,
};
use netstack3_sync::rc::Primary;
use netstack3_sync::RwLock;
use netstack3_tcp::TcpIpTransportContext;
use netstack3_udp::UdpIpTransportContext;
use packet::BufferMut;
use packet_formats::ip::{IpProto, Ipv4Proto, Ipv6Proto};

use crate::context::prelude::*;
use crate::context::WrapLockLevel;
use crate::{BindingsContext, BindingsTypes, CoreCtx, StackState};

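// Integration impls: `CoreCtx` satisfies the IP layer's context traits by
// acquiring the locks named in `crate::lock_ordering`. Each `with_*` accessor
// takes the appropriate lock, hands the guarded state to the caller's closure,
// and releases the lock when the closure returns.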
impl<I, BT, L> FragmentContext<I, BT> for CoreCtx<'_, BT, L>
where
    I: IpLayerIpExt,
    BT: BindingsTypes,
    L: LockBefore<crate::lock_ordering::IpStateFragmentCache<I>>,
{
    fn with_state_mut<O, F: FnOnce(&mut IpPacketFragmentCache<I, BT>) -> O>(&mut self, cb: F) -> O {
        let mut cache = self.lock::<crate::lock_ordering::IpStateFragmentCache<I>>();
        cb(&mut cache)
    }
}

impl<BC: BindingsContext, L: LockBefore<crate::lock_ordering::IpStatePmtuCache<Ipv4>>>
    PmtuContext<Ipv4, BC> for CoreCtx<'_, BC, L>
{
    fn with_state_mut<O, F: FnOnce(&mut PmtuCache<Ipv4, BC>) -> O>(&mut self, cb: F) -> O {
        let mut cache = self.lock::<crate::lock_ordering::IpStatePmtuCache<Ipv4>>();
        cb(&mut cache)
    }
}

impl<BC: BindingsContext, L: LockBefore<crate::lock_ordering::IpStatePmtuCache<Ipv6>>>
    PmtuContext<Ipv6, BC> for CoreCtx<'_, BC, L>
{
    fn with_state_mut<O, F: FnOnce(&mut PmtuCache<Ipv6, BC>) -> O>(&mut self, cb: F) -> O {
        let mut cache = self.lock::<crate::lock_ordering::IpStatePmtuCache<Ipv6>>();
        cb(&mut cache)
    }
}

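// Multicast membership is forwarded to the IP device layer. Picking a device
// for an otherwise unbound join works by resolving an output route to the
// multicast destination and using that route's device.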
impl<
        I: Ip + IpDeviceIpExt + IpLayerIpExt,
        BC: BindingsContext
            + IpDeviceBindingsContext<I, Self::DeviceId>
            + IpLayerBindingsContext<I, Self::DeviceId>,
        L: LockBefore<crate::lock_ordering::IpState<I>>,
    > MulticastMembershipHandler<I, BC> for CoreCtx<'_, BC, L>
where
    Self: device::IpDeviceConfigurationContext<I, BC> + IpStateContext<I> + IpDeviceContext<I>,
{
    fn join_multicast_group(
        &mut self,
        bindings_ctx: &mut BC,
        device: &Self::DeviceId,
        addr: MulticastAddr<I::Addr>,
    ) {
        ip::device::join_ip_multicast::<I, _, _>(self, bindings_ctx, device, addr)
    }

    fn leave_multicast_group(
        &mut self,
        bindings_ctx: &mut BC,
        device: &Self::DeviceId,
        addr: MulticastAddr<I::Addr>,
    ) {
        ip::device::leave_ip_multicast::<I, _, _>(self, bindings_ctx, device, addr)
    }

    fn select_device_for_multicast_group(
        &mut self,
        addr: MulticastAddr<I::Addr>,
        marks: &Marks,
    ) -> Result<Self::DeviceId, ResolveRouteError> {
        let remote_ip = SocketIpAddr::new_from_multicast(addr);
        let ResolvedRoute {
            src_addr: _,
            device,
            local_delivery_device,
            next_hop: _,
            internal_forwarding: _,
        } = ip::resolve_output_route_to_destination(self, None, None, Some(remote_ip), marks)?;
        debug_assert!(local_delivery_device.is_none(), "{:?}", local_delivery_device);
        Ok(device)
    }
}

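// Counter accessors go through `UnlockedState`: counters can be read and
// bumped without holding any lock level, so they are reachable from any
// locking context.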
impl<BT: BindingsTypes, I: datagram::DualStackIpExt, L> CounterContext<IcmpTxCounters<I>>
    for CoreCtx<'_, BT, L>
{
    fn counters(&self) -> &IcmpTxCounters<I> {
        &self
            .unlocked_access::<crate::lock_ordering::UnlockedState>()
            .inner_icmp_state::<I>()
            .tx_counters
    }
}

impl<BT: BindingsTypes, I: datagram::DualStackIpExt, L> CounterContext<IcmpRxCounters<I>>
    for CoreCtx<'_, BT, L>
{
    fn counters(&self) -> &IcmpRxCounters<I> {
        &self
            .unlocked_access::<crate::lock_ordering::UnlockedState>()
            .inner_icmp_state::<I>()
            .rx_counters
    }
}

impl<BT: BindingsTypes, L> CounterContext<IgmpCounters> for CoreCtx<'_, BT, L> {
    fn counters(&self) -> &IgmpCounters {
        &self
            .unlocked_access::<crate::lock_ordering::UnlockedState>()
            .inner_ip_state::<Ipv4>()
            .igmp_counters()
    }
}

impl<BT: BindingsTypes, L> ResourceCounterContext<DeviceId<BT>, IgmpCounters>
    for CoreCtx<'_, BT, L>
{
    fn per_resource_counters<'a>(&'a self, device_id: &'a DeviceId<BT>) -> &'a IgmpCounters {
        for_any_device_id!(
            DeviceId,
            device_id,
            id => self.per_resource_counters(id)
        )
    }
}

impl<BT: BindingsTypes, D: DeviceStateSpec, L>
    ResourceCounterContext<BaseDeviceId<D, BT>, IgmpCounters> for CoreCtx<'_, BT, L>
{
    fn per_resource_counters<'a>(&'a self, device_id: &'a BaseDeviceId<D, BT>) -> &'a IgmpCounters {
        device_id
            .device_state(
                &self.unlocked_access::<crate::lock_ordering::UnlockedState>().device.origin,
            )
            .as_ref()
            .igmp_counters()
    }
}

impl<BT: BindingsTypes, L> CounterContext<MldCounters> for CoreCtx<'_, BT, L> {
    fn counters(&self) -> &MldCounters {
        &self
            .unlocked_access::<crate::lock_ordering::UnlockedState>()
            .inner_ip_state::<Ipv6>()
            .mld_counters()
    }
}

impl<BT: BindingsTypes, L> ResourceCounterContext<DeviceId<BT>, MldCounters>
    for CoreCtx<'_, BT, L>
{
    fn per_resource_counters<'a>(&'a self, device_id: &'a DeviceId<BT>) -> &'a MldCounters {
        for_any_device_id!(
            DeviceId,
            device_id,
            id => self.per_resource_counters(id)
        )
    }
}

impl<BT: BindingsTypes, D: DeviceStateSpec, L>
    ResourceCounterContext<BaseDeviceId<D, BT>, MldCounters> for CoreCtx<'_, BT, L>
{
    fn per_resource_counters<'a>(&'a self, device_id: &'a BaseDeviceId<D, BT>) -> &'a MldCounters {
        device_id
            .device_state(
                &self.unlocked_access::<crate::lock_ordering::UnlockedState>().device.origin,
            )
            .as_ref()
            .mld_counters()
    }
}

impl<BT: BindingsTypes, L> CounterContext<NdpCounters> for CoreCtx<'_, BT, L> {
    fn counters(&self) -> &NdpCounters {
        &self.unlocked_access::<crate::lock_ordering::UnlockedState>().ipv6.icmp.ndp_counters
    }
}

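// ICMPv4-specific configuration: whether to answer timestamp requests is a
// stack-wide flag in the ICMPv4 state, readable without taking locks.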
impl<
        BC: BindingsContext,
        L: LockBefore<crate::lock_ordering::IcmpBoundMap<Ipv4>>
            + LockBefore<crate::lock_ordering::TcpAllSocketsSet<Ipv4>>
            + LockBefore<crate::lock_ordering::UdpAllSocketsSet<Ipv4>>,
    > InnerIcmpv4Context<BC> for CoreCtx<'_, BC, L>
{
    fn should_send_timestamp_reply(&self) -> bool {
        self.unlocked_access::<crate::lock_ordering::UnlockedState>().ipv4.icmp.send_timestamp_reply
    }
}

impl<BT: BindingsTypes, I: IpLayerIpExt, L> CounterContext<IpCounters<I>> for CoreCtx<'_, BT, L> {
    fn counters(&self) -> &IpCounters<I> {
        &self.unlocked_access::<crate::lock_ordering::UnlockedState>().inner_ip_state().counters()
    }
}

impl<BT: BindingsTypes, I: IpLayerIpExt, L> ResourceCounterContext<DeviceId<BT>, IpCounters<I>>
    for CoreCtx<'_, BT, L>
{
    fn per_resource_counters<'a>(&'a self, device_id: &'a DeviceId<BT>) -> &'a IpCounters<I> {
        for_any_device_id!(
            DeviceId,
            device_id,
            id => self.per_resource_counters(id)
        )
    }
}

impl<BT: BindingsTypes, D: DeviceStateSpec, I: IpLayerIpExt, L>
    ResourceCounterContext<BaseDeviceId<D, BT>, IpCounters<I>> for CoreCtx<'_, BT, L>
{
    fn per_resource_counters<'a>(
        &'a self,
        device_id: &'a BaseDeviceId<D, BT>,
    ) -> &'a IpCounters<I> {
        device_id
            .device_state(
                &self.unlocked_access::<crate::lock_ordering::UnlockedState>().device.origin,
            )
            .as_ref()
            .ip_counters::<I>()
    }
}

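// Routing state is split across three lock levels: the rules table, the map
// of all routing tables, and each individual routing table. The callback
// contexts are wrapped in `WrapLockLevel` so that only locks ordered after
// the one already held can be acquired from inside the callback.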
#[netstack3_macros::instantiate_ip_impl_block(I)]
impl<I, BC, L> IpStateContext<I> for CoreCtx<'_, BC, L>
where
    I: IpLayerIpExt,
    BC: BindingsContext,
    L: LockBefore<crate::lock_ordering::IpStateRulesTable<I>>,
{
    type IpRouteTablesCtx<'a> =
        CoreCtx<'a, BC, WrapLockLevel<crate::lock_ordering::IpStateRulesTable<I>>>;

    fn with_rules_table<
        O,
        F: FnOnce(&mut Self::IpRouteTablesCtx<'_>, &RulesTable<I, Self::DeviceId>) -> O,
    >(
        &mut self,
        cb: F,
    ) -> O {
        let (rules_table, mut restricted) =
            self.read_lock_and::<crate::lock_ordering::IpStateRulesTable<I>>();
        cb(&mut restricted, &rules_table)
    }

    fn with_rules_table_mut<
        O,
        F: FnOnce(&mut Self::IpRouteTablesCtx<'_>, &mut RulesTable<I, Self::DeviceId>) -> O,
    >(
        &mut self,
        cb: F,
    ) -> O {
        let (mut rules_table, mut restricted) =
            self.write_lock_and::<crate::lock_ordering::IpStateRulesTable<I>>();
        cb(&mut restricted, &mut rules_table)
    }
}

#[netstack3_macros::instantiate_ip_impl_block(I)]
impl<I, BC, L> IpRouteTablesContext<I> for CoreCtx<'_, BC, L>
where
    I: IpLayerIpExt,
    BC: BindingsContext,
    L: LockBefore<crate::lock_ordering::IpStateRoutingTables<I>>,
{
    type Ctx<'a> = CoreCtx<'a, BC, WrapLockLevel<crate::lock_ordering::IpStateRoutingTables<I>>>;

    fn main_table_id(&self) -> RoutingTableId<I, Self::DeviceId> {
        self.unlocked_access::<crate::lock_ordering::UnlockedState>()
            .inner_ip_state()
            .main_table_id()
            .clone()
    }

    fn with_ip_routing_tables<
        O,
        F: FnOnce(
            &mut Self::Ctx<'_>,
            &HashMap<
                RoutingTableId<I, Self::DeviceId>,
                Primary<RwLock<RoutingTable<I, Self::DeviceId>>>,
            >,
        ) -> O,
    >(
        &mut self,
        cb: F,
    ) -> O {
        let (table, mut ctx) = self.lock_and::<crate::lock_ordering::IpStateRoutingTables<I>>();
        cb(&mut ctx, &table)
    }

    fn with_ip_routing_tables_mut<
        O,
        F: FnOnce(
            &mut HashMap<
                RoutingTableId<I, Self::DeviceId>,
                Primary<RwLock<RoutingTable<I, Self::DeviceId>>>,
            >,
        ) -> O,
    >(
        &mut self,
        cb: F,
    ) -> O {
        let mut tables = self.lock::<crate::lock_ordering::IpStateRoutingTables<I>>();
        cb(&mut *tables)
    }
}

#[netstack3_macros::instantiate_ip_impl_block(I)]
impl<I, BC, L> IpRouteTableContext<I> for CoreCtx<'_, BC, L>
where
    I: IpLayerIpExt,
    BC: BindingsContext,
    L: LockBefore<crate::lock_ordering::IpStateRoutingTable<I>>,
{
    type IpDeviceIdCtx<'a> =
        CoreCtx<'a, BC, WrapLockLevel<crate::lock_ordering::IpStateRoutingTable<I>>>;

    fn with_ip_routing_table<
        O,
        F: FnOnce(&mut Self::IpDeviceIdCtx<'_>, &RoutingTable<I, Self::DeviceId>) -> O,
    >(
        &mut self,
        table_id: &RoutingTableId<I, Self::DeviceId>,
        cb: F,
    ) -> O {
        let mut table = self.adopt(table_id);
        let (table, mut restricted) = table
            .read_lock_with_and::<crate::lock_ordering::IpStateRoutingTable<I>, _>(|c| c.right());
        let mut restricted = restricted.cast_core_ctx();
        cb(&mut restricted, &table)
    }

    fn with_ip_routing_table_mut<
        O,
        F: FnOnce(&mut Self::IpDeviceIdCtx<'_>, &mut RoutingTable<I, Self::DeviceId>) -> O,
    >(
        &mut self,
        table_id: &RoutingTableId<I, Self::DeviceId>,
        cb: F,
    ) -> O {
        let mut table = self.adopt(table_id);
        let (mut table, mut restricted) = table
            .write_lock_with_and::<crate::lock_ordering::IpStateRoutingTable<I>, _>(|c| c.right());
        let mut restricted = restricted.cast_core_ctx();
        cb(&mut restricted, &mut table)
    }
}

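// Local delivery: once a packet is accepted for this host, its payload is
// handed to the transport module matching the protocol / next-header field
// (ICMP, IGMP, TCP, or UDP). Unrecognized protocol numbers are rejected with
// `TransportReceiveError::ProtocolUnsupported`.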
impl<BC: BindingsContext, L: LockBefore<crate::lock_ordering::IcmpAllSocketsSet<Ipv4>>>
    IpTransportDispatchContext<Ipv4, BC> for CoreCtx<'_, BC, L>
{
    fn dispatch_receive_ip_packet<B: BufferMut, H: IpHeaderInfo<Ipv4>>(
        &mut self,
        bindings_ctx: &mut BC,
        device: &Self::DeviceId,
        src_ip: Ipv4Addr,
        dst_ip: SpecifiedAddr<Ipv4Addr>,
        proto: Ipv4Proto,
        body: B,
        info: &LocalDeliveryPacketInfo<Ipv4, H>,
    ) -> Result<(), TransportReceiveError> {
        match proto {
            Ipv4Proto::Icmp => {
                <IcmpIpTransportContext as IpTransportContext<Ipv4, _, _>>::receive_ip_packet(
                    self,
                    bindings_ctx,
                    device,
                    src_ip,
                    dst_ip,
                    body,
                    info,
                )
                .map_err(|(_body, err)| err)
            }
            Ipv4Proto::Igmp => {
                device::receive_igmp_packet(self, bindings_ctx, device, src_ip, dst_ip, body, info);
                Ok(())
            }
            Ipv4Proto::Proto(IpProto::Udp) => {
                <UdpIpTransportContext as IpTransportContext<Ipv4, _, _>>::receive_ip_packet(
                    self,
                    bindings_ctx,
                    device,
                    src_ip,
                    dst_ip,
                    body,
                    info,
                )
                .map_err(|(_body, err)| err)
            }
            Ipv4Proto::Proto(IpProto::Tcp) => {
                <TcpIpTransportContext as IpTransportContext<Ipv4, _, _>>::receive_ip_packet(
                    self,
                    bindings_ctx,
                    device,
                    src_ip,
                    dst_ip,
                    body,
                    info,
                )
                .map_err(|(_body, err)| err)
            }
            _ => Err(TransportReceiveError::ProtocolUnsupported),
        }
    }
}

impl<BC: BindingsContext, L: LockBefore<crate::lock_ordering::IcmpAllSocketsSet<Ipv6>>>
    IpTransportDispatchContext<Ipv6, BC> for CoreCtx<'_, BC, L>
{
    fn dispatch_receive_ip_packet<B: BufferMut, H: IpHeaderInfo<Ipv6>>(
        &mut self,
        bindings_ctx: &mut BC,
        device: &Self::DeviceId,
        src_ip: Ipv6SourceAddr,
        dst_ip: SpecifiedAddr<Ipv6Addr>,
        proto: Ipv6Proto,
        body: B,
        info: &LocalDeliveryPacketInfo<Ipv6, H>,
    ) -> Result<(), TransportReceiveError> {
        match proto {
            Ipv6Proto::Icmpv6 => {
                <IcmpIpTransportContext as IpTransportContext<Ipv6, _, _>>::receive_ip_packet(
                    self,
                    bindings_ctx,
                    device,
                    src_ip,
                    dst_ip,
                    body,
                    info,
                )
                .map_err(|(_body, err)| err)
            }
            Ipv6Proto::NoNextHeader => Ok(()),
            Ipv6Proto::Proto(IpProto::Tcp) => {
                <TcpIpTransportContext as IpTransportContext<Ipv6, _, _>>::receive_ip_packet(
                    self,
                    bindings_ctx,
                    device,
                    src_ip,
                    dst_ip,
                    body,
                    info,
                )
                .map_err(|(_body, err)| err)
            }
            Ipv6Proto::Proto(IpProto::Udp) => {
                <UdpIpTransportContext as IpTransportContext<Ipv6, _, _>>::receive_ip_packet(
                    self,
                    bindings_ctx,
                    device,
                    src_ip,
                    dst_ip,
                    body,
                    info,
                )
                .map_err(|(_body, err)| err)
            }
            _ => Err(TransportReceiveError::ProtocolUnsupported),
        }
    }
}

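// Received ICMP errors are demultiplexed the same way: the protocol of the
// quoted original packet selects the transport module that inspects the
// error. The token bucket accessed below rate-limits the ICMP error messages
// this stack sends.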
impl<
        BC: BindingsContext,
        L: LockBefore<crate::lock_ordering::IcmpBoundMap<Ipv4>>
            + LockBefore<crate::lock_ordering::TcpAllSocketsSet<Ipv4>>
            + LockBefore<crate::lock_ordering::UdpAllSocketsSet<Ipv4>>,
    > InnerIcmpContext<Ipv4, BC> for CoreCtx<'_, BC, L>
{
    type EchoTransportContext = IcmpEchoIpTransportContext;

    fn receive_icmp_error(
        &mut self,
        bindings_ctx: &mut BC,
        device: &DeviceId<BC>,
        original_src_ip: Option<SpecifiedAddr<Ipv4Addr>>,
        original_dst_ip: SpecifiedAddr<Ipv4Addr>,
        original_proto: Ipv4Proto,
        original_body: &[u8],
        err: Icmpv4ErrorCode,
    ) {
        self.increment_both(device, |c: &IpCounters<Ipv4>| &c.receive_icmp_error);
        trace!("InnerIcmpContext<Ipv4>::receive_icmp_error({:?})", err);

        match original_proto {
            Ipv4Proto::Icmp => {
                <IcmpIpTransportContext as IpTransportContext<Ipv4, _, _>>::receive_icmp_error(
                    self,
                    bindings_ctx,
                    device,
                    original_src_ip,
                    original_dst_ip,
                    original_body,
                    err,
                )
            }
            Ipv4Proto::Proto(IpProto::Tcp) => {
                <TcpIpTransportContext as IpTransportContext<Ipv4, _, _>>::receive_icmp_error(
                    self,
                    bindings_ctx,
                    device,
                    original_src_ip,
                    original_dst_ip,
                    original_body,
                    err,
                )
            }
            Ipv4Proto::Proto(IpProto::Udp) => {
                <UdpIpTransportContext as IpTransportContext<Ipv4, _, _>>::receive_icmp_error(
                    self,
                    bindings_ctx,
                    device,
                    original_src_ip,
                    original_dst_ip,
                    original_body,
                    err,
                )
            }
            _ => <() as IpTransportContext<Ipv4, _, _>>::receive_icmp_error(
                self,
                bindings_ctx,
                device,
                original_src_ip,
                original_dst_ip,
                original_body,
                err,
            ),
        }
    }

    fn with_error_send_bucket_mut<O, F: FnOnce(&mut TokenBucket<BC::Instant>) -> O>(
        &mut self,
        cb: F,
    ) -> O {
        cb(&mut self.lock::<crate::lock_ordering::IcmpTokenBucket<Ipv4>>())
    }
}

impl<
        BC: BindingsContext,
        L: LockBefore<crate::lock_ordering::IcmpBoundMap<Ipv6>>
            + LockBefore<crate::lock_ordering::TcpAllSocketsSet<Ipv6>>
            + LockBefore<crate::lock_ordering::UdpAllSocketsSet<Ipv6>>,
    > InnerIcmpContext<Ipv6, BC> for CoreCtx<'_, BC, L>
{
    type EchoTransportContext = IcmpEchoIpTransportContext;

    fn receive_icmp_error(
        &mut self,
        bindings_ctx: &mut BC,
        device: &DeviceId<BC>,
        original_src_ip: Option<SpecifiedAddr<Ipv6Addr>>,
        original_dst_ip: SpecifiedAddr<Ipv6Addr>,
        original_next_header: Ipv6Proto,
        original_body: &[u8],
        err: Icmpv6ErrorCode,
    ) {
        self.increment_both(device, |c: &IpCounters<Ipv6>| &c.receive_icmp_error);
        trace!("InnerIcmpContext<Ipv6>::receive_icmp_error({:?})", err);

        match original_next_header {
            Ipv6Proto::Icmpv6 => {
                <IcmpIpTransportContext as IpTransportContext<Ipv6, _, _>>::receive_icmp_error(
                    self,
                    bindings_ctx,
                    device,
                    original_src_ip,
                    original_dst_ip,
                    original_body,
                    err,
                )
            }
            Ipv6Proto::Proto(IpProto::Tcp) => {
                <TcpIpTransportContext as IpTransportContext<Ipv6, _, _>>::receive_icmp_error(
                    self,
                    bindings_ctx,
                    device,
                    original_src_ip,
                    original_dst_ip,
                    original_body,
                    err,
                )
            }
            Ipv6Proto::Proto(IpProto::Udp) => {
                <UdpIpTransportContext as IpTransportContext<Ipv6, _, _>>::receive_icmp_error(
                    self,
                    bindings_ctx,
                    device,
                    original_src_ip,
                    original_dst_ip,
                    original_body,
                    err,
                )
            }
            _ => <() as IpTransportContext<Ipv6, _, _>>::receive_icmp_error(
                self,
                bindings_ctx,
                device,
                original_src_ip,
                original_dst_ip,
                original_body,
                err,
            ),
        }
    }

    fn with_error_send_bucket_mut<O, F: FnOnce(&mut TokenBucket<BC::Instant>) -> O>(
        &mut self,
        cb: F,
    ) -> O {
        cb(&mut self.lock::<crate::lock_ordering::IcmpTokenBucket<Ipv6>>())
    }
}

impl<L, BC: BindingsContext> icmp::IcmpStateContext for CoreCtx<'_, BC, L> {}

impl<BT: BindingsTypes, L> IcmpEchoContextMarker for CoreCtx<'_, BT, L> {}

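// ICMP echo sockets follow the usual socket locking pattern: the all-sockets
// set is locked first, then each socket's individual state. `adopt` and
// `cast_core_ctx` thread the per-socket lock into the overall lock order.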
#[netstack3_macros::instantiate_ip_impl_block(I)]
impl<I, BC: BindingsContext, L: LockBefore<crate::lock_ordering::IcmpAllSocketsSet<I>>>
    IcmpEchoStateContext<I, BC> for CoreCtx<'_, BC, L>
{
    type SocketStateCtx<'a> =
        CoreCtx<'a, BC, WrapLockLevel<crate::lock_ordering::IcmpSocketState<I>>>;

    fn with_all_sockets_mut<O, F: FnOnce(&mut IcmpSocketSet<I, Self::WeakDeviceId, BC>) -> O>(
        &mut self,
        cb: F,
    ) -> O {
        cb(&mut self.write_lock::<crate::lock_ordering::IcmpAllSocketsSet<I>>())
    }

    fn with_all_sockets<O, F: FnOnce(&IcmpSocketSet<I, Self::WeakDeviceId, BC>) -> O>(
        &mut self,
        cb: F,
    ) -> O {
        cb(&self.read_lock::<crate::lock_ordering::IcmpAllSocketsSet<I>>())
    }

    fn with_socket_state<
        O,
        F: FnOnce(&mut Self::SocketStateCtx<'_>, &IcmpSocketState<I, Self::WeakDeviceId, BC>) -> O,
    >(
        &mut self,
        id: &IcmpSocketId<I, Self::WeakDeviceId, BC>,
        cb: F,
    ) -> O {
        let mut locked = self.adopt(id);
        let (socket_state, mut restricted) =
            locked.read_lock_with_and::<crate::lock_ordering::IcmpSocketState<I>, _>(|c| c.right());
        let mut restricted = restricted.cast_core_ctx();
        cb(&mut restricted, &socket_state)
    }

    fn with_socket_state_mut<
        O,
        F: FnOnce(&mut Self::SocketStateCtx<'_>, &mut IcmpSocketState<I, Self::WeakDeviceId, BC>) -> O,
    >(
        &mut self,
        id: &IcmpSocketId<I, Self::WeakDeviceId, BC>,
        cb: F,
    ) -> O {
        let mut locked = self.adopt(id);
        let (mut socket_state, mut restricted) = locked
            .write_lock_with_and::<crate::lock_ordering::IcmpSocketState<I>, _>(|c| c.right());
        let mut restricted = restricted.cast_core_ctx();
        cb(&mut restricted, &mut socket_state)
    }

    fn with_bound_state_context<O, F: FnOnce(&mut Self::SocketStateCtx<'_>) -> O>(
        &mut self,
        cb: F,
    ) -> O {
        cb(&mut self.cast_locked::<crate::lock_ordering::IcmpSocketState<I>>())
    }

    fn for_each_socket<
        F: FnMut(
            &mut Self::SocketStateCtx<'_>,
            &IcmpSocketId<I, Self::WeakDeviceId, BC>,
            &IcmpSocketState<I, Self::WeakDeviceId, BC>,
        ),
    >(
        &mut self,
        mut cb: F,
    ) {
        let (all_sockets, mut locked) =
            self.read_lock_and::<crate::lock_ordering::IcmpAllSocketsSet<I>>();
        all_sockets.keys().for_each(|id| {
            let id = IcmpSocketId::from(id.clone());
            let mut locked = locked.adopt(&id);
            let (socket_state, mut restricted) = locked
                .read_lock_with_and::<crate::lock_ordering::IcmpSocketState<I>, _>(|c| c.right());
            let mut restricted = restricted.cast_core_ctx();
            cb(&mut restricted, &id, &socket_state);
        });
    }
}

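// The bound-socket demux map is taken with a single write lock; the callback
// also receives a context restricted to lock levels ordered after
// `IcmpBoundMap`, which it can use for IP socket operations.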
#[netstack3_macros::instantiate_ip_impl_block(I)]
impl<I, BC: BindingsContext, L: LockBefore<crate::lock_ordering::IcmpBoundMap<I>>>
    IcmpEchoBoundStateContext<I, BC> for CoreCtx<'_, BC, L>
{
    type IpSocketsCtx<'a> = CoreCtx<'a, BC, WrapLockLevel<crate::lock_ordering::IcmpBoundMap<I>>>;
    fn with_icmp_ctx_and_sockets_mut<
        O,
        F: FnOnce(
            &mut Self::IpSocketsCtx<'_>,
            &mut icmp_echo::BoundSockets<I, Self::WeakDeviceId, BC>,
        ) -> O,
    >(
        &mut self,
        cb: F,
    ) -> O {
        let (mut sockets, mut core_ctx) =
            self.write_lock_and::<crate::lock_ordering::IcmpBoundMap<I>>();
        cb(&mut core_ctx, &mut sockets)
    }
}

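// The rest of this file wires lock-order tokens to concrete data.
// `LockLevelFor` names the data type guarded at each lock level, while
// `DelegatedOrderedLockAccess` forwards lock access on `StackState` to the
// inner state struct that actually owns the lock.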
impl<I: IpLayerIpExt, BT: BindingsTypes> DelegatedOrderedLockAccess<IpPacketFragmentCache<I, BT>>
    for StackState<BT>
{
    type Inner = IpStateInner<I, DeviceId<BT>, BT>;
    fn delegate_ordered_lock_access(&self) -> &Self::Inner {
        self.inner_ip_state()
    }
}

impl<I: IpLayerIpExt, BT: BindingsTypes> LockLevelFor<StackState<BT>>
    for crate::lock_ordering::IpStateFragmentCache<I>
{
    type Data = IpPacketFragmentCache<I, BT>;
}

impl<I: IpLayerIpExt, BT: BindingsTypes> DelegatedOrderedLockAccess<PmtuCache<I, BT>>
    for StackState<BT>
{
    type Inner = IpStateInner<I, DeviceId<BT>, BT>;
    fn delegate_ordered_lock_access(&self) -> &Self::Inner {
        self.inner_ip_state()
    }
}

impl<I: IpLayerIpExt, BT: BindingsTypes> LockLevelFor<StackState<BT>>
    for crate::lock_ordering::IpStateRulesTable<I>
{
    type Data = RulesTable<I, DeviceId<BT>>;
}

impl<I: IpLayerIpExt, BT: BindingsTypes> DelegatedOrderedLockAccess<RulesTable<I, DeviceId<BT>>>
    for StackState<BT>
{
    type Inner = IpStateInner<I, DeviceId<BT>, BT>;
    fn delegate_ordered_lock_access(&self) -> &Self::Inner {
        self.inner_ip_state()
    }
}

impl<I: IpLayerIpExt, BT: BindingsTypes> LockLevelFor<StackState<BT>>
    for crate::lock_ordering::IpStatePmtuCache<I>
{
    type Data = PmtuCache<I, BT>;
}

impl<I: IpLayerIpExt, BT: BindingsTypes> LockLevelFor<StackState<BT>>
    for crate::lock_ordering::IpStateRoutingTables<I>
{
    type Data =
        HashMap<RoutingTableId<I, DeviceId<BT>>, Primary<RwLock<RoutingTable<I, DeviceId<BT>>>>>;
}

impl<I: IpLayerIpExt, BT: BindingsTypes>
    DelegatedOrderedLockAccess<
        HashMap<RoutingTableId<I, DeviceId<BT>>, Primary<RwLock<RoutingTable<I, DeviceId<BT>>>>>,
    > for StackState<BT>
{
    type Inner = IpStateInner<I, DeviceId<BT>, BT>;
    fn delegate_ordered_lock_access(&self) -> &Self::Inner {
        self.inner_ip_state()
    }
}

impl<I: IpLayerIpExt, BT: BindingsTypes> LockLevelFor<RoutingTableId<I, DeviceId<BT>>>
    for crate::lock_ordering::IpStateRoutingTable<I>
{
    type Data = RoutingTable<I, DeviceId<BT>>;
}

impl<I: IpLayerIpExt, BT: BindingsTypes>
    DelegatedOrderedLockAccess<MulticastForwardingState<I, DeviceId<BT>, BT>> for StackState<BT>
{
    type Inner = IpStateInner<I, DeviceId<BT>, BT>;
    fn delegate_ordered_lock_access(&self) -> &Self::Inner {
        self.inner_ip_state()
    }
}

impl<I: IpLayerIpExt, BT: BindingsTypes> LockLevelFor<StackState<BT>>
    for crate::lock_ordering::IpMulticastForwardingState<I>
{
    type Data = MulticastForwardingState<I, DeviceId<BT>, BT>;
}

impl<I: IpLayerIpExt, BT: BindingsTypes>
    DelegatedOrderedLockAccess<RawIpSocketMap<I, WeakDeviceId<BT>, BT>> for StackState<BT>
{
    type Inner = IpStateInner<I, DeviceId<BT>, BT>;
    fn delegate_ordered_lock_access(&self) -> &Self::Inner {
        self.inner_ip_state()
    }
}

impl<I: IpLayerIpExt, BT: BindingsTypes> LockLevelFor<StackState<BT>>
    for crate::lock_ordering::AllRawIpSockets<I>
{
    type Data = RawIpSocketMap<I, WeakDeviceId<BT>, BT>;
}

impl<I: datagram::DualStackIpExt, BT: BindingsTypes>
    DelegatedOrderedLockAccess<icmp_echo::BoundSockets<I, WeakDeviceId<BT>, BT>>
    for StackState<BT>
{
    type Inner = IcmpSockets<I, WeakDeviceId<BT>, BT>;
    fn delegate_ordered_lock_access(&self) -> &Self::Inner {
        &self.transport.icmp_echo_state()
    }
}

impl<I: datagram::DualStackIpExt, BT: BindingsTypes> LockLevelFor<StackState<BT>>
    for crate::lock_ordering::IcmpBoundMap<I>
{
    type Data = icmp_echo::BoundSockets<I, WeakDeviceId<BT>, BT>;
}

impl<I: datagram::DualStackIpExt, BT: BindingsTypes>
    DelegatedOrderedLockAccess<IcmpSocketSet<I, WeakDeviceId<BT>, BT>> for StackState<BT>
{
    type Inner = IcmpSockets<I, WeakDeviceId<BT>, BT>;
    fn delegate_ordered_lock_access(&self) -> &Self::Inner {
        &self.transport.icmp_echo_state()
    }
}

impl<I: datagram::DualStackIpExt, BT: BindingsTypes> LockLevelFor<StackState<BT>>
    for crate::lock_ordering::IcmpAllSocketsSet<I>
{
    type Data = IcmpSocketSet<I, WeakDeviceId<BT>, BT>;
}

impl<I: datagram::DualStackIpExt, BT: BindingsTypes>
    DelegatedOrderedLockAccess<IpMarked<I, TokenBucket<BT::Instant>>> for StackState<BT>
{
    type Inner = IcmpState<I, BT>;
    fn delegate_ordered_lock_access(&self) -> &Self::Inner {
        self.inner_icmp_state()
    }
}

impl<I: datagram::DualStackIpExt, BT: BindingsTypes> LockLevelFor<StackState<BT>>
    for crate::lock_ordering::IcmpTokenBucket<I>
{
    type Data = IpMarked<I, TokenBucket<BT::Instant>>;
}

impl<I: datagram::DualStackIpExt, D: WeakDeviceIdentifier, BT: BindingsTypes>
    LockLevelFor<IcmpSocketId<I, D, BT>> for crate::lock_ordering::IcmpSocketState<I>
{
    type Data = IcmpSocketState<I, D, BT>;
}