netstack3_core/ip/
base.rs

1// Copyright 2022 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5//! The integrations for protocols built on top of IP.
6
7use lock_order::lock::{DelegatedOrderedLockAccess, LockLevelFor};
8use lock_order::relation::LockBefore;
9use log::trace;
10use net_types::ip::{Ip, IpMarked, Ipv4, Ipv4Addr, Ipv4SourceAddr, Ipv6, Ipv6Addr, Ipv6SourceAddr};
11use net_types::{MulticastAddr, SpecifiedAddr};
12use netstack3_base::socket::{SocketCookie, SocketIpAddr};
13use netstack3_base::{
14    CounterContext, FrameDestination, Icmpv4ErrorCode, Icmpv6ErrorCode, Marks,
15    ResourceCounterContext, TokenBucket, WeakDeviceIdentifier,
16};
17use netstack3_datagram::{self as datagram};
18use netstack3_device::{BaseDeviceId, DeviceId, DeviceStateSpec, WeakDeviceId, for_any_device_id};
19use netstack3_hashmap::HashMap;
20use netstack3_icmp_echo::{
21    self as icmp_echo, IcmpEchoBoundStateContext, IcmpEchoContextMarker,
22    IcmpEchoIpTransportContext, IcmpEchoStateContext, IcmpSocketId, IcmpSocketSet, IcmpSocketState,
23    IcmpSockets,
24};
25use netstack3_ip::device::{self, IpDeviceBindingsContext, IpDeviceIpExt};
26use netstack3_ip::gmp::{IgmpCounters, MldCounters};
27use netstack3_ip::icmp::{
28    self, IcmpIpTransportContext, IcmpRxCounters, IcmpState, IcmpTxCounters, InnerIcmpContext,
29    InnerIcmpv4Context, NdpCounters,
30};
31use netstack3_ip::multicast_forwarding::MulticastForwardingState;
32use netstack3_ip::raw::RawIpSocketMap;
33use netstack3_ip::{
34    self as ip, BaseRoutingTableState, FragmentContext, IpCounters, IpDeviceContext, IpHeaderInfo,
35    IpLayerBindingsContext, IpLayerIpExt, IpPacketFragmentCache, IpRouteTableContext,
36    IpRouteTablesContext, IpStateContext, IpStateInner, IpTransportContext,
37    IpTransportDispatchContext, LocalDeliveryPacketInfo, MulticastMembershipHandler, PmtuCache,
38    PmtuContext, ResolveRouteError, ResolvedRoute, RoutingTable, RoutingTableId, RulesTable,
39    SocketMetadata, TransportReceiveError,
40};
41use netstack3_sync::rc::Primary;
42use netstack3_tcp::TcpIpTransportContext;
43use netstack3_udp::{DualStackUdpSocketId, UdpBindingsTypes, UdpIpTransportContext};
44use packet::{BufferMut, ParseBuffer};
45use packet_formats::ip::{IpProto, Ipv4Proto, Ipv6Proto};
46
47use crate::context::WrapLockLevel;
48use crate::context::prelude::*;
49use crate::{BindingsContext, BindingsTypes, CoreCtx, StackState};
50
51impl<I, BT, L> FragmentContext<I, BT> for CoreCtx<'_, BT, L>
52where
53    I: IpLayerIpExt,
54    BT: BindingsTypes,
55    L: LockBefore<crate::lock_ordering::IpStateFragmentCache<I>>,
56{
57    fn with_state_mut<O, F: FnOnce(&mut IpPacketFragmentCache<I, BT>) -> O>(&mut self, cb: F) -> O {
58        let mut cache = self.lock::<crate::lock_ordering::IpStateFragmentCache<I>>();
59        cb(&mut cache)
60    }
61}
62
63impl<BC: BindingsContext, L: LockBefore<crate::lock_ordering::IpStatePmtuCache<Ipv4>>>
64    PmtuContext<Ipv4, BC> for CoreCtx<'_, BC, L>
65{
66    fn with_state_mut<O, F: FnOnce(&mut PmtuCache<Ipv4, BC>) -> O>(&mut self, cb: F) -> O {
67        let mut cache = self.lock::<crate::lock_ordering::IpStatePmtuCache<Ipv4>>();
68        cb(&mut cache)
69    }
70}
71
72impl<BC: BindingsContext, L: LockBefore<crate::lock_ordering::IpStatePmtuCache<Ipv6>>>
73    PmtuContext<Ipv6, BC> for CoreCtx<'_, BC, L>
74{
75    fn with_state_mut<O, F: FnOnce(&mut PmtuCache<Ipv6, BC>) -> O>(&mut self, cb: F) -> O {
76        let mut cache = self.lock::<crate::lock_ordering::IpStatePmtuCache<Ipv6>>();
77        cb(&mut cache)
78    }
79}
80
impl<
    I: Ip + IpDeviceIpExt + IpLayerIpExt,
    BC: BindingsContext
        + IpDeviceBindingsContext<I, Self::DeviceId>
        + IpLayerBindingsContext<I, Self::DeviceId>,
    L: LockBefore<crate::lock_ordering::IpState<I>>,
> MulticastMembershipHandler<I, BC> for CoreCtx<'_, BC, L>
where
    Self: device::IpDeviceConfigurationContext<I, BC> + IpStateContext<I, BC> + IpDeviceContext<I>,
{
    /// Joins the multicast group `addr` on `device`, delegating to the IP
    /// device layer.
    fn join_multicast_group(
        &mut self,
        bindings_ctx: &mut BC,
        device: &Self::DeviceId,
        addr: MulticastAddr<I::Addr>,
    ) {
        ip::device::join_ip_multicast::<I, _, _>(self, bindings_ctx, device, addr)
    }

    /// Leaves the multicast group `addr` on `device`, delegating to the IP
    /// device layer.
    fn leave_multicast_group(
        &mut self,
        bindings_ctx: &mut BC,
        device: &Self::DeviceId,
        addr: MulticastAddr<I::Addr>,
    ) {
        ip::device::leave_ip_multicast::<I, _, _>(self, bindings_ctx, device, addr)
    }

    /// Picks the output device for `addr` by resolving a route to the
    /// multicast destination (no bound device or source address constraint).
    fn select_device_for_multicast_group(
        &mut self,
        addr: MulticastAddr<I::Addr>,
        marks: &Marks,
    ) -> Result<Self::DeviceId, ResolveRouteError> {
        let remote_ip = SocketIpAddr::new_from_multicast(addr);
        let ResolvedRoute {
            src_addr: _,
            device,
            local_delivery_device,
            next_hop: _,
            internal_forwarding: _,
        } = ip::resolve_output_route_to_destination(self, None, None, Some(remote_ip), marks)?;
        // NB: Because the original address is multicast, it cannot be assigned
        // to a local interface. Thus local delivery should never be requested.
        debug_assert!(local_delivery_device.is_none(), "{:?}", local_delivery_device);
        Ok(device)
    }
}
128
129impl<BT: BindingsTypes, I: datagram::DualStackIpExt, L> CounterContext<IcmpTxCounters<I>>
130    for CoreCtx<'_, BT, L>
131{
132    fn counters(&self) -> &IcmpTxCounters<I> {
133        &self
134            .unlocked_access::<crate::lock_ordering::UnlockedState>()
135            .inner_icmp_state::<I>()
136            .tx_counters
137    }
138}
139
140impl<BT: BindingsTypes, I: datagram::DualStackIpExt, L> CounterContext<IcmpRxCounters<I>>
141    for CoreCtx<'_, BT, L>
142{
143    fn counters(&self) -> &IcmpRxCounters<I> {
144        &self
145            .unlocked_access::<crate::lock_ordering::UnlockedState>()
146            .inner_icmp_state::<I>()
147            .rx_counters
148    }
149}
150
151impl<BT: BindingsTypes, L> CounterContext<IgmpCounters> for CoreCtx<'_, BT, L> {
152    fn counters(&self) -> &IgmpCounters {
153        &self
154            .unlocked_access::<crate::lock_ordering::UnlockedState>()
155            .inner_ip_state::<Ipv4>()
156            .igmp_counters()
157    }
158}
159
impl<BT: BindingsTypes, L> ResourceCounterContext<DeviceId<BT>, IgmpCounters>
    for CoreCtx<'_, BT, L>
{
    /// Returns the per-device IGMP counters by dispatching on the concrete
    /// device variant and delegating to the `BaseDeviceId` impl below.
    fn per_resource_counters<'a>(&'a self, device_id: &'a DeviceId<BT>) -> &'a IgmpCounters {
        for_any_device_id!(
            DeviceId,
            device_id,
            id => self.per_resource_counters(id)
        )
    }
}
171
172impl<BT: BindingsTypes, D: DeviceStateSpec, L>
173    ResourceCounterContext<BaseDeviceId<D, BT>, IgmpCounters> for CoreCtx<'_, BT, L>
174{
175    fn per_resource_counters<'a>(&'a self, device_id: &'a BaseDeviceId<D, BT>) -> &'a IgmpCounters {
176        device_id
177            .device_state(
178                &self.unlocked_access::<crate::lock_ordering::UnlockedState>().device.origin,
179            )
180            .as_ref()
181            .igmp_counters()
182    }
183}
184
185impl<BT: BindingsTypes, L> CounterContext<MldCounters> for CoreCtx<'_, BT, L> {
186    fn counters(&self) -> &MldCounters {
187        &self
188            .unlocked_access::<crate::lock_ordering::UnlockedState>()
189            .inner_ip_state::<Ipv4>()
190            .mld_counters()
191    }
192}
193
impl<BT: BindingsTypes, L> ResourceCounterContext<DeviceId<BT>, MldCounters>
    for CoreCtx<'_, BT, L>
{
    /// Returns the per-device MLD counters by dispatching on the concrete
    /// device variant and delegating to the `BaseDeviceId` impl below.
    fn per_resource_counters<'a>(&'a self, device_id: &'a DeviceId<BT>) -> &'a MldCounters {
        for_any_device_id!(
            DeviceId,
            device_id,
            id => self.per_resource_counters(id)
        )
    }
}
205
206impl<BT: BindingsTypes, D: DeviceStateSpec, L>
207    ResourceCounterContext<BaseDeviceId<D, BT>, MldCounters> for CoreCtx<'_, BT, L>
208{
209    fn per_resource_counters<'a>(&'a self, device_id: &'a BaseDeviceId<D, BT>) -> &'a MldCounters {
210        device_id
211            .device_state(
212                &self.unlocked_access::<crate::lock_ordering::UnlockedState>().device.origin,
213            )
214            .as_ref()
215            .mld_counters()
216    }
217}
218
219impl<BT: BindingsTypes, L> CounterContext<NdpCounters> for CoreCtx<'_, BT, L> {
220    fn counters(&self) -> &NdpCounters {
221        &self.unlocked_access::<crate::lock_ordering::UnlockedState>().ipv6.icmp.ndp_counters
222    }
223}
224
impl<
    BC: BindingsContext,
    L: LockBefore<crate::lock_ordering::IcmpBoundMap<Ipv4>>
        + LockBefore<crate::lock_ordering::TcpAllSocketsSet<Ipv4>>
        + LockBefore<crate::lock_ordering::UdpAllSocketsSet<Ipv4>>,
> InnerIcmpv4Context<BC> for CoreCtx<'_, BC, L>
{
    /// Reads the stack-level configuration flag controlling whether ICMPv4
    /// timestamp requests are answered; read from the unlocked state, so no
    /// lock is taken.
    fn should_send_timestamp_reply(&self) -> bool {
        self.unlocked_access::<crate::lock_ordering::UnlockedState>().ipv4.icmp.send_timestamp_reply
    }
}
236
237impl<BT: BindingsTypes, I: IpLayerIpExt, L> CounterContext<IpCounters<I>> for CoreCtx<'_, BT, L> {
238    fn counters(&self) -> &IpCounters<I> {
239        &self.unlocked_access::<crate::lock_ordering::UnlockedState>().inner_ip_state().counters()
240    }
241}
242
impl<BT: BindingsTypes, I: IpLayerIpExt, L> ResourceCounterContext<DeviceId<BT>, IpCounters<I>>
    for CoreCtx<'_, BT, L>
{
    /// Returns the per-device IP counters by dispatching on the concrete
    /// device variant and delegating to the `BaseDeviceId` impl below.
    fn per_resource_counters<'a>(&'a self, device_id: &'a DeviceId<BT>) -> &'a IpCounters<I> {
        for_any_device_id!(
            DeviceId,
            device_id,
            id => self.per_resource_counters(id)
        )
    }
}
254
255impl<BT: BindingsTypes, D: DeviceStateSpec, I: IpLayerIpExt, L>
256    ResourceCounterContext<BaseDeviceId<D, BT>, IpCounters<I>> for CoreCtx<'_, BT, L>
257{
258    fn per_resource_counters<'a>(
259        &'a self,
260        device_id: &'a BaseDeviceId<D, BT>,
261    ) -> &'a IpCounters<I> {
262        device_id
263            .device_state(
264                &self.unlocked_access::<crate::lock_ordering::UnlockedState>().device.origin,
265            )
266            .as_ref()
267            .ip_counters::<I>()
268    }
269}
270
#[netstack3_macros::instantiate_ip_impl_block(I)]
impl<I, BC, L> IpStateContext<I, BC> for CoreCtx<'_, BC, L>
where
    I: IpLayerIpExt,
    BC: BindingsContext,
    L: LockBefore<crate::lock_ordering::IpStateRulesTable<I>>,
{
    type IpRouteTablesCtx<'a> =
        CoreCtx<'a, BC, WrapLockLevel<crate::lock_ordering::IpStateRulesTable<I>>>;

    // Runs `cb` with read access to the routing rules table. The callback is
    // also handed a core context restricted to lock levels beyond the rules
    // table, so it cannot re-acquire this lock.
    fn with_rules_table<
        O,
        F: FnOnce(&mut Self::IpRouteTablesCtx<'_>, &RulesTable<I, Self::DeviceId, BC>) -> O,
    >(
        &mut self,
        cb: F,
    ) -> O {
        let (rules_table, mut restricted) =
            self.read_lock_and::<crate::lock_ordering::IpStateRulesTable<I>>();
        cb(&mut restricted, &rules_table)
    }

    // Same as `with_rules_table` but takes the write lock, giving `cb`
    // mutable access to the rules table.
    fn with_rules_table_mut<
        O,
        F: FnOnce(&mut Self::IpRouteTablesCtx<'_>, &mut RulesTable<I, Self::DeviceId, BC>) -> O,
    >(
        &mut self,
        cb: F,
    ) -> O {
        let (mut rules_table, mut restricted) =
            self.write_lock_and::<crate::lock_ordering::IpStateRulesTable<I>>();
        cb(&mut restricted, &mut rules_table)
    }
}
305
#[netstack3_macros::instantiate_ip_impl_block(I)]
impl<I, BC, L> IpRouteTablesContext<I, BC> for CoreCtx<'_, BC, L>
where
    I: IpLayerIpExt,
    BC: BindingsContext,
    L: LockBefore<crate::lock_ordering::IpStateRoutingTables<I>>,
{
    type Ctx<'a> = CoreCtx<'a, BC, WrapLockLevel<crate::lock_ordering::IpStateRoutingTables<I>>>;

    // Returns a clone of the main routing table's ID; read from the unlocked
    // state, so no lock is taken.
    fn main_table_id(&self) -> RoutingTableId<I, Self::DeviceId, BC> {
        self.unlocked_access::<crate::lock_ordering::UnlockedState>()
            .inner_ip_state()
            .main_table_id()
            .clone()
    }

    // Runs `cb` with access to the map of all routing tables, plus a core
    // context restricted past the routing-tables lock level.
    fn with_ip_routing_tables<
        O,
        F: FnOnce(
            &mut Self::Ctx<'_>,
            &HashMap<
                RoutingTableId<I, Self::DeviceId, BC>,
                Primary<BaseRoutingTableState<I, Self::DeviceId, BC>>,
            >,
        ) -> O,
    >(
        &mut self,
        cb: F,
    ) -> O {
        let (table, mut ctx) = self.lock_and::<crate::lock_ordering::IpStateRoutingTables<I>>();
        cb(&mut ctx, &table)
    }

    // Runs `cb` with mutable access to the map of all routing tables. No
    // restricted context is handed out here; `cb` only sees the map itself.
    fn with_ip_routing_tables_mut<
        O,
        F: FnOnce(
            &mut HashMap<
                RoutingTableId<I, Self::DeviceId, BC>,
                Primary<BaseRoutingTableState<I, Self::DeviceId, BC>>,
            >,
        ) -> O,
    >(
        &mut self,
        cb: F,
    ) -> O {
        let mut tables = self.lock::<crate::lock_ordering::IpStateRoutingTables<I>>();
        cb(&mut *tables)
    }
}
355
#[netstack3_macros::instantiate_ip_impl_block(I)]
impl<I, BC, L> IpRouteTableContext<I, BC> for CoreCtx<'_, BC, L>
where
    I: IpLayerIpExt,
    BC: BindingsContext,
    L: LockBefore<crate::lock_ordering::IpStateRoutingTable<I>>,
{
    type IpDeviceIdCtx<'a> =
        CoreCtx<'a, BC, WrapLockLevel<crate::lock_ordering::IpStateRoutingTable<I>>>;

    // Runs `cb` with read access to the single routing table identified by
    // `table_id`. The table's lock lives inside the `RoutingTableId`, so the
    // context first adopts the ID, then read-locks through it; the restricted
    // context is cast back to a plain `CoreCtx` before being handed to `cb`.
    fn with_ip_routing_table<
        O,
        F: FnOnce(&mut Self::IpDeviceIdCtx<'_>, &RoutingTable<I, Self::DeviceId>) -> O,
    >(
        &mut self,
        table_id: &RoutingTableId<I, Self::DeviceId, BC>,
        cb: F,
    ) -> O {
        let mut table = self.adopt(table_id);
        let (table, mut restricted) = table
            .read_lock_with_and::<crate::lock_ordering::IpStateRoutingTable<I>, _>(|c| c.right());
        let mut restricted = restricted.cast_core_ctx();
        cb(&mut restricted, &table)
    }

    // Same as `with_ip_routing_table` but takes the write lock, giving `cb`
    // mutable access to the routing table.
    fn with_ip_routing_table_mut<
        O,
        F: FnOnce(&mut Self::IpDeviceIdCtx<'_>, &mut RoutingTable<I, Self::DeviceId>) -> O,
    >(
        &mut self,
        table_id: &RoutingTableId<I, Self::DeviceId, BC>,
        cb: F,
    ) -> O {
        let mut table = self.adopt(table_id);
        let (mut table, mut restricted) = table
            .write_lock_with_and::<crate::lock_ordering::IpStateRoutingTable<I>, _>(|c| c.right());
        let mut restricted = restricted.cast_core_ctx();
        cb(&mut restricted, &mut table)
    }
}
396
/// A socket resolved by early demux, before full IP-layer processing.
///
/// Produced by `IpTransportDispatchContext::early_demux` and later consumed
/// by `dispatch_receive_ip_packet`. Only UDP sockets are demuxed early today
/// (TCP early demux is a TODO in the dispatch impls below).
pub enum EarlyDemuxSocket<I, D, BT>
where
    I: netstack3_datagram::IpExt,
    D: WeakDeviceIdentifier,
    BT: UdpBindingsTypes,
{
    UdpSocket(DualStackUdpSocketId<I, D, BT>),
}
405
406impl<I, D, BT> EarlyDemuxSocket<I, D, BT>
407where
408    I: netstack3_datagram::IpExt,
409    D: WeakDeviceIdentifier,
410    BT: UdpBindingsTypes,
411{
412    fn into_udp(self) -> DualStackUdpSocketId<I, D, BT> {
413        match self {
414            EarlyDemuxSocket::UdpSocket(id) => id,
415        }
416    }
417}
418
419impl<I, D, BT, CC> SocketMetadata<CC> for EarlyDemuxSocket<I, D, BT>
420where
421    CC: netstack3_base::DeviceIdContext<netstack3_base::AnyDevice, WeakDeviceId = D>,
422    I: netstack3_datagram::IpExt,
423    D: WeakDeviceIdentifier,
424    BT: BindingsTypes,
425    DualStackUdpSocketId<I, D, BT>: SocketMetadata<CC>,
426{
427    fn socket_cookie(&self, core_ctx: &mut CC) -> SocketCookie {
428        match self {
429            Self::UdpSocket(s) => s.socket_cookie(core_ctx),
430        }
431    }
432
433    fn marks(&self, core_ctx: &mut CC) -> Marks {
434        match self {
435            Self::UdpSocket(s) => s.marks(core_ctx),
436        }
437    }
438}
439
impl<BC: BindingsContext, L: LockBefore<crate::lock_ordering::IcmpAllSocketsSet<Ipv4>>>
    IpTransportDispatchContext<Ipv4, BC> for CoreCtx<'_, BC, L>
{
    type EarlyDemuxSocket = EarlyDemuxSocket<Ipv4, Self::WeakDeviceId, BC>;

    /// Attempts to resolve the destination transport socket before full IP
    /// processing. Returns `None` for any packet that cannot (or should not)
    /// be demuxed early; only unicast UDP packets are demuxed today.
    fn early_demux<B: ParseBuffer>(
        &mut self,
        device: &Self::DeviceId,
        frame_dst: Option<FrameDestination>,
        src_ip: Ipv4Addr,
        dst_ip: Ipv4Addr,
        proto: Ipv4Proto,
        body: B,
    ) -> Option<Self::EarlyDemuxSocket> {
        // Only unicast packets are demuxed early.
        // TODO(https://fxbug.dev/476450053): Consider early demuxing multicast
        // packets as well.
        match frame_dst {
            Some(FrameDestination::Individual { local: _ }) => (),
            Some(FrameDestination::Broadcast) | Some(FrameDestination::Multicast) | None => {
                return None;
            }
        };

        match proto {
            Ipv4Proto::Proto(IpProto::Udp) => {
                <UdpIpTransportContext as IpTransportContext<Ipv4, _, _>>::early_demux(
                    self, device, src_ip, dst_ip, body,
                )
                .map(EarlyDemuxSocket::UdpSocket)
            }
            Ipv4Proto::Proto(IpProto::Tcp) => {
                // TODO(https://fxbug.dev/473819144) Implement early demux for TCP.
                None
            }
            Ipv4Proto::Icmp
            | Ipv4Proto::Igmp
            | Ipv4Proto::Proto(IpProto::Reserved)
            | Ipv4Proto::Other(_) => None,
        }
    }

    /// Dispatches a locally-delivered IPv4 packet to the transport layer
    /// selected by `proto` (ICMP, IGMP, UDP, or TCP). The `early_demux_socket`
    /// — if one was resolved — is only forwarded to UDP, matching the protocols
    /// supported by `early_demux` above. Unknown protocols are rejected with
    /// `ProtocolUnsupported`.
    fn dispatch_receive_ip_packet<B: BufferMut, H: IpHeaderInfo<Ipv4>>(
        &mut self,
        bindings_ctx: &mut BC,
        device: &Self::DeviceId,
        src_ip: Ipv4SourceAddr,
        dst_ip: SpecifiedAddr<Ipv4Addr>,
        proto: Ipv4Proto,
        body: B,
        info: &LocalDeliveryPacketInfo<Ipv4, H>,
        early_demux_socket: Option<Self::EarlyDemuxSocket>,
    ) -> Result<(), TransportReceiveError> {
        match proto {
            Ipv4Proto::Icmp => {
                <IcmpIpTransportContext as IpTransportContext<Ipv4, _, _>>::receive_ip_packet(
                    self,
                    bindings_ctx,
                    device,
                    src_ip,
                    dst_ip,
                    body,
                    info,
                    None,
                )
                .map_err(|(_body, err)| err)
            }
            Ipv4Proto::Igmp => {
                device::receive_igmp_packet(self, bindings_ctx, device, src_ip, dst_ip, body, info);
                Ok(())
            }
            Ipv4Proto::Proto(IpProto::Udp) => {
                let early_demux_socket = early_demux_socket.map(EarlyDemuxSocket::into_udp);

                <UdpIpTransportContext as IpTransportContext<Ipv4, _, _>>::receive_ip_packet(
                    self,
                    bindings_ctx,
                    device,
                    src_ip,
                    dst_ip,
                    body,
                    info,
                    early_demux_socket,
                )
                .map_err(|(_body, err)| err)
            }
            Ipv4Proto::Proto(IpProto::Tcp) => {
                <TcpIpTransportContext as IpTransportContext<Ipv4, _, _>>::receive_ip_packet(
                    self,
                    bindings_ctx,
                    device,
                    src_ip,
                    dst_ip,
                    body,
                    info,
                    None,
                )
                .map_err(|(_body, err)| err)
            }
            Ipv4Proto::Proto(IpProto::Reserved) | Ipv4Proto::Other(_) => {
                Err(TransportReceiveError::ProtocolUnsupported)
            }
        }
    }
}
545
impl<BC: BindingsContext, L: LockBefore<crate::lock_ordering::IcmpAllSocketsSet<Ipv6>>>
    IpTransportDispatchContext<Ipv6, BC> for CoreCtx<'_, BC, L>
{
    type EarlyDemuxSocket = EarlyDemuxSocket<Ipv6, Self::WeakDeviceId, BC>;

    /// Attempts to resolve the destination transport socket before full IP
    /// processing. Returns `None` for any packet that cannot (or should not)
    /// be demuxed early; only unicast UDP packets are demuxed today.
    fn early_demux<B: ParseBuffer>(
        &mut self,
        device: &Self::DeviceId,
        frame_dst: Option<FrameDestination>,
        src_ip: Ipv6Addr,
        dst_ip: Ipv6Addr,
        proto: Ipv6Proto,
        body: B,
    ) -> Option<Self::EarlyDemuxSocket> {
        // Only unicast packets are demuxed early.
        // TODO(https://fxbug.dev/476450053): Consider early demuxing multicast
        // packets as well.
        match frame_dst {
            Some(FrameDestination::Individual { local: _ }) => (),
            Some(FrameDestination::Broadcast) | Some(FrameDestination::Multicast) | None => {
                return None;
            }
        };

        match proto {
            Ipv6Proto::Proto(IpProto::Udp) => {
                <UdpIpTransportContext as IpTransportContext<Ipv6, _, _>>::early_demux(
                    self, device, src_ip, dst_ip, body,
                )
                .map(EarlyDemuxSocket::UdpSocket)
            }
            Ipv6Proto::Proto(IpProto::Tcp) => {
                // TODO(https://fxbug.dev/473819144) Implement early demux for TCP.
                None
            }
            Ipv6Proto::Icmpv6
            | Ipv6Proto::NoNextHeader
            | Ipv6Proto::Proto(IpProto::Reserved)
            | Ipv6Proto::Other(_) => None,
        }
    }

    /// Dispatches a locally-delivered IPv6 packet to the transport layer
    /// selected by `proto` (ICMPv6, UDP, or TCP). `NoNextHeader` terminates
    /// processing successfully. The `early_demux_socket` — if one was
    /// resolved — is only forwarded to UDP. Unknown protocols are rejected
    /// with `ProtocolUnsupported`.
    fn dispatch_receive_ip_packet<B: BufferMut, H: IpHeaderInfo<Ipv6>>(
        &mut self,
        bindings_ctx: &mut BC,
        device: &Self::DeviceId,
        src_ip: Ipv6SourceAddr,
        dst_ip: SpecifiedAddr<Ipv6Addr>,
        proto: Ipv6Proto,
        body: B,
        info: &LocalDeliveryPacketInfo<Ipv6, H>,
        early_demux_socket: Option<Self::EarlyDemuxSocket>,
    ) -> Result<(), TransportReceiveError> {
        match proto {
            Ipv6Proto::Icmpv6 => {
                <IcmpIpTransportContext as IpTransportContext<Ipv6, _, _>>::receive_ip_packet(
                    self,
                    bindings_ctx,
                    device,
                    src_ip,
                    dst_ip,
                    body,
                    info,
                    None,
                )
                .map_err(|(_body, err)| err)
            }
            // A value of `Ipv6Proto::NoNextHeader` tells us that there is no
            // header whatsoever following the last lower-level header so we stop
            // processing here.
            Ipv6Proto::NoNextHeader => Ok(()),
            Ipv6Proto::Proto(IpProto::Tcp) => {
                <TcpIpTransportContext as IpTransportContext<Ipv6, _, _>>::receive_ip_packet(
                    self,
                    bindings_ctx,
                    device,
                    src_ip,
                    dst_ip,
                    body,
                    info,
                    None,
                )
                .map_err(|(_body, err)| err)
            }
            Ipv6Proto::Proto(IpProto::Udp) => {
                let early_demux_socket = early_demux_socket.map(EarlyDemuxSocket::into_udp);

                <UdpIpTransportContext as IpTransportContext<Ipv6, _, _>>::receive_ip_packet(
                    self,
                    bindings_ctx,
                    device,
                    src_ip,
                    dst_ip,
                    body,
                    info,
                    early_demux_socket,
                )
                .map_err(|(_body, err)| err)
            }
            Ipv6Proto::Proto(IpProto::Reserved) | Ipv6Proto::Other(_) => {
                Err(TransportReceiveError::ProtocolUnsupported)
            }
        }
    }
}
651
impl<
    BC: BindingsContext,
    L: LockBefore<crate::lock_ordering::IcmpBoundMap<Ipv4>>
        + LockBefore<crate::lock_ordering::TcpAllSocketsSet<Ipv4>>
        + LockBefore<crate::lock_ordering::UdpAllSocketsSet<Ipv4>>,
> InnerIcmpContext<Ipv4, BC> for CoreCtx<'_, BC, L>
{
    type EchoTransportContext = IcmpEchoIpTransportContext;

    /// Routes a received ICMPv4 error to the transport layer that owns the
    /// original (errored) packet, selected by the original packet's protocol
    /// field. Protocols without a dedicated handler fall through to the no-op
    /// `()` impl.
    fn receive_icmp_error(
        &mut self,
        bindings_ctx: &mut BC,
        device: &DeviceId<BC>,
        original_src_ip: Option<SpecifiedAddr<Ipv4Addr>>,
        original_dst_ip: SpecifiedAddr<Ipv4Addr>,
        original_proto: Ipv4Proto,
        original_body: &[u8],
        err: Icmpv4ErrorCode,
    ) {
        self.increment_both(device, |c: &IpCounters<Ipv4>| &c.receive_icmp_error);
        trace!("InnerIcmpContext<Ipv4>::receive_icmp_error({:?})", err);

        match original_proto {
            Ipv4Proto::Icmp => {
                <IcmpIpTransportContext as IpTransportContext<Ipv4, _, _>>::receive_icmp_error(
                    self,
                    bindings_ctx,
                    device,
                    original_src_ip,
                    original_dst_ip,
                    original_body,
                    err,
                )
            }
            Ipv4Proto::Proto(IpProto::Tcp) => {
                <TcpIpTransportContext as IpTransportContext<Ipv4, _, _>>::receive_icmp_error(
                    self,
                    bindings_ctx,
                    device,
                    original_src_ip,
                    original_dst_ip,
                    original_body,
                    err,
                )
            }
            Ipv4Proto::Proto(IpProto::Udp) => {
                <UdpIpTransportContext as IpTransportContext<Ipv4, _, _>>::receive_icmp_error(
                    self,
                    bindings_ctx,
                    device,
                    original_src_ip,
                    original_dst_ip,
                    original_body,
                    err,
                )
            }
            // TODO(joshlf): Once all IP protocol numbers are covered,
            // remove this default case.
            _ => <() as IpTransportContext<Ipv4, _, _>>::receive_icmp_error(
                self,
                bindings_ctx,
                device,
                original_src_ip,
                original_dst_ip,
                original_body,
                err,
            ),
        }
    }

    /// Runs `cb` with mutable access to the ICMPv4 error-rate-limiting token
    /// bucket, under its lock.
    fn with_error_send_bucket_mut<O, F: FnOnce(&mut TokenBucket<BC::Instant>) -> O>(
        &mut self,
        cb: F,
    ) -> O {
        cb(&mut self.lock::<crate::lock_ordering::IcmpTokenBucket<Ipv4>>())
    }
}
729
impl<
    BC: BindingsContext,
    L: LockBefore<crate::lock_ordering::IcmpBoundMap<Ipv6>>
        + LockBefore<crate::lock_ordering::TcpAllSocketsSet<Ipv6>>
        + LockBefore<crate::lock_ordering::UdpAllSocketsSet<Ipv6>>,
> InnerIcmpContext<Ipv6, BC> for CoreCtx<'_, BC, L>
{
    type EchoTransportContext = IcmpEchoIpTransportContext;

    /// Routes a received ICMPv6 error to the transport layer that owns the
    /// original (errored) packet, selected by the original packet's next
    /// header field. Protocols without a dedicated handler fall through to
    /// the no-op `()` impl.
    fn receive_icmp_error(
        &mut self,
        bindings_ctx: &mut BC,
        device: &DeviceId<BC>,
        original_src_ip: Option<SpecifiedAddr<Ipv6Addr>>,
        original_dst_ip: SpecifiedAddr<Ipv6Addr>,
        original_next_header: Ipv6Proto,
        original_body: &[u8],
        err: Icmpv6ErrorCode,
    ) {
        self.increment_both(device, |c: &IpCounters<Ipv6>| &c.receive_icmp_error);
        trace!("InnerIcmpContext<Ipv6>::receive_icmp_error({:?})", err);

        match original_next_header {
            Ipv6Proto::Icmpv6 => {
                <IcmpIpTransportContext as IpTransportContext<Ipv6, _, _>>::receive_icmp_error(
                    self,
                    bindings_ctx,
                    device,
                    original_src_ip,
                    original_dst_ip,
                    original_body,
                    err,
                )
            }
            Ipv6Proto::Proto(IpProto::Tcp) => {
                <TcpIpTransportContext as IpTransportContext<Ipv6, _, _>>::receive_icmp_error(
                    self,
                    bindings_ctx,
                    device,
                    original_src_ip,
                    original_dst_ip,
                    original_body,
                    err,
                )
            }
            Ipv6Proto::Proto(IpProto::Udp) => {
                <UdpIpTransportContext as IpTransportContext<Ipv6, _, _>>::receive_icmp_error(
                    self,
                    bindings_ctx,
                    device,
                    original_src_ip,
                    original_dst_ip,
                    original_body,
                    err,
                )
            }
            // TODO(joshlf): Once all IP protocol numbers are covered,
            // remove this default case.
            _ => <() as IpTransportContext<Ipv6, _, _>>::receive_icmp_error(
                self,
                bindings_ctx,
                device,
                original_src_ip,
                original_dst_ip,
                original_body,
                err,
            ),
        }
    }

    /// Runs `cb` with mutable access to the ICMPv6 error-rate-limiting token
    /// bucket, under its lock.
    fn with_error_send_bucket_mut<O, F: FnOnce(&mut TokenBucket<BC::Instant>) -> O>(
        &mut self,
        cb: F,
    ) -> O {
        cb(&mut self.lock::<crate::lock_ordering::IcmpTokenBucket<Ipv6>>())
    }
}
807
// Marker impl (no methods): opts `CoreCtx` into the ICMP state machinery.
impl<L, BC: BindingsContext> icmp::IcmpStateContext for CoreCtx<'_, BC, L> {}
809
// Marker impl (no methods): opts `CoreCtx` into the ICMP echo socket machinery.
impl<BT: BindingsTypes, L> IcmpEchoContextMarker for CoreCtx<'_, BT, L> {}
811
#[netstack3_macros::instantiate_ip_impl_block(I)]
impl<I, BC: BindingsContext, L: LockBefore<crate::lock_ordering::IcmpAllSocketsSet<I>>>
    IcmpEchoStateContext<I, BC> for CoreCtx<'_, BC, L>
{
    type SocketStateCtx<'a> =
        CoreCtx<'a, BC, WrapLockLevel<crate::lock_ordering::IcmpSocketState<I>>>;

    // Runs `cb` with write access to the set of all ICMP echo sockets.
    fn with_all_sockets_mut<O, F: FnOnce(&mut IcmpSocketSet<I, Self::WeakDeviceId, BC>) -> O>(
        &mut self,
        cb: F,
    ) -> O {
        cb(&mut self.write_lock::<crate::lock_ordering::IcmpAllSocketsSet<I>>())
    }

    // Runs `cb` with read access to the set of all ICMP echo sockets.
    fn with_all_sockets<O, F: FnOnce(&IcmpSocketSet<I, Self::WeakDeviceId, BC>) -> O>(
        &mut self,
        cb: F,
    ) -> O {
        cb(&self.read_lock::<crate::lock_ordering::IcmpAllSocketsSet<I>>())
    }

    // Runs `cb` with read access to the state of a single socket. The
    // per-socket lock lives in the socket ID, so the context adopts `id`
    // before read-locking; the restricted context is cast back to a plain
    // `CoreCtx` for the callback.
    fn with_socket_state<
        O,
        F: FnOnce(&mut Self::SocketStateCtx<'_>, &IcmpSocketState<I, Self::WeakDeviceId, BC>) -> O,
    >(
        &mut self,
        id: &IcmpSocketId<I, Self::WeakDeviceId, BC>,
        cb: F,
    ) -> O {
        let mut locked = self.adopt(id);
        let (socket_state, mut restricted) =
            locked.read_lock_with_and::<crate::lock_ordering::IcmpSocketState<I>, _>(|c| c.right());
        let mut restricted = restricted.cast_core_ctx();
        cb(&mut restricted, &socket_state)
    }

    // Same as `with_socket_state` but takes the write lock, giving `cb`
    // mutable access to the socket state.
    fn with_socket_state_mut<
        O,
        F: FnOnce(&mut Self::SocketStateCtx<'_>, &mut IcmpSocketState<I, Self::WeakDeviceId, BC>) -> O,
    >(
        &mut self,
        id: &IcmpSocketId<I, Self::WeakDeviceId, BC>,
        cb: F,
    ) -> O {
        let mut locked = self.adopt(id);
        let (mut socket_state, mut restricted) = locked
            .write_lock_with_and::<crate::lock_ordering::IcmpSocketState<I>, _>(|c| c.right());
        let mut restricted = restricted.cast_core_ctx();
        cb(&mut restricted, &mut socket_state)
    }

    // Runs `cb` with a context restricted to the socket-state lock level,
    // without taking any lock itself.
    fn with_bound_state_context<O, F: FnOnce(&mut Self::SocketStateCtx<'_>) -> O>(
        &mut self,
        cb: F,
    ) -> O {
        cb(&mut self.cast_locked::<crate::lock_ordering::IcmpSocketState<I>>())
    }

    // Visits every socket: holds the all-sockets read lock while taking each
    // socket's state read lock in turn, handing `cb` the restricted context,
    // the socket's ID, and its state.
    fn for_each_socket<
        F: FnMut(
            &mut Self::SocketStateCtx<'_>,
            &IcmpSocketId<I, Self::WeakDeviceId, BC>,
            &IcmpSocketState<I, Self::WeakDeviceId, BC>,
        ),
    >(
        &mut self,
        mut cb: F,
    ) {
        let (all_sockets, mut locked) =
            self.read_lock_and::<crate::lock_ordering::IcmpAllSocketsSet<I>>();
        all_sockets.keys().for_each(|id| {
            let id = IcmpSocketId::from(id.clone());
            let mut locked = locked.adopt(&id);
            let (socket_state, mut restricted) = locked
                .read_lock_with_and::<crate::lock_ordering::IcmpSocketState<I>, _>(|c| c.right());
            let mut restricted = restricted.cast_core_ctx();
            cb(&mut restricted, &id, &socket_state);
        });
    }
}
892
// Gives the ICMP echo socket layer combined access to the bound-sockets map
// and an IP-sockets context. Expanded once per IP version by the macro.
#[netstack3_macros::instantiate_ip_impl_block(I)]
impl<I, BC: BindingsContext, L: LockBefore<crate::lock_ordering::IcmpBoundMap<I>>>
    IcmpEchoBoundStateContext<I, BC> for CoreCtx<'_, BC, L>
{
    // Context available while the bound-sockets map is locked; pinned at the
    // `IcmpBoundMap<I>` lock level.
    type IpSocketsCtx<'a> = CoreCtx<'a, BC, WrapLockLevel<crate::lock_ordering::IcmpBoundMap<I>>>;
    // Runs `cb` with the bound-sockets map write-locked and a core context
    // restricted to locks ordered after `IcmpBoundMap<I>`.
    fn with_icmp_ctx_and_sockets_mut<
        O,
        F: FnOnce(
            &mut Self::IpSocketsCtx<'_>,
            &mut icmp_echo::BoundSockets<I, Self::WeakDeviceId, BC>,
        ) -> O,
    >(
        &mut self,
        cb: F,
    ) -> O {
        let (mut sockets, mut core_ctx) =
            self.write_lock_and::<crate::lock_ordering::IcmpBoundMap<I>>();
        cb(&mut core_ctx, &mut sockets)
    }
}
913
// Ordered-lock access to the IP fragment cache is delegated to
// `IpStateInner`, which owns the cache.
impl<I: IpLayerIpExt, BT: BindingsTypes> DelegatedOrderedLockAccess<IpPacketFragmentCache<I, BT>>
    for StackState<BT>
{
    type Inner = IpStateInner<I, DeviceId<BT>, BT>;
    fn delegate_ordered_lock_access(&self) -> &Self::Inner {
        self.inner_ip_state()
    }
}

// Binds the `IpStateFragmentCache<I>` lock level to the fragment cache data
// it protects.
impl<I: IpLayerIpExt, BT: BindingsTypes> LockLevelFor<StackState<BT>>
    for crate::lock_ordering::IpStateFragmentCache<I>
{
    type Data = IpPacketFragmentCache<I, BT>;
}
928
// Ordered-lock access to the PMTU cache is delegated to `IpStateInner`,
// which owns the cache.
impl<I: IpLayerIpExt, BT: BindingsTypes> DelegatedOrderedLockAccess<PmtuCache<I, BT>>
    for StackState<BT>
{
    type Inner = IpStateInner<I, DeviceId<BT>, BT>;
    fn delegate_ordered_lock_access(&self) -> &Self::Inner {
        self.inner_ip_state()
    }
}

// Binds the `IpStateRulesTable<I>` lock level to the route-rules table it
// protects.
impl<I: IpLayerIpExt, BT: BindingsTypes> LockLevelFor<StackState<BT>>
    for crate::lock_ordering::IpStateRulesTable<I>
{
    type Data = RulesTable<I, DeviceId<BT>, BT>;
}

// Ordered-lock access to the rules table is delegated to `IpStateInner`,
// which owns the table.
impl<I: IpLayerIpExt, BT: BindingsTypes> DelegatedOrderedLockAccess<RulesTable<I, DeviceId<BT>, BT>>
    for StackState<BT>
{
    type Inner = IpStateInner<I, DeviceId<BT>, BT>;
    fn delegate_ordered_lock_access(&self) -> &Self::Inner {
        self.inner_ip_state()
    }
}

// Binds the `IpStatePmtuCache<I>` lock level to the PMTU cache it protects.
impl<I: IpLayerIpExt, BT: BindingsTypes> LockLevelFor<StackState<BT>>
    for crate::lock_ordering::IpStatePmtuCache<I>
{
    type Data = PmtuCache<I, BT>;
}
958
// Binds the `IpStateRoutingTables<I>` lock level to the map of all routing
// tables (keyed by table ID, holding the primary reference to each table's
// state).
impl<I: IpLayerIpExt, BT: BindingsTypes> LockLevelFor<StackState<BT>>
    for crate::lock_ordering::IpStateRoutingTables<I>
{
    type Data = HashMap<
        RoutingTableId<I, DeviceId<BT>, BT>,
        Primary<BaseRoutingTableState<I, DeviceId<BT>, BT>>,
    >;
}

// Ordered-lock access to the routing-tables map is delegated to
// `IpStateInner`, which owns the map.
impl<I: IpLayerIpExt, BT: BindingsTypes>
    DelegatedOrderedLockAccess<
        HashMap<
            RoutingTableId<I, DeviceId<BT>, BT>,
            Primary<BaseRoutingTableState<I, DeviceId<BT>, BT>>,
        >,
    > for StackState<BT>
{
    type Inner = IpStateInner<I, DeviceId<BT>, BT>;
    fn delegate_ordered_lock_access(&self) -> &Self::Inner {
        self.inner_ip_state()
    }
}

// Binds the `IpStateRoutingTable<I>` lock level to an individual routing
// table. Note the lock subject here is a `RoutingTableId`, not `StackState`.
impl<I: IpLayerIpExt, BT: BindingsTypes> LockLevelFor<RoutingTableId<I, DeviceId<BT>, BT>>
    for crate::lock_ordering::IpStateRoutingTable<I>
{
    type Data = RoutingTable<I, DeviceId<BT>>;
}
987
// Ordered-lock access to the multicast forwarding state is delegated to
// `IpStateInner`, which owns that state.
impl<I: IpLayerIpExt, BT: BindingsTypes>
    DelegatedOrderedLockAccess<MulticastForwardingState<I, DeviceId<BT>, BT>> for StackState<BT>
{
    type Inner = IpStateInner<I, DeviceId<BT>, BT>;
    fn delegate_ordered_lock_access(&self) -> &Self::Inner {
        self.inner_ip_state()
    }
}

// Binds the `IpMulticastForwardingState<I>` lock level to the multicast
// forwarding state it protects.
impl<I: IpLayerIpExt, BT: BindingsTypes> LockLevelFor<StackState<BT>>
    for crate::lock_ordering::IpMulticastForwardingState<I>
{
    type Data = MulticastForwardingState<I, DeviceId<BT>, BT>;
}
1002
// Ordered-lock access to the raw IP socket map is delegated to
// `IpStateInner`, which owns the map.
impl<I: IpLayerIpExt, BT: BindingsTypes>
    DelegatedOrderedLockAccess<RawIpSocketMap<I, WeakDeviceId<BT>, BT>> for StackState<BT>
{
    type Inner = IpStateInner<I, DeviceId<BT>, BT>;
    fn delegate_ordered_lock_access(&self) -> &Self::Inner {
        self.inner_ip_state()
    }
}

// Binds the `AllRawIpSockets<I>` lock level to the raw IP socket map it
// protects.
impl<I: IpLayerIpExt, BT: BindingsTypes> LockLevelFor<StackState<BT>>
    for crate::lock_ordering::AllRawIpSockets<I>
{
    type Data = RawIpSocketMap<I, WeakDeviceId<BT>, BT>;
}
1017
1018impl<I: datagram::DualStackIpExt, BT: BindingsTypes>
1019    DelegatedOrderedLockAccess<icmp_echo::BoundSockets<I, WeakDeviceId<BT>, BT>>
1020    for StackState<BT>
1021{
1022    type Inner = IcmpSockets<I, WeakDeviceId<BT>, BT>;
1023    fn delegate_ordered_lock_access(&self) -> &Self::Inner {
1024        &self.transport.icmp_echo_state()
1025    }
1026}
1027
// Binds the `IcmpBoundMap<I>` lock level to the ICMP echo bound-sockets map
// it protects.
impl<I: datagram::DualStackIpExt, BT: BindingsTypes> LockLevelFor<StackState<BT>>
    for crate::lock_ordering::IcmpBoundMap<I>
{
    type Data = icmp_echo::BoundSockets<I, WeakDeviceId<BT>, BT>;
}
1033
1034impl<I: datagram::DualStackIpExt, BT: BindingsTypes>
1035    DelegatedOrderedLockAccess<IcmpSocketSet<I, WeakDeviceId<BT>, BT>> for StackState<BT>
1036{
1037    type Inner = IcmpSockets<I, WeakDeviceId<BT>, BT>;
1038    fn delegate_ordered_lock_access(&self) -> &Self::Inner {
1039        &self.transport.icmp_echo_state()
1040    }
1041}
1042
// Binds the `IcmpAllSocketsSet<I>` lock level to the set of all ICMP echo
// sockets it protects.
impl<I: datagram::DualStackIpExt, BT: BindingsTypes> LockLevelFor<StackState<BT>>
    for crate::lock_ordering::IcmpAllSocketsSet<I>
{
    type Data = IcmpSocketSet<I, WeakDeviceId<BT>, BT>;
}
1048
// Ordered-lock access to the per-IP-version ICMP token bucket (used for
// rate limiting; see `TokenBucket`) is delegated to `IcmpState`, which owns
// the bucket.
impl<I: datagram::DualStackIpExt, BT: BindingsTypes>
    DelegatedOrderedLockAccess<IpMarked<I, TokenBucket<BT::Instant>>> for StackState<BT>
{
    type Inner = IcmpState<I, BT>;
    fn delegate_ordered_lock_access(&self) -> &Self::Inner {
        self.inner_icmp_state()
    }
}

// Binds the `IcmpTokenBucket<I>` lock level to the token bucket it protects.
impl<I: datagram::DualStackIpExt, BT: BindingsTypes> LockLevelFor<StackState<BT>>
    for crate::lock_ordering::IcmpTokenBucket<I>
{
    type Data = IpMarked<I, TokenBucket<BT::Instant>>;
}
1063
// Binds the `IcmpSocketState<I>` lock level to a single socket's state. The
// lock subject is the socket ID itself rather than `StackState`, since each
// socket carries its own state lock.
impl<I: datagram::DualStackIpExt, D: WeakDeviceIdentifier, BT: BindingsTypes>
    LockLevelFor<IcmpSocketId<I, D, BT>> for crate::lock_ordering::IcmpSocketState<I>
{
    type Data = IcmpSocketState<I, D, BT>;
}