1use lock_order::lock::{DelegatedOrderedLockAccess, LockLevelFor};
8use lock_order::relation::LockBefore;
9use log::trace;
10use net_types::ip::{Ip, IpMarked, Ipv4, Ipv4Addr, Ipv4SourceAddr, Ipv6, Ipv6Addr, Ipv6SourceAddr};
11use net_types::{MulticastAddr, SpecifiedAddr};
12use netstack3_base::socket::{SocketCookie, SocketIpAddr};
13use netstack3_base::{
14 CounterContext, FrameDestination, Icmpv4ErrorCode, Icmpv6ErrorCode, Marks,
15 ResourceCounterContext, TokenBucket, WeakDeviceIdentifier,
16};
17use netstack3_datagram::{self as datagram};
18use netstack3_device::{BaseDeviceId, DeviceId, DeviceStateSpec, WeakDeviceId, for_any_device_id};
19use netstack3_hashmap::HashMap;
20use netstack3_icmp_echo::{
21 self as icmp_echo, IcmpEchoBoundStateContext, IcmpEchoContextMarker,
22 IcmpEchoIpTransportContext, IcmpEchoStateContext, IcmpSocketId, IcmpSocketSet, IcmpSocketState,
23 IcmpSockets,
24};
25use netstack3_ip::device::{self, IpDeviceBindingsContext, IpDeviceIpExt};
26use netstack3_ip::gmp::{IgmpCounters, MldCounters};
27use netstack3_ip::icmp::{
28 self, IcmpIpTransportContext, IcmpRxCounters, IcmpState, IcmpTxCounters, Icmpv4Error,
29 Icmpv6Error, InnerIcmpContext, InnerIcmpv4Context, NdpCounters,
30};
31use netstack3_ip::multicast_forwarding::MulticastForwardingState;
32use netstack3_ip::raw::RawIpSocketMap;
33use netstack3_ip::{
34 self as ip, BaseRoutingTableState, FragmentContext, IpCounters, IpDeviceContext, IpHeaderInfo,
35 IpLayerBindingsContext, IpLayerIpExt, IpPacketFragmentCache, IpRouteTableContext,
36 IpRouteTablesContext, IpStateContext, IpStateInner, IpTransportContext,
37 IpTransportDispatchContext, LocalDeliveryPacketInfo, MulticastMembershipHandler, PmtuCache,
38 PmtuContext, ResolveRouteError, ResolvedRoute, RoutingTable, RoutingTableId, RulesTable,
39 SocketMetadata,
40};
41use netstack3_sync::rc::Primary;
42use netstack3_tcp::{DualStackTcpSocketId, TcpBindingsTypes, TcpIpTransportContext};
43use netstack3_udp::{DualStackUdpSocketId, UdpBindingsTypes, UdpIpTransportContext};
44use packet::{BufferMut, ParseBuffer};
45use packet_formats::ip::{IpProto, Ipv4Proto, Ipv6Proto};
46
47use crate::context::WrapLockLevel;
48use crate::context::prelude::*;
49use crate::{BindingsContext, BindingsTypes, CoreCtx, StackState};
50
51impl<I, BT, L> FragmentContext<I, BT> for CoreCtx<'_, BT, L>
52where
53 I: IpLayerIpExt,
54 BT: BindingsTypes,
55 L: LockBefore<crate::lock_ordering::IpStateFragmentCache<I>>,
56{
57 fn with_state_mut<O, F: FnOnce(&mut IpPacketFragmentCache<I, BT>) -> O>(&mut self, cb: F) -> O {
58 let mut cache = self.lock::<crate::lock_ordering::IpStateFragmentCache<I>>();
59 cb(&mut cache)
60 }
61}
62
63impl<BC: BindingsContext, L: LockBefore<crate::lock_ordering::IpStatePmtuCache<Ipv4>>>
64 PmtuContext<Ipv4, BC> for CoreCtx<'_, BC, L>
65{
66 fn with_state_mut<O, F: FnOnce(&mut PmtuCache<Ipv4, BC>) -> O>(&mut self, cb: F) -> O {
67 let mut cache = self.lock::<crate::lock_ordering::IpStatePmtuCache<Ipv4>>();
68 cb(&mut cache)
69 }
70}
71
72impl<BC: BindingsContext, L: LockBefore<crate::lock_ordering::IpStatePmtuCache<Ipv6>>>
73 PmtuContext<Ipv6, BC> for CoreCtx<'_, BC, L>
74{
75 fn with_state_mut<O, F: FnOnce(&mut PmtuCache<Ipv6, BC>) -> O>(&mut self, cb: F) -> O {
76 let mut cache = self.lock::<crate::lock_ordering::IpStatePmtuCache<Ipv6>>();
77 cb(&mut cache)
78 }
79}
80
// Lets transport/socket layers join and leave IP multicast groups and select
// an egress device for a multicast destination without a bound device.
impl<
    I: Ip + IpDeviceIpExt + IpLayerIpExt,
    BC: BindingsContext
        + IpDeviceBindingsContext<I, Self::DeviceId>
        + IpLayerBindingsContext<I, Self::DeviceId>,
    L: LockBefore<crate::lock_ordering::IpState<I>>,
> MulticastMembershipHandler<I, BC> for CoreCtx<'_, BC, L>
where
    Self: device::IpDeviceConfigurationContext<I, BC> + IpStateContext<I, BC> + IpDeviceContext<I>,
{
    /// Joins the multicast group `addr` on `device`, delegating to the IP
    /// device layer.
    fn join_multicast_group(
        &mut self,
        bindings_ctx: &mut BC,
        device: &Self::DeviceId,
        addr: MulticastAddr<I::Addr>,
    ) {
        ip::device::join_ip_multicast::<I, _, _>(self, bindings_ctx, device, addr)
    }

    /// Leaves the multicast group `addr` on `device`, delegating to the IP
    /// device layer.
    fn leave_multicast_group(
        &mut self,
        bindings_ctx: &mut BC,
        device: &Self::DeviceId,
        addr: MulticastAddr<I::Addr>,
    ) {
        ip::device::leave_ip_multicast::<I, _, _>(self, bindings_ctx, device, addr)
    }

    /// Resolves an output route to the multicast destination `addr` (under
    /// `marks`) and returns the device that route selects.
    fn select_device_for_multicast_group(
        &mut self,
        addr: MulticastAddr<I::Addr>,
        marks: &Marks,
    ) -> Result<Self::DeviceId, ResolveRouteError> {
        let remote_ip = SocketIpAddr::new_from_multicast(addr);
        let ResolvedRoute {
            src_addr: _,
            device,
            local_delivery_device,
            next_hop: _,
            internal_forwarding: _,
        } = ip::resolve_output_route_to_destination(self, None, None, Some(remote_ip), marks)?;
        // Route resolution for a multicast destination is not expected to
        // yield a distinct local-delivery device; flag it in debug builds.
        debug_assert!(local_delivery_device.is_none(), "{:?}", local_delivery_device);
        Ok(device)
    }
}
128
129impl<BT: BindingsTypes, I: datagram::DualStackIpExt, L> CounterContext<IcmpTxCounters<I>>
130 for CoreCtx<'_, BT, L>
131{
132 fn counters(&self) -> &IcmpTxCounters<I> {
133 &self
134 .unlocked_access::<crate::lock_ordering::UnlockedState>()
135 .inner_icmp_state::<I>()
136 .tx_counters
137 }
138}
139
140impl<BT: BindingsTypes, I: datagram::DualStackIpExt, L> CounterContext<IcmpRxCounters<I>>
141 for CoreCtx<'_, BT, L>
142{
143 fn counters(&self) -> &IcmpRxCounters<I> {
144 &self
145 .unlocked_access::<crate::lock_ordering::UnlockedState>()
146 .inner_icmp_state::<I>()
147 .rx_counters
148 }
149}
150
151impl<BT: BindingsTypes, L> CounterContext<IgmpCounters> for CoreCtx<'_, BT, L> {
152 fn counters(&self) -> &IgmpCounters {
153 &self
154 .unlocked_access::<crate::lock_ordering::UnlockedState>()
155 .inner_ip_state::<Ipv4>()
156 .igmp_counters()
157 }
158}
159
impl<BT: BindingsTypes, L> ResourceCounterContext<DeviceId<BT>, IgmpCounters>
    for CoreCtx<'_, BT, L>
{
    /// Returns the per-device IGMP counters by dispatching on the concrete
    /// device variant and delegating to the `BaseDeviceId` impl below.
    fn per_resource_counters<'a>(&'a self, device_id: &'a DeviceId<BT>) -> &'a IgmpCounters {
        for_any_device_id!(
            DeviceId,
            device_id,
            id => self.per_resource_counters(id)
        )
    }
}
171
172impl<BT: BindingsTypes, D: DeviceStateSpec, L>
173 ResourceCounterContext<BaseDeviceId<D, BT>, IgmpCounters> for CoreCtx<'_, BT, L>
174{
175 fn per_resource_counters<'a>(&'a self, device_id: &'a BaseDeviceId<D, BT>) -> &'a IgmpCounters {
176 device_id
177 .device_state(
178 &self.unlocked_access::<crate::lock_ordering::UnlockedState>().device.origin,
179 )
180 .as_ref()
181 .igmp_counters()
182 }
183}
184
185impl<BT: BindingsTypes, L> CounterContext<MldCounters> for CoreCtx<'_, BT, L> {
186 fn counters(&self) -> &MldCounters {
187 &self
188 .unlocked_access::<crate::lock_ordering::UnlockedState>()
189 .inner_ip_state::<Ipv4>()
190 .mld_counters()
191 }
192}
193
impl<BT: BindingsTypes, L> ResourceCounterContext<DeviceId<BT>, MldCounters>
    for CoreCtx<'_, BT, L>
{
    /// Returns the per-device MLD counters by dispatching on the concrete
    /// device variant and delegating to the `BaseDeviceId` impl below.
    fn per_resource_counters<'a>(&'a self, device_id: &'a DeviceId<BT>) -> &'a MldCounters {
        for_any_device_id!(
            DeviceId,
            device_id,
            id => self.per_resource_counters(id)
        )
    }
}
205
206impl<BT: BindingsTypes, D: DeviceStateSpec, L>
207 ResourceCounterContext<BaseDeviceId<D, BT>, MldCounters> for CoreCtx<'_, BT, L>
208{
209 fn per_resource_counters<'a>(&'a self, device_id: &'a BaseDeviceId<D, BT>) -> &'a MldCounters {
210 device_id
211 .device_state(
212 &self.unlocked_access::<crate::lock_ordering::UnlockedState>().device.origin,
213 )
214 .as_ref()
215 .mld_counters()
216 }
217}
218
219impl<BT: BindingsTypes, L> CounterContext<NdpCounters> for CoreCtx<'_, BT, L> {
220 fn counters(&self) -> &NdpCounters {
221 &self.unlocked_access::<crate::lock_ordering::UnlockedState>().ipv6.icmp.ndp_counters
222 }
223}
224
225impl<
226 BC: BindingsContext,
227 L: LockBefore<crate::lock_ordering::IcmpBoundMap<Ipv4>>
228 + LockBefore<crate::lock_ordering::TcpAllSocketsSet<Ipv4>>
229 + LockBefore<crate::lock_ordering::UdpAllSocketsSet<Ipv4>>,
230> InnerIcmpv4Context<BC> for CoreCtx<'_, BC, L>
231{
232 fn should_send_timestamp_reply(&self) -> bool {
233 self.unlocked_access::<crate::lock_ordering::UnlockedState>().ipv4.icmp.send_timestamp_reply
234 }
235}
236
237impl<BT: BindingsTypes, I: IpLayerIpExt, L> CounterContext<IpCounters<I>> for CoreCtx<'_, BT, L> {
238 fn counters(&self) -> &IpCounters<I> {
239 &self.unlocked_access::<crate::lock_ordering::UnlockedState>().inner_ip_state().counters()
240 }
241}
242
impl<BT: BindingsTypes, I: IpLayerIpExt, L> ResourceCounterContext<DeviceId<BT>, IpCounters<I>>
    for CoreCtx<'_, BT, L>
{
    /// Returns the per-device IP-layer counters by dispatching on the
    /// concrete device variant and delegating to the `BaseDeviceId` impl
    /// below.
    fn per_resource_counters<'a>(&'a self, device_id: &'a DeviceId<BT>) -> &'a IpCounters<I> {
        for_any_device_id!(
            DeviceId,
            device_id,
            id => self.per_resource_counters(id)
        )
    }
}
254
255impl<BT: BindingsTypes, D: DeviceStateSpec, I: IpLayerIpExt, L>
256 ResourceCounterContext<BaseDeviceId<D, BT>, IpCounters<I>> for CoreCtx<'_, BT, L>
257{
258 fn per_resource_counters<'a>(
259 &'a self,
260 device_id: &'a BaseDeviceId<D, BT>,
261 ) -> &'a IpCounters<I> {
262 device_id
263 .device_state(
264 &self.unlocked_access::<crate::lock_ordering::UnlockedState>().device.origin,
265 )
266 .as_ref()
267 .ip_counters::<I>()
268 }
269}
270
#[netstack3_macros::instantiate_ip_impl_block(I)]
impl<I, BC, L> IpStateContext<I, BC> for CoreCtx<'_, BC, L>
where
    I: IpLayerIpExt,
    BC: BindingsContext,
    L: LockBefore<crate::lock_ordering::IpStateRulesTable<I>>,
{
    // While the rules-table lock is held, further lock acquisitions go
    // through a core context fixed at the rules-table lock level.
    type IpRouteTablesCtx<'a> =
        CoreCtx<'a, BC, WrapLockLevel<crate::lock_ordering::IpStateRulesTable<I>>>;

    /// Runs `cb` with read access to the route-rules table plus a core
    /// context restricted to lock levels ordered after the rules-table lock.
    fn with_rules_table<
        O,
        F: FnOnce(&mut Self::IpRouteTablesCtx<'_>, &RulesTable<I, Self::DeviceId, BC>) -> O,
    >(
        &mut self,
        cb: F,
    ) -> O {
        let (rules_table, mut restricted) =
            self.read_lock_and::<crate::lock_ordering::IpStateRulesTable<I>>();
        cb(&mut restricted, &rules_table)
    }

    /// Like [`IpStateContext::with_rules_table`], but with write access to
    /// the rules table.
    fn with_rules_table_mut<
        O,
        F: FnOnce(&mut Self::IpRouteTablesCtx<'_>, &mut RulesTable<I, Self::DeviceId, BC>) -> O,
    >(
        &mut self,
        cb: F,
    ) -> O {
        let (mut rules_table, mut restricted) =
            self.write_lock_and::<crate::lock_ordering::IpStateRulesTable<I>>();
        cb(&mut restricted, &mut rules_table)
    }
}
305
#[netstack3_macros::instantiate_ip_impl_block(I)]
impl<I, BC, L> IpRouteTablesContext<I, BC> for CoreCtx<'_, BC, L>
where
    I: IpLayerIpExt,
    BC: BindingsContext,
    L: LockBefore<crate::lock_ordering::IpStateRoutingTables<I>>,
{
    type Ctx<'a> = CoreCtx<'a, BC, WrapLockLevel<crate::lock_ordering::IpStateRoutingTables<I>>>;

    /// Returns (a clone of) the ID of the main routing table, read from
    /// unlocked state.
    fn main_table_id(&self) -> RoutingTableId<I, Self::DeviceId, BC> {
        self.unlocked_access::<crate::lock_ordering::UnlockedState>()
            .inner_ip_state()
            .main_table_id()
            .clone()
    }

    /// Runs `cb` with access to the map of all routing tables (held under
    /// its lock) plus a core context restricted to later lock levels.
    fn with_ip_routing_tables<
        O,
        F: FnOnce(
            &mut Self::Ctx<'_>,
            &HashMap<
                RoutingTableId<I, Self::DeviceId, BC>,
                Primary<BaseRoutingTableState<I, Self::DeviceId, BC>>,
            >,
        ) -> O,
    >(
        &mut self,
        cb: F,
    ) -> O {
        let (table, mut ctx) = self.lock_and::<crate::lock_ordering::IpStateRoutingTables<I>>();
        cb(&mut ctx, &table)
    }

    /// Runs `cb` with mutable access to the map of all routing tables; no
    /// restricted context is handed out here.
    fn with_ip_routing_tables_mut<
        O,
        F: FnOnce(
            &mut HashMap<
                RoutingTableId<I, Self::DeviceId, BC>,
                Primary<BaseRoutingTableState<I, Self::DeviceId, BC>>,
            >,
        ) -> O,
    >(
        &mut self,
        cb: F,
    ) -> O {
        let mut tables = self.lock::<crate::lock_ordering::IpStateRoutingTables<I>>();
        cb(&mut *tables)
    }
}
355
#[netstack3_macros::instantiate_ip_impl_block(I)]
impl<I, BC, L> IpRouteTableContext<I, BC> for CoreCtx<'_, BC, L>
where
    I: IpLayerIpExt,
    BC: BindingsContext,
    L: LockBefore<crate::lock_ordering::IpStateRoutingTable<I>>,
{
    type IpDeviceIdCtx<'a> =
        CoreCtx<'a, BC, WrapLockLevel<crate::lock_ordering::IpStateRoutingTable<I>>>;

    /// Runs `cb` with read access to the routing table identified by
    /// `table_id` plus a core context restricted to later lock levels.
    fn with_ip_routing_table<
        O,
        F: FnOnce(&mut Self::IpDeviceIdCtx<'_>, &RoutingTable<I, Self::DeviceId>) -> O,
    >(
        &mut self,
        table_id: &RoutingTableId<I, Self::DeviceId, BC>,
        cb: F,
    ) -> O {
        // Adopt `table_id` into the locked context so the per-table lock can
        // be acquired through the lock-ordering machinery.
        let mut table = self.adopt(table_id);
        let (table, mut restricted) = table
            .read_lock_with_and::<crate::lock_ordering::IpStateRoutingTable<I>, _>(|c| c.right());
        let mut restricted = restricted.cast_core_ctx();
        cb(&mut restricted, &table)
    }

    /// Like [`IpRouteTableContext::with_ip_routing_table`], but with write
    /// access to the table.
    fn with_ip_routing_table_mut<
        O,
        F: FnOnce(&mut Self::IpDeviceIdCtx<'_>, &mut RoutingTable<I, Self::DeviceId>) -> O,
    >(
        &mut self,
        table_id: &RoutingTableId<I, Self::DeviceId, BC>,
        cb: F,
    ) -> O {
        let mut table = self.adopt(table_id);
        let (mut table, mut restricted) = table
            .write_lock_with_and::<crate::lock_ordering::IpStateRoutingTable<I>, _>(|c| c.right());
        let mut restricted = restricted.cast_core_ctx();
        cb(&mut restricted, &mut table)
    }
}
396
/// A socket resolved by early demux on the receive path.
///
/// `early_demux` looks the socket up from the packet's addresses before full
/// dispatch; the result is later handed back to the matching transport's
/// `receive_ip_packet`.
pub enum EarlyDemuxSocket<I, D, BT>
where
    I: netstack3_datagram::IpExt + netstack3_tcp::DualStackIpExt,
    D: WeakDeviceIdentifier,
    BT: UdpBindingsTypes + TcpBindingsTypes,
{
    /// A UDP socket.
    UdpSocket(DualStackUdpSocketId<I, D, BT>),
    /// A TCP socket.
    TcpSocket(DualStackTcpSocketId<I, D, BT>),
}
406
407impl<I, D, BT> EarlyDemuxSocket<I, D, BT>
408where
409 I: netstack3_datagram::IpExt + netstack3_tcp::DualStackIpExt,
410 D: WeakDeviceIdentifier,
411 BT: UdpBindingsTypes + TcpBindingsTypes,
412{
413 fn into_udp(self) -> DualStackUdpSocketId<I, D, BT> {
417 match self {
418 EarlyDemuxSocket::UdpSocket(id) => id,
419 EarlyDemuxSocket::TcpSocket(_) => panic!("not a udp socket"),
420 }
421 }
422
423 fn into_tcp(self) -> DualStackTcpSocketId<I, D, BT> {
427 match self {
428 EarlyDemuxSocket::TcpSocket(id) => id,
429 EarlyDemuxSocket::UdpSocket(_) => panic!("not a tcp socket"),
430 }
431 }
432}
433
434impl<I, D, BT, CC> SocketMetadata<CC> for EarlyDemuxSocket<I, D, BT>
435where
436 CC: netstack3_base::DeviceIdContext<netstack3_base::AnyDevice, WeakDeviceId = D>,
437 I: netstack3_datagram::IpExt + netstack3_tcp::DualStackIpExt,
438 D: WeakDeviceIdentifier,
439 BT: BindingsTypes,
440 DualStackUdpSocketId<I, D, BT>: SocketMetadata<CC>,
441 DualStackTcpSocketId<I, D, BT>: SocketMetadata<CC>,
442{
443 fn socket_cookie(&self, core_ctx: &mut CC) -> SocketCookie {
444 match self {
445 Self::UdpSocket(s) => s.socket_cookie(core_ctx),
446 Self::TcpSocket(s) => s.socket_cookie(core_ctx),
447 }
448 }
449
450 fn marks(&self, core_ctx: &mut CC) -> Marks {
451 match self {
452 Self::UdpSocket(s) => s.marks(core_ctx),
453 Self::TcpSocket(s) => s.marks(core_ctx),
454 }
455 }
456}
457
impl<BC: BindingsContext, L: LockBefore<crate::lock_ordering::IcmpAllSocketsSet<Ipv4>>>
    IpTransportDispatchContext<Ipv4, BC> for CoreCtx<'_, BC, L>
{
    type EarlyDemuxSocket = EarlyDemuxSocket<Ipv4, Self::WeakDeviceId, BC>;

    /// Attempts to resolve the destination socket for an IPv4 packet before
    /// full dispatch.
    ///
    /// Returns `None` for broadcast/multicast frames, frames with an unknown
    /// destination, and protocols without early-demux support (ICMP, IGMP,
    /// reserved, other).
    fn early_demux<B: ParseBuffer>(
        &mut self,
        device: &Self::DeviceId,
        frame_dst: Option<FrameDestination>,
        src_ip: Ipv4Addr,
        dst_ip: Ipv4Addr,
        proto: Ipv4Proto,
        body: B,
    ) -> Option<Self::EarlyDemuxSocket> {
        // Only individually-addressed frames are considered for early demux.
        match frame_dst {
            Some(FrameDestination::Individual { local: _ }) => (),
            Some(FrameDestination::Broadcast) | Some(FrameDestination::Multicast) | None => {
                return None;
            }
        };

        match proto {
            Ipv4Proto::Proto(IpProto::Udp) => {
                <UdpIpTransportContext as IpTransportContext<Ipv4, _, _>>::early_demux(
                    self, device, src_ip, dst_ip, body,
                )
                .map(EarlyDemuxSocket::UdpSocket)
            }
            Ipv4Proto::Proto(IpProto::Tcp) => {
                <TcpIpTransportContext as IpTransportContext<Ipv4, _, _>>::early_demux(
                    self, device, src_ip, dst_ip, body,
                )
                .map(EarlyDemuxSocket::TcpSocket)
            }
            Ipv4Proto::Icmp
            | Ipv4Proto::Igmp
            | Ipv4Proto::Proto(IpProto::Reserved)
            | Ipv4Proto::Other(_) => None,
        }
    }

    /// Delivers a locally-destined IPv4 packet to the transport layer
    /// selected by `proto`, passing along any socket found by early demux.
    ///
    /// Unrecognized protocols yield `Icmpv4Error::ProtocolUnreachable` so the
    /// caller can emit the corresponding ICMP error.
    fn dispatch_receive_ip_packet<B: BufferMut, H: IpHeaderInfo<Ipv4>>(
        &mut self,
        bindings_ctx: &mut BC,
        device: &Self::DeviceId,
        src_ip: Ipv4SourceAddr,
        dst_ip: SpecifiedAddr<Ipv4Addr>,
        proto: Ipv4Proto,
        body: B,
        info: &LocalDeliveryPacketInfo<Ipv4, H>,
        early_demux_socket: Option<Self::EarlyDemuxSocket>,
    ) -> Result<(), Icmpv4Error> {
        match proto {
            Ipv4Proto::Icmp => {
                <IcmpIpTransportContext as IpTransportContext<Ipv4, _, _>>::receive_ip_packet(
                    self,
                    bindings_ctx,
                    device,
                    src_ip,
                    dst_ip,
                    body,
                    info,
                    // ICMP has no early demux, so there is no socket to pass.
                    None,
                )
                .map_err(|(_body, err)| err)
            }
            Ipv4Proto::Igmp => {
                // IGMP is consumed by the device layer; it cannot fail
                // delivery in a way that warrants an ICMP error.
                device::receive_igmp_packet(self, bindings_ctx, device, src_ip, dst_ip, body, info);
                Ok(())
            }
            Ipv4Proto::Proto(IpProto::Udp) => {
                <UdpIpTransportContext as IpTransportContext<Ipv4, _, _>>::receive_ip_packet(
                    self,
                    bindings_ctx,
                    device,
                    src_ip,
                    dst_ip,
                    body,
                    info,
                    // `into_udp` panics if early demux resolved a non-UDP
                    // socket; that would be a dispatch bug.
                    early_demux_socket.map(EarlyDemuxSocket::into_udp),
                )
                .map_err(|(_body, err)| err)
            }
            Ipv4Proto::Proto(IpProto::Tcp) => {
                <TcpIpTransportContext as IpTransportContext<Ipv4, _, _>>::receive_ip_packet(
                    self,
                    bindings_ctx,
                    device,
                    src_ip,
                    dst_ip,
                    body,
                    info,
                    early_demux_socket.map(EarlyDemuxSocket::into_tcp),
                )
                .map_err(|(_body, err)| err)
            }
            Ipv4Proto::Proto(IpProto::Reserved) | Ipv4Proto::Other(_) => {
                Err(Icmpv4Error::ProtocolUnreachable)
            }
        }
    }
}
563
564impl<BC: BindingsContext, L: LockBefore<crate::lock_ordering::IcmpAllSocketsSet<Ipv6>>>
565 IpTransportDispatchContext<Ipv6, BC> for CoreCtx<'_, BC, L>
566{
567 type EarlyDemuxSocket = EarlyDemuxSocket<Ipv6, Self::WeakDeviceId, BC>;
568
569 fn early_demux<B: ParseBuffer>(
570 &mut self,
571 device: &Self::DeviceId,
572 frame_dst: Option<FrameDestination>,
573 src_ip: Ipv6Addr,
574 dst_ip: Ipv6Addr,
575 proto: Ipv6Proto,
576 body: B,
577 ) -> Option<Self::EarlyDemuxSocket> {
578 match frame_dst {
582 Some(FrameDestination::Individual { local: _ }) => (),
583 Some(FrameDestination::Broadcast) | Some(FrameDestination::Multicast) | None => {
584 return None;
585 }
586 };
587
588 match proto {
589 Ipv6Proto::Proto(IpProto::Udp) => {
590 <UdpIpTransportContext as IpTransportContext<Ipv6, _, _>>::early_demux(
591 self, device, src_ip, dst_ip, body,
592 )
593 .map(EarlyDemuxSocket::UdpSocket)
594 }
595 Ipv6Proto::Proto(IpProto::Tcp) => {
596 <TcpIpTransportContext as IpTransportContext<Ipv6, _, _>>::early_demux(
597 self, device, src_ip, dst_ip, body,
598 )
599 .map(EarlyDemuxSocket::TcpSocket)
600 }
601 Ipv6Proto::Icmpv6
602 | Ipv6Proto::NoNextHeader
603 | Ipv6Proto::Proto(IpProto::Reserved)
604 | Ipv6Proto::Other(_) => None,
605 }
606 }
607
608 fn dispatch_receive_ip_packet<B: BufferMut, H: IpHeaderInfo<Ipv6>>(
609 &mut self,
610 bindings_ctx: &mut BC,
611 device: &Self::DeviceId,
612 src_ip: Ipv6SourceAddr,
613 dst_ip: SpecifiedAddr<Ipv6Addr>,
614 proto: Ipv6Proto,
615 body: B,
616 info: &LocalDeliveryPacketInfo<Ipv6, H>,
617 early_demux_socket: Option<Self::EarlyDemuxSocket>,
618 ) -> Result<(), Icmpv6Error> {
619 match proto {
620 Ipv6Proto::Icmpv6 => {
621 <IcmpIpTransportContext as IpTransportContext<Ipv6, _, _>>::receive_ip_packet(
622 self,
623 bindings_ctx,
624 device,
625 src_ip,
626 dst_ip,
627 body,
628 info,
629 None,
630 )
631 .map_err(|(_body, err)| err)
632 }
633 Ipv6Proto::NoNextHeader => Ok(()),
637 Ipv6Proto::Proto(IpProto::Tcp) => {
638 <TcpIpTransportContext as IpTransportContext<Ipv6, _, _>>::receive_ip_packet(
639 self,
640 bindings_ctx,
641 device,
642 src_ip,
643 dst_ip,
644 body,
645 info,
646 early_demux_socket.map(EarlyDemuxSocket::into_tcp),
647 )
648 .map_err(|(_body, err)| err)
649 }
650 Ipv6Proto::Proto(IpProto::Udp) => {
651 <UdpIpTransportContext as IpTransportContext<Ipv6, _, _>>::receive_ip_packet(
652 self,
653 bindings_ctx,
654 device,
655 src_ip,
656 dst_ip,
657 body,
658 info,
659 early_demux_socket.map(EarlyDemuxSocket::into_udp),
660 )
661 .map_err(|(_body, err)| err)
662 }
663 Ipv6Proto::Proto(IpProto::Reserved) | Ipv6Proto::Other(_) => {
664 unreachable!()
666 }
667 }
668 }
669}
670
impl<
    BC: BindingsContext,
    L: LockBefore<crate::lock_ordering::IcmpBoundMap<Ipv4>>
        + LockBefore<crate::lock_ordering::TcpAllSocketsSet<Ipv4>>
        + LockBefore<crate::lock_ordering::UdpAllSocketsSet<Ipv4>>,
> InnerIcmpContext<Ipv4, BC> for CoreCtx<'_, BC, L>
{
    type EchoTransportContext = IcmpEchoIpTransportContext;

    /// Routes a received ICMPv4 error message to the transport layer that
    /// owns the offending packet, identified by the original packet's
    /// protocol; errors for unsupported protocols are logged and dropped.
    fn receive_icmp_error(
        &mut self,
        bindings_ctx: &mut BC,
        device: &DeviceId<BC>,
        original_src_ip: Option<SpecifiedAddr<Ipv4Addr>>,
        original_dst_ip: SpecifiedAddr<Ipv4Addr>,
        original_proto: Ipv4Proto,
        original_body: &[u8],
        err: Icmpv4ErrorCode,
    ) {
        // Count the error both stack-wide and per-device.
        self.increment_both(device, |c: &IpCounters<Ipv4>| &c.receive_icmp_error);
        trace!("InnerIcmpContext<Ipv4>::receive_icmp_error({:?})", err);

        match original_proto {
            Ipv4Proto::Icmp => {
                <IcmpIpTransportContext as IpTransportContext<Ipv4, _, _>>::receive_icmp_error(
                    self,
                    bindings_ctx,
                    device,
                    original_src_ip,
                    original_dst_ip,
                    original_body,
                    err,
                )
            }
            Ipv4Proto::Proto(IpProto::Tcp) => {
                <TcpIpTransportContext as IpTransportContext<Ipv4, _, _>>::receive_icmp_error(
                    self,
                    bindings_ctx,
                    device,
                    original_src_ip,
                    original_dst_ip,
                    original_body,
                    err,
                )
            }
            Ipv4Proto::Proto(IpProto::Udp) => {
                <UdpIpTransportContext as IpTransportContext<Ipv4, _, _>>::receive_icmp_error(
                    self,
                    bindings_ctx,
                    device,
                    original_src_ip,
                    original_dst_ip,
                    original_body,
                    err,
                )
            }
            Ipv4Proto::Igmp | Ipv4Proto::Other(_) | Ipv4Proto::Proto(IpProto::Reserved) => {
                trace!(
                    "Received ICMP error message ({:?}) for unsupported IP protocol: {:?}",
                    err, original_proto
                );
            }
        }
    }

    /// Runs `cb` with the token bucket that rate-limits outgoing ICMPv4
    /// error messages, held under its lock.
    fn with_error_send_bucket_mut<O, F: FnOnce(&mut TokenBucket<BC::Instant>) -> O>(
        &mut self,
        cb: F,
    ) -> O {
        cb(&mut self.lock::<crate::lock_ordering::IcmpTokenBucket<Ipv4>>())
    }
}
743
impl<
    BC: BindingsContext,
    L: LockBefore<crate::lock_ordering::IcmpBoundMap<Ipv6>>
        + LockBefore<crate::lock_ordering::TcpAllSocketsSet<Ipv6>>
        + LockBefore<crate::lock_ordering::UdpAllSocketsSet<Ipv6>>,
> InnerIcmpContext<Ipv6, BC> for CoreCtx<'_, BC, L>
{
    type EchoTransportContext = IcmpEchoIpTransportContext;

    /// Routes a received ICMPv6 error message to the transport layer that
    /// owns the offending packet, identified by the original packet's next
    /// header; errors for unsupported protocols are logged and dropped.
    fn receive_icmp_error(
        &mut self,
        bindings_ctx: &mut BC,
        device: &DeviceId<BC>,
        original_src_ip: Option<SpecifiedAddr<Ipv6Addr>>,
        original_dst_ip: SpecifiedAddr<Ipv6Addr>,
        original_next_header: Ipv6Proto,
        original_body: &[u8],
        err: Icmpv6ErrorCode,
    ) {
        // Count the error both stack-wide and per-device.
        self.increment_both(device, |c: &IpCounters<Ipv6>| &c.receive_icmp_error);
        trace!("InnerIcmpContext<Ipv6>::receive_icmp_error({:?})", err);

        match original_next_header {
            Ipv6Proto::Icmpv6 => {
                <IcmpIpTransportContext as IpTransportContext<Ipv6, _, _>>::receive_icmp_error(
                    self,
                    bindings_ctx,
                    device,
                    original_src_ip,
                    original_dst_ip,
                    original_body,
                    err,
                )
            }
            Ipv6Proto::Proto(IpProto::Tcp) => {
                <TcpIpTransportContext as IpTransportContext<Ipv6, _, _>>::receive_icmp_error(
                    self,
                    bindings_ctx,
                    device,
                    original_src_ip,
                    original_dst_ip,
                    original_body,
                    err,
                )
            }
            Ipv6Proto::Proto(IpProto::Udp) => {
                <UdpIpTransportContext as IpTransportContext<Ipv6, _, _>>::receive_icmp_error(
                    self,
                    bindings_ctx,
                    device,
                    original_src_ip,
                    original_dst_ip,
                    original_body,
                    err,
                )
            }
            Ipv6Proto::NoNextHeader | Ipv6Proto::Other(_) | Ipv6Proto::Proto(IpProto::Reserved) => {
                trace!(
                    "Received ICMPv6 error message ({:?}) for unsupported IP protocol: {:?}",
                    err, original_next_header
                );
            }
        }
    }

    /// Runs `cb` with the token bucket that rate-limits outgoing ICMPv6
    /// error messages, held under its lock.
    fn with_error_send_bucket_mut<O, F: FnOnce(&mut TokenBucket<BC::Instant>) -> O>(
        &mut self,
        cb: F,
    ) -> O {
        cb(&mut self.lock::<crate::lock_ordering::IcmpTokenBucket<Ipv6>>())
    }
}
816
// Marker impl: no items are overridden.
impl<L, BC: BindingsContext> icmp::IcmpStateContext for CoreCtx<'_, BC, L> {}
818
// Marker impl: no items are overridden.
impl<BT: BindingsTypes, L> IcmpEchoContextMarker for CoreCtx<'_, BT, L> {}
820
#[netstack3_macros::instantiate_ip_impl_block(I)]
impl<I, BC: BindingsContext, L: LockBefore<crate::lock_ordering::IcmpAllSocketsSet<I>>>
    IcmpEchoStateContext<I, BC> for CoreCtx<'_, BC, L>
{
    type SocketStateCtx<'a> =
        CoreCtx<'a, BC, WrapLockLevel<crate::lock_ordering::IcmpSocketState<I>>>;

    /// Runs `cb` with exclusive access to the set of all ICMP echo sockets.
    fn with_all_sockets_mut<O, F: FnOnce(&mut IcmpSocketSet<I, Self::WeakDeviceId, BC>) -> O>(
        &mut self,
        cb: F,
    ) -> O {
        cb(&mut self.write_lock::<crate::lock_ordering::IcmpAllSocketsSet<I>>())
    }

    /// Runs `cb` with shared access to the set of all ICMP echo sockets.
    fn with_all_sockets<O, F: FnOnce(&IcmpSocketSet<I, Self::WeakDeviceId, BC>) -> O>(
        &mut self,
        cb: F,
    ) -> O {
        cb(&self.read_lock::<crate::lock_ordering::IcmpAllSocketsSet<I>>())
    }

    /// Runs `cb` with read access to `id`'s socket state plus a core context
    /// restricted to lock levels ordered after the per-socket state lock.
    fn with_socket_state<
        O,
        F: FnOnce(&mut Self::SocketStateCtx<'_>, &IcmpSocketState<I, Self::WeakDeviceId, BC>) -> O,
    >(
        &mut self,
        id: &IcmpSocketId<I, Self::WeakDeviceId, BC>,
        cb: F,
    ) -> O {
        // Adopt `id` into the locked context so its per-socket state lock can
        // be acquired through the lock-ordering machinery.
        let mut locked = self.adopt(id);
        let (socket_state, mut restricted) =
            locked.read_lock_with_and::<crate::lock_ordering::IcmpSocketState<I>, _>(|c| c.right());
        let mut restricted = restricted.cast_core_ctx();
        cb(&mut restricted, &socket_state)
    }

    /// Like [`IcmpEchoStateContext::with_socket_state`], but with write
    /// access to the socket state.
    fn with_socket_state_mut<
        O,
        F: FnOnce(&mut Self::SocketStateCtx<'_>, &mut IcmpSocketState<I, Self::WeakDeviceId, BC>) -> O,
    >(
        &mut self,
        id: &IcmpSocketId<I, Self::WeakDeviceId, BC>,
        cb: F,
    ) -> O {
        let mut locked = self.adopt(id);
        let (mut socket_state, mut restricted) = locked
            .write_lock_with_and::<crate::lock_ordering::IcmpSocketState<I>, _>(|c| c.right());
        let mut restricted = restricted.cast_core_ctx();
        cb(&mut restricted, &mut socket_state)
    }

    /// Runs `cb` with a core context advanced to the socket-state lock level
    /// without acquiring any individual socket's lock.
    fn with_bound_state_context<O, F: FnOnce(&mut Self::SocketStateCtx<'_>) -> O>(
        &mut self,
        cb: F,
    ) -> O {
        cb(&mut self.cast_locked::<crate::lock_ordering::IcmpSocketState<I>>())
    }

    /// Visits every ICMP echo socket, holding the all-sockets-set read lock
    /// for the whole iteration and each socket's state read lock during its
    /// visit.
    fn for_each_socket<
        F: FnMut(
            &mut Self::SocketStateCtx<'_>,
            &IcmpSocketId<I, Self::WeakDeviceId, BC>,
            &IcmpSocketState<I, Self::WeakDeviceId, BC>,
        ),
    >(
        &mut self,
        mut cb: F,
    ) {
        let (all_sockets, mut locked) =
            self.read_lock_and::<crate::lock_ordering::IcmpAllSocketsSet<I>>();
        all_sockets.keys().for_each(|id| {
            let id = IcmpSocketId::from(id.clone());
            let mut locked = locked.adopt(&id);
            let (socket_state, mut restricted) = locked
                .read_lock_with_and::<crate::lock_ordering::IcmpSocketState<I>, _>(|c| c.right());
            let mut restricted = restricted.cast_core_ctx();
            cb(&mut restricted, &id, &socket_state);
        });
    }
}
901
902#[netstack3_macros::instantiate_ip_impl_block(I)]
903impl<I, BC: BindingsContext, L: LockBefore<crate::lock_ordering::IcmpBoundMap<I>>>
904 IcmpEchoBoundStateContext<I, BC> for CoreCtx<'_, BC, L>
905{
906 type IpSocketsCtx<'a> = CoreCtx<'a, BC, WrapLockLevel<crate::lock_ordering::IcmpBoundMap<I>>>;
907 fn with_icmp_ctx_and_sockets_mut<
908 O,
909 F: FnOnce(
910 &mut Self::IpSocketsCtx<'_>,
911 &mut icmp_echo::BoundSockets<I, Self::WeakDeviceId, BC>,
912 ) -> O,
913 >(
914 &mut self,
915 cb: F,
916 ) -> O {
917 let (mut sockets, mut core_ctx) =
918 self.write_lock_and::<crate::lock_ordering::IcmpBoundMap<I>>();
919 cb(&mut core_ctx, &mut sockets)
920 }
921}
922
// The impls below wire individual pieces of state into the lock-ordering
// machinery: each `DelegatedOrderedLockAccess` names the inner state struct
// that actually owns the locked data, and each `LockLevelFor` associates a
// lock-ordering level with the data it guards.
// IP fragment cache: owned by the per-IP-version inner IP state.
impl<I: IpLayerIpExt, BT: BindingsTypes> DelegatedOrderedLockAccess<IpPacketFragmentCache<I, BT>>
    for StackState<BT>
{
    type Inner = IpStateInner<I, DeviceId<BT>, BT>;
    fn delegate_ordered_lock_access(&self) -> &Self::Inner {
        self.inner_ip_state()
    }
}

impl<I: IpLayerIpExt, BT: BindingsTypes> LockLevelFor<StackState<BT>>
    for crate::lock_ordering::IpStateFragmentCache<I>
{
    type Data = IpPacketFragmentCache<I, BT>;
}

// PMTU cache: owned by the per-IP-version inner IP state.
impl<I: IpLayerIpExt, BT: BindingsTypes> DelegatedOrderedLockAccess<PmtuCache<I, BT>>
    for StackState<BT>
{
    type Inner = IpStateInner<I, DeviceId<BT>, BT>;
    fn delegate_ordered_lock_access(&self) -> &Self::Inner {
        self.inner_ip_state()
    }
}

impl<I: IpLayerIpExt, BT: BindingsTypes> LockLevelFor<StackState<BT>>
    for crate::lock_ordering::IpStateRulesTable<I>
{
    type Data = RulesTable<I, DeviceId<BT>, BT>;
}

// Route-rules table: owned by the per-IP-version inner IP state.
impl<I: IpLayerIpExt, BT: BindingsTypes> DelegatedOrderedLockAccess<RulesTable<I, DeviceId<BT>, BT>>
    for StackState<BT>
{
    type Inner = IpStateInner<I, DeviceId<BT>, BT>;
    fn delegate_ordered_lock_access(&self) -> &Self::Inner {
        self.inner_ip_state()
    }
}

impl<I: IpLayerIpExt, BT: BindingsTypes> LockLevelFor<StackState<BT>>
    for crate::lock_ordering::IpStatePmtuCache<I>
{
    type Data = PmtuCache<I, BT>;
}

// Map of all routing tables, keyed by table ID.
impl<I: IpLayerIpExt, BT: BindingsTypes> LockLevelFor<StackState<BT>>
    for crate::lock_ordering::IpStateRoutingTables<I>
{
    type Data = HashMap<
        RoutingTableId<I, DeviceId<BT>, BT>,
        Primary<BaseRoutingTableState<I, DeviceId<BT>, BT>>,
    >;
}

impl<I: IpLayerIpExt, BT: BindingsTypes>
    DelegatedOrderedLockAccess<
        HashMap<
            RoutingTableId<I, DeviceId<BT>, BT>,
            Primary<BaseRoutingTableState<I, DeviceId<BT>, BT>>,
        >,
    > for StackState<BT>
{
    type Inner = IpStateInner<I, DeviceId<BT>, BT>;
    fn delegate_ordered_lock_access(&self) -> &Self::Inner {
        self.inner_ip_state()
    }
}

// Individual routing tables are locked through their `RoutingTableId`, not
// through `StackState`.
impl<I: IpLayerIpExt, BT: BindingsTypes> LockLevelFor<RoutingTableId<I, DeviceId<BT>, BT>>
    for crate::lock_ordering::IpStateRoutingTable<I>
{
    type Data = RoutingTable<I, DeviceId<BT>>;
}

// Multicast forwarding state: owned by the per-IP-version inner IP state.
impl<I: IpLayerIpExt, BT: BindingsTypes>
    DelegatedOrderedLockAccess<MulticastForwardingState<I, DeviceId<BT>, BT>> for StackState<BT>
{
    type Inner = IpStateInner<I, DeviceId<BT>, BT>;
    fn delegate_ordered_lock_access(&self) -> &Self::Inner {
        self.inner_ip_state()
    }
}

impl<I: IpLayerIpExt, BT: BindingsTypes> LockLevelFor<StackState<BT>>
    for crate::lock_ordering::IpMulticastForwardingState<I>
{
    type Data = MulticastForwardingState<I, DeviceId<BT>, BT>;
}

// Raw IP socket map: owned by the per-IP-version inner IP state.
impl<I: IpLayerIpExt, BT: BindingsTypes>
    DelegatedOrderedLockAccess<RawIpSocketMap<I, WeakDeviceId<BT>, BT>> for StackState<BT>
{
    type Inner = IpStateInner<I, DeviceId<BT>, BT>;
    fn delegate_ordered_lock_access(&self) -> &Self::Inner {
        self.inner_ip_state()
    }
}

impl<I: IpLayerIpExt, BT: BindingsTypes> LockLevelFor<StackState<BT>>
    for crate::lock_ordering::AllRawIpSockets<I>
{
    type Data = RawIpSocketMap<I, WeakDeviceId<BT>, BT>;
}

// ICMP echo bound-socket map: owned by the transport-layer ICMP echo state.
impl<I: datagram::DualStackIpExt, BT: BindingsTypes>
    DelegatedOrderedLockAccess<icmp_echo::BoundSockets<I, WeakDeviceId<BT>, BT>>
    for StackState<BT>
{
    type Inner = IcmpSockets<I, WeakDeviceId<BT>, BT>;
    fn delegate_ordered_lock_access(&self) -> &Self::Inner {
        &self.transport.icmp_echo_state()
    }
}

impl<I: datagram::DualStackIpExt, BT: BindingsTypes> LockLevelFor<StackState<BT>>
    for crate::lock_ordering::IcmpBoundMap<I>
{
    type Data = icmp_echo::BoundSockets<I, WeakDeviceId<BT>, BT>;
}

// Set of all ICMP echo sockets: owned by the transport-layer ICMP echo state.
impl<I: datagram::DualStackIpExt, BT: BindingsTypes>
    DelegatedOrderedLockAccess<IcmpSocketSet<I, WeakDeviceId<BT>, BT>> for StackState<BT>
{
    type Inner = IcmpSockets<I, WeakDeviceId<BT>, BT>;
    fn delegate_ordered_lock_access(&self) -> &Self::Inner {
        &self.transport.icmp_echo_state()
    }
}

impl<I: datagram::DualStackIpExt, BT: BindingsTypes> LockLevelFor<StackState<BT>>
    for crate::lock_ordering::IcmpAllSocketsSet<I>
{
    type Data = IcmpSocketSet<I, WeakDeviceId<BT>, BT>;
}

// ICMP error-rate-limiting token bucket: owned by the ICMP state.
impl<I: datagram::DualStackIpExt, BT: BindingsTypes>
    DelegatedOrderedLockAccess<IpMarked<I, TokenBucket<BT::Instant>>> for StackState<BT>
{
    type Inner = IcmpState<I, BT>;
    fn delegate_ordered_lock_access(&self) -> &Self::Inner {
        self.inner_icmp_state()
    }
}

impl<I: datagram::DualStackIpExt, BT: BindingsTypes> LockLevelFor<StackState<BT>>
    for crate::lock_ordering::IcmpTokenBucket<I>
{
    type Data = IpMarked<I, TokenBucket<BT::Instant>>;
}

// Per-socket ICMP echo state is locked through its `IcmpSocketId`, not
// through `StackState`.
impl<I: datagram::DualStackIpExt, D: WeakDeviceIdentifier, BT: BindingsTypes>
    LockLevelFor<IcmpSocketId<I, D, BT>> for crate::lock_ordering::IcmpSocketState<I>
{
    type Data = IcmpSocketState<I, D, BT>;
}