use lock_order::lock::{DelegatedOrderedLockAccess, LockLevelFor};
use lock_order::relation::LockBefore;
use log::trace;
use net_types::ip::{Ip, IpMarked, Ipv4, Ipv4Addr, Ipv4SourceAddr, Ipv6, Ipv6Addr, Ipv6SourceAddr};
use net_types::{MulticastAddr, SpecifiedAddr};
use netstack3_base::socket::SocketIpAddr;
use netstack3_base::{
    CounterContext, Icmpv4ErrorCode, Icmpv6ErrorCode, Marks, ResourceCounterContext, TokenBucket,
    WeakDeviceIdentifier,
};
use netstack3_datagram as datagram;
use netstack3_device::{BaseDeviceId, DeviceId, DeviceStateSpec, WeakDeviceId, for_any_device_id};
use netstack3_hashmap::HashMap;
use netstack3_icmp_echo::{
    self as icmp_echo, IcmpEchoBoundStateContext, IcmpEchoContextMarker,
    IcmpEchoIpTransportContext, IcmpEchoStateContext, IcmpSocketId, IcmpSocketSet, IcmpSocketState,
    IcmpSockets,
};
use netstack3_ip::device::{self, IpDeviceBindingsContext, IpDeviceIpExt};
use netstack3_ip::gmp::{IgmpCounters, MldCounters};
use netstack3_ip::icmp::{
    self, IcmpIpTransportContext, IcmpRxCounters, IcmpState, IcmpTxCounters, InnerIcmpContext,
    InnerIcmpv4Context, NdpCounters,
};
use netstack3_ip::multicast_forwarding::MulticastForwardingState;
use netstack3_ip::raw::RawIpSocketMap;
use netstack3_ip::{
    self as ip, BaseRoutingTableState, FragmentContext, IpCounters, IpDeviceContext, IpHeaderInfo,
    IpLayerBindingsContext, IpLayerIpExt, IpPacketFragmentCache, IpRouteTableContext,
    IpRouteTablesContext, IpStateContext, IpStateInner, IpTransportContext,
    IpTransportDispatchContext, LocalDeliveryPacketInfo, MulticastMembershipHandler, PmtuCache,
    PmtuContext, ResolveRouteError, ResolvedRoute, RoutingTable, RoutingTableId, RulesTable,
    TransportReceiveError,
};
use netstack3_sync::rc::Primary;
use netstack3_tcp::TcpIpTransportContext;
use netstack3_udp::UdpIpTransportContext;
use packet::BufferMut;
use packet_formats::ip::{IpProto, Ipv4Proto, Ipv6Proto};

use crate::context::WrapLockLevel;
use crate::context::prelude::*;
use crate::{BindingsContext, BindingsTypes, CoreCtx, StackState};

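// Implementations of IP-layer traits for `CoreCtx`. Each impl below either
// acquires state through a lock level defined in `crate::lock_ordering` or
// reads lock-free state (counters, configuration) via `UnlockedState`.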
impl<I, BT, L> FragmentContext<I, BT> for CoreCtx<'_, BT, L>
where
    I: IpLayerIpExt,
    BT: BindingsTypes,
    L: LockBefore<crate::lock_ordering::IpStateFragmentCache<I>>,
{
    fn with_state_mut<O, F: FnOnce(&mut IpPacketFragmentCache<I, BT>) -> O>(&mut self, cb: F) -> O {
        let mut cache = self.lock::<crate::lock_ordering::IpStateFragmentCache<I>>();
        cb(&mut cache)
    }
}

impl<BC: BindingsContext, L: LockBefore<crate::lock_ordering::IpStatePmtuCache<Ipv4>>>
    PmtuContext<Ipv4, BC> for CoreCtx<'_, BC, L>
{
    fn with_state_mut<O, F: FnOnce(&mut PmtuCache<Ipv4, BC>) -> O>(&mut self, cb: F) -> O {
        let mut cache = self.lock::<crate::lock_ordering::IpStatePmtuCache<Ipv4>>();
        cb(&mut cache)
    }
}

impl<BC: BindingsContext, L: LockBefore<crate::lock_ordering::IpStatePmtuCache<Ipv6>>>
    PmtuContext<Ipv6, BC> for CoreCtx<'_, BC, L>
{
    fn with_state_mut<O, F: FnOnce(&mut PmtuCache<Ipv6, BC>) -> O>(&mut self, cb: F) -> O {
        let mut cache = self.lock::<crate::lock_ordering::IpStatePmtuCache<Ipv6>>();
        cb(&mut cache)
    }
}

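// Multicast group membership: joins and leaves delegate to the IP device
// layer, and device selection resolves an output route to the multicast
// destination.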
impl<
    I: Ip + IpDeviceIpExt + IpLayerIpExt,
    BC: BindingsContext
        + IpDeviceBindingsContext<I, Self::DeviceId>
        + IpLayerBindingsContext<I, Self::DeviceId>,
    L: LockBefore<crate::lock_ordering::IpState<I>>,
> MulticastMembershipHandler<I, BC> for CoreCtx<'_, BC, L>
where
    Self: device::IpDeviceConfigurationContext<I, BC> + IpStateContext<I, BC> + IpDeviceContext<I>,
{
    fn join_multicast_group(
        &mut self,
        bindings_ctx: &mut BC,
        device: &Self::DeviceId,
        addr: MulticastAddr<I::Addr>,
    ) {
        ip::device::join_ip_multicast::<I, _, _>(self, bindings_ctx, device, addr)
    }

    fn leave_multicast_group(
        &mut self,
        bindings_ctx: &mut BC,
        device: &Self::DeviceId,
        addr: MulticastAddr<I::Addr>,
    ) {
        ip::device::leave_ip_multicast::<I, _, _>(self, bindings_ctx, device, addr)
    }

    fn select_device_for_multicast_group(
        &mut self,
        addr: MulticastAddr<I::Addr>,
        marks: &Marks,
    ) -> Result<Self::DeviceId, ResolveRouteError> {
        let remote_ip = SocketIpAddr::new_from_multicast(addr);
        let ResolvedRoute {
            src_addr: _,
            device,
            local_delivery_device,
            next_hop: _,
            internal_forwarding: _,
        } = ip::resolve_output_route_to_destination(self, None, None, Some(remote_ip), marks)?;
        debug_assert!(local_delivery_device.is_none(), "{:?}", local_delivery_device);
        Ok(device)
    }
}

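// Counter access. Counters live behind `UnlockedState` and are read without
// taking any locks; per-device variants reach into the device's own state.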
impl<BT: BindingsTypes, I: datagram::DualStackIpExt, L> CounterContext<IcmpTxCounters<I>>
    for CoreCtx<'_, BT, L>
{
    fn counters(&self) -> &IcmpTxCounters<I> {
        &self
            .unlocked_access::<crate::lock_ordering::UnlockedState>()
            .inner_icmp_state::<I>()
            .tx_counters
    }
}

impl<BT: BindingsTypes, I: datagram::DualStackIpExt, L> CounterContext<IcmpRxCounters<I>>
    for CoreCtx<'_, BT, L>
{
    fn counters(&self) -> &IcmpRxCounters<I> {
        &self
            .unlocked_access::<crate::lock_ordering::UnlockedState>()
            .inner_icmp_state::<I>()
            .rx_counters
    }
}

impl<BT: BindingsTypes, L> CounterContext<IgmpCounters> for CoreCtx<'_, BT, L> {
    fn counters(&self) -> &IgmpCounters {
        &self
            .unlocked_access::<crate::lock_ordering::UnlockedState>()
            .inner_ip_state::<Ipv4>()
            .igmp_counters()
    }
}

impl<BT: BindingsTypes, L> ResourceCounterContext<DeviceId<BT>, IgmpCounters>
    for CoreCtx<'_, BT, L>
{
    fn per_resource_counters<'a>(&'a self, device_id: &'a DeviceId<BT>) -> &'a IgmpCounters {
        for_any_device_id!(
            DeviceId,
            device_id,
            id => self.per_resource_counters(id)
        )
    }
}

impl<BT: BindingsTypes, D: DeviceStateSpec, L>
    ResourceCounterContext<BaseDeviceId<D, BT>, IgmpCounters> for CoreCtx<'_, BT, L>
{
    fn per_resource_counters<'a>(&'a self, device_id: &'a BaseDeviceId<D, BT>) -> &'a IgmpCounters {
        device_id
            .device_state(
                &self.unlocked_access::<crate::lock_ordering::UnlockedState>().device.origin,
            )
            .as_ref()
            .igmp_counters()
    }
}

impl<BT: BindingsTypes, L> CounterContext<MldCounters> for CoreCtx<'_, BT, L> {
    fn counters(&self) -> &MldCounters {
        &self
            .unlocked_access::<crate::lock_ordering::UnlockedState>()
            .inner_ip_state::<Ipv6>()
            .mld_counters()
    }
}

impl<BT: BindingsTypes, L> ResourceCounterContext<DeviceId<BT>, MldCounters>
    for CoreCtx<'_, BT, L>
{
    fn per_resource_counters<'a>(&'a self, device_id: &'a DeviceId<BT>) -> &'a MldCounters {
        for_any_device_id!(
            DeviceId,
            device_id,
            id => self.per_resource_counters(id)
        )
    }
}

impl<BT: BindingsTypes, D: DeviceStateSpec, L>
    ResourceCounterContext<BaseDeviceId<D, BT>, MldCounters> for CoreCtx<'_, BT, L>
{
    fn per_resource_counters<'a>(&'a self, device_id: &'a BaseDeviceId<D, BT>) -> &'a MldCounters {
        device_id
            .device_state(
                &self.unlocked_access::<crate::lock_ordering::UnlockedState>().device.origin,
            )
            .as_ref()
            .mld_counters()
    }
}

impl<BT: BindingsTypes, L> CounterContext<NdpCounters> for CoreCtx<'_, BT, L> {
    fn counters(&self) -> &NdpCounters {
        &self.unlocked_access::<crate::lock_ordering::UnlockedState>().ipv6.icmp.ndp_counters
    }
}

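// ICMPv4 stack configuration: whether the stack responds to ICMP timestamp
// requests.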
impl<
    BC: BindingsContext,
    L: LockBefore<crate::lock_ordering::IcmpBoundMap<Ipv4>>
        + LockBefore<crate::lock_ordering::TcpAllSocketsSet<Ipv4>>
        + LockBefore<crate::lock_ordering::UdpAllSocketsSet<Ipv4>>,
> InnerIcmpv4Context<BC> for CoreCtx<'_, BC, L>
{
    fn should_send_timestamp_reply(&self) -> bool {
        self.unlocked_access::<crate::lock_ordering::UnlockedState>().ipv4.icmp.send_timestamp_reply
    }
}

impl<BT: BindingsTypes, I: IpLayerIpExt, L> CounterContext<IpCounters<I>> for CoreCtx<'_, BT, L> {
    fn counters(&self) -> &IpCounters<I> {
        &self.unlocked_access::<crate::lock_ordering::UnlockedState>().inner_ip_state().counters()
    }
}

impl<BT: BindingsTypes, I: IpLayerIpExt, L> ResourceCounterContext<DeviceId<BT>, IpCounters<I>>
    for CoreCtx<'_, BT, L>
{
    fn per_resource_counters<'a>(&'a self, device_id: &'a DeviceId<BT>) -> &'a IpCounters<I> {
        for_any_device_id!(
            DeviceId,
            device_id,
            id => self.per_resource_counters(id)
        )
    }
}

impl<BT: BindingsTypes, D: DeviceStateSpec, I: IpLayerIpExt, L>
    ResourceCounterContext<BaseDeviceId<D, BT>, IpCounters<I>> for CoreCtx<'_, BT, L>
{
    fn per_resource_counters<'a>(
        &'a self,
        device_id: &'a BaseDeviceId<D, BT>,
    ) -> &'a IpCounters<I> {
        device_id
            .device_state(
                &self.unlocked_access::<crate::lock_ordering::UnlockedState>().device.origin,
            )
            .as_ref()
            .ip_counters::<I>()
    }
}

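// Routing state access. The lock hierarchy is: the rules table first, then the
// map of all routing tables, then each individual routing table.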
#[netstack3_macros::instantiate_ip_impl_block(I)]
impl<I, BC, L> IpStateContext<I, BC> for CoreCtx<'_, BC, L>
where
    I: IpLayerIpExt,
    BC: BindingsContext,
    L: LockBefore<crate::lock_ordering::IpStateRulesTable<I>>,
{
    type IpRouteTablesCtx<'a> =
        CoreCtx<'a, BC, WrapLockLevel<crate::lock_ordering::IpStateRulesTable<I>>>;

    fn with_rules_table<
        O,
        F: FnOnce(&mut Self::IpRouteTablesCtx<'_>, &RulesTable<I, Self::DeviceId, BC>) -> O,
    >(
        &mut self,
        cb: F,
    ) -> O {
        let (rules_table, mut restricted) =
            self.read_lock_and::<crate::lock_ordering::IpStateRulesTable<I>>();
        cb(&mut restricted, &rules_table)
    }

    fn with_rules_table_mut<
        O,
        F: FnOnce(&mut Self::IpRouteTablesCtx<'_>, &mut RulesTable<I, Self::DeviceId, BC>) -> O,
    >(
        &mut self,
        cb: F,
    ) -> O {
        let (mut rules_table, mut restricted) =
            self.write_lock_and::<crate::lock_ordering::IpStateRulesTable<I>>();
        cb(&mut restricted, &mut rules_table)
    }
}

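// Access to the map of all routing tables, keyed by `RoutingTableId`.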
#[netstack3_macros::instantiate_ip_impl_block(I)]
impl<I, BC, L> IpRouteTablesContext<I, BC> for CoreCtx<'_, BC, L>
where
    I: IpLayerIpExt,
    BC: BindingsContext,
    L: LockBefore<crate::lock_ordering::IpStateRoutingTables<I>>,
{
    type Ctx<'a> = CoreCtx<'a, BC, WrapLockLevel<crate::lock_ordering::IpStateRoutingTables<I>>>;

    fn main_table_id(&self) -> RoutingTableId<I, Self::DeviceId, BC> {
        self.unlocked_access::<crate::lock_ordering::UnlockedState>()
            .inner_ip_state()
            .main_table_id()
            .clone()
    }

    fn with_ip_routing_tables<
        O,
        F: FnOnce(
            &mut Self::Ctx<'_>,
            &HashMap<
                RoutingTableId<I, Self::DeviceId, BC>,
                Primary<BaseRoutingTableState<I, Self::DeviceId, BC>>,
            >,
        ) -> O,
    >(
        &mut self,
        cb: F,
    ) -> O {
        let (table, mut ctx) = self.lock_and::<crate::lock_ordering::IpStateRoutingTables<I>>();
        cb(&mut ctx, &table)
    }

    fn with_ip_routing_tables_mut<
        O,
        F: FnOnce(
            &mut HashMap<
                RoutingTableId<I, Self::DeviceId, BC>,
                Primary<BaseRoutingTableState<I, Self::DeviceId, BC>>,
            >,
        ) -> O,
    >(
        &mut self,
        cb: F,
    ) -> O {
        let mut tables = self.lock::<crate::lock_ordering::IpStateRoutingTables<I>>();
        cb(&mut *tables)
    }
}

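// Access to an individual routing table, locked through its `RoutingTableId`.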
#[netstack3_macros::instantiate_ip_impl_block(I)]
impl<I, BC, L> IpRouteTableContext<I, BC> for CoreCtx<'_, BC, L>
where
    I: IpLayerIpExt,
    BC: BindingsContext,
    L: LockBefore<crate::lock_ordering::IpStateRoutingTable<I>>,
{
    type IpDeviceIdCtx<'a> =
        CoreCtx<'a, BC, WrapLockLevel<crate::lock_ordering::IpStateRoutingTable<I>>>;

    fn with_ip_routing_table<
        O,
        F: FnOnce(&mut Self::IpDeviceIdCtx<'_>, &RoutingTable<I, Self::DeviceId>) -> O,
    >(
        &mut self,
        table_id: &RoutingTableId<I, Self::DeviceId, BC>,
        cb: F,
    ) -> O {
        let mut table = self.adopt(table_id);
        let (table, mut restricted) = table
            .read_lock_with_and::<crate::lock_ordering::IpStateRoutingTable<I>, _>(|c| c.right());
        let mut restricted = restricted.cast_core_ctx();
        cb(&mut restricted, &table)
    }

    fn with_ip_routing_table_mut<
        O,
        F: FnOnce(&mut Self::IpDeviceIdCtx<'_>, &mut RoutingTable<I, Self::DeviceId>) -> O,
    >(
        &mut self,
        table_id: &RoutingTableId<I, Self::DeviceId, BC>,
        cb: F,
    ) -> O {
        let mut table = self.adopt(table_id);
        let (mut table, mut restricted) = table
            .write_lock_with_and::<crate::lock_ordering::IpStateRoutingTable<I>, _>(|c| c.right());
        let mut restricted = restricted.cast_core_ctx();
        cb(&mut restricted, &mut table)
    }
}

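// Local delivery: received IP packets are dispatched to the matching transport
// protocol implementation (ICMP, IGMP, UDP, TCP); unrecognized protocols are
// reported as unsupported.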
impl<BC: BindingsContext, L: LockBefore<crate::lock_ordering::IcmpAllSocketsSet<Ipv4>>>
    IpTransportDispatchContext<Ipv4, BC> for CoreCtx<'_, BC, L>
{
    fn dispatch_receive_ip_packet<B: BufferMut, H: IpHeaderInfo<Ipv4>>(
        &mut self,
        bindings_ctx: &mut BC,
        device: &Self::DeviceId,
        src_ip: Ipv4SourceAddr,
        dst_ip: SpecifiedAddr<Ipv4Addr>,
        proto: Ipv4Proto,
        body: B,
        info: &LocalDeliveryPacketInfo<Ipv4, H>,
    ) -> Result<(), TransportReceiveError> {
        match proto {
            Ipv4Proto::Icmp => {
                <IcmpIpTransportContext as IpTransportContext<Ipv4, _, _>>::receive_ip_packet(
                    self,
                    bindings_ctx,
                    device,
                    src_ip,
                    dst_ip,
                    body,
                    info,
                )
                .map_err(|(_body, err)| err)
            }
            Ipv4Proto::Igmp => {
                device::receive_igmp_packet(self, bindings_ctx, device, src_ip, dst_ip, body, info);
                Ok(())
            }
            Ipv4Proto::Proto(IpProto::Udp) => {
                <UdpIpTransportContext as IpTransportContext<Ipv4, _, _>>::receive_ip_packet(
                    self,
                    bindings_ctx,
                    device,
                    src_ip,
                    dst_ip,
                    body,
                    info,
                )
                .map_err(|(_body, err)| err)
            }
            Ipv4Proto::Proto(IpProto::Tcp) => {
                <TcpIpTransportContext as IpTransportContext<Ipv4, _, _>>::receive_ip_packet(
                    self,
                    bindings_ctx,
                    device,
                    src_ip,
                    dst_ip,
                    body,
                    info,
                )
                .map_err(|(_body, err)| err)
            }
            _ => Err(TransportReceiveError::ProtocolUnsupported),
        }
    }
}

impl<BC: BindingsContext, L: LockBefore<crate::lock_ordering::IcmpAllSocketsSet<Ipv6>>>
    IpTransportDispatchContext<Ipv6, BC> for CoreCtx<'_, BC, L>
{
    fn dispatch_receive_ip_packet<B: BufferMut, H: IpHeaderInfo<Ipv6>>(
        &mut self,
        bindings_ctx: &mut BC,
        device: &Self::DeviceId,
        src_ip: Ipv6SourceAddr,
        dst_ip: SpecifiedAddr<Ipv6Addr>,
        proto: Ipv6Proto,
        body: B,
        info: &LocalDeliveryPacketInfo<Ipv6, H>,
    ) -> Result<(), TransportReceiveError> {
        match proto {
            Ipv6Proto::Icmpv6 => {
                <IcmpIpTransportContext as IpTransportContext<Ipv6, _, _>>::receive_ip_packet(
                    self,
                    bindings_ctx,
                    device,
                    src_ip,
                    dst_ip,
                    body,
                    info,
                )
                .map_err(|(_body, err)| err)
            }
            Ipv6Proto::NoNextHeader => Ok(()),
            Ipv6Proto::Proto(IpProto::Tcp) => {
                <TcpIpTransportContext as IpTransportContext<Ipv6, _, _>>::receive_ip_packet(
                    self,
                    bindings_ctx,
                    device,
                    src_ip,
                    dst_ip,
                    body,
                    info,
                )
                .map_err(|(_body, err)| err)
            }
            Ipv6Proto::Proto(IpProto::Udp) => {
                <UdpIpTransportContext as IpTransportContext<Ipv6, _, _>>::receive_ip_packet(
                    self,
                    bindings_ctx,
                    device,
                    src_ip,
                    dst_ip,
                    body,
                    info,
                )
                .map_err(|(_body, err)| err)
            }
            _ => Err(TransportReceiveError::ProtocolUnsupported),
        }
    }
}

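// ICMP error delivery: incoming ICMP errors are forwarded to the transport
// protocol of the offending (original) packet, and error transmission is rate
// limited by a token bucket.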
impl<
    BC: BindingsContext,
    L: LockBefore<crate::lock_ordering::IcmpBoundMap<Ipv4>>
        + LockBefore<crate::lock_ordering::TcpAllSocketsSet<Ipv4>>
        + LockBefore<crate::lock_ordering::UdpAllSocketsSet<Ipv4>>,
> InnerIcmpContext<Ipv4, BC> for CoreCtx<'_, BC, L>
{
    type EchoTransportContext = IcmpEchoIpTransportContext;

    fn receive_icmp_error(
        &mut self,
        bindings_ctx: &mut BC,
        device: &DeviceId<BC>,
        original_src_ip: Option<SpecifiedAddr<Ipv4Addr>>,
        original_dst_ip: SpecifiedAddr<Ipv4Addr>,
        original_proto: Ipv4Proto,
        original_body: &[u8],
        err: Icmpv4ErrorCode,
    ) {
        self.increment_both(device, |c: &IpCounters<Ipv4>| &c.receive_icmp_error);
        trace!("InnerIcmpContext<Ipv4>::receive_icmp_error({:?})", err);

        match original_proto {
            Ipv4Proto::Icmp => {
                <IcmpIpTransportContext as IpTransportContext<Ipv4, _, _>>::receive_icmp_error(
                    self,
                    bindings_ctx,
                    device,
                    original_src_ip,
                    original_dst_ip,
                    original_body,
                    err,
                )
            }
            Ipv4Proto::Proto(IpProto::Tcp) => {
                <TcpIpTransportContext as IpTransportContext<Ipv4, _, _>>::receive_icmp_error(
                    self,
                    bindings_ctx,
                    device,
                    original_src_ip,
                    original_dst_ip,
                    original_body,
                    err,
                )
            }
            Ipv4Proto::Proto(IpProto::Udp) => {
                <UdpIpTransportContext as IpTransportContext<Ipv4, _, _>>::receive_icmp_error(
                    self,
                    bindings_ctx,
                    device,
                    original_src_ip,
                    original_dst_ip,
                    original_body,
                    err,
                )
            }
            _ => <() as IpTransportContext<Ipv4, _, _>>::receive_icmp_error(
                self,
                bindings_ctx,
                device,
                original_src_ip,
                original_dst_ip,
                original_body,
                err,
            ),
        }
    }

    fn with_error_send_bucket_mut<O, F: FnOnce(&mut TokenBucket<BC::Instant>) -> O>(
        &mut self,
        cb: F,
    ) -> O {
        cb(&mut self.lock::<crate::lock_ordering::IcmpTokenBucket<Ipv4>>())
    }
}

impl<
    BC: BindingsContext,
    L: LockBefore<crate::lock_ordering::IcmpBoundMap<Ipv6>>
        + LockBefore<crate::lock_ordering::TcpAllSocketsSet<Ipv6>>
        + LockBefore<crate::lock_ordering::UdpAllSocketsSet<Ipv6>>,
> InnerIcmpContext<Ipv6, BC> for CoreCtx<'_, BC, L>
{
    type EchoTransportContext = IcmpEchoIpTransportContext;

    fn receive_icmp_error(
        &mut self,
        bindings_ctx: &mut BC,
        device: &DeviceId<BC>,
        original_src_ip: Option<SpecifiedAddr<Ipv6Addr>>,
        original_dst_ip: SpecifiedAddr<Ipv6Addr>,
        original_next_header: Ipv6Proto,
        original_body: &[u8],
        err: Icmpv6ErrorCode,
    ) {
        self.increment_both(device, |c: &IpCounters<Ipv6>| &c.receive_icmp_error);
        trace!("InnerIcmpContext<Ipv6>::receive_icmp_error({:?})", err);

        match original_next_header {
            Ipv6Proto::Icmpv6 => {
                <IcmpIpTransportContext as IpTransportContext<Ipv6, _, _>>::receive_icmp_error(
                    self,
                    bindings_ctx,
                    device,
                    original_src_ip,
                    original_dst_ip,
                    original_body,
                    err,
                )
            }
            Ipv6Proto::Proto(IpProto::Tcp) => {
                <TcpIpTransportContext as IpTransportContext<Ipv6, _, _>>::receive_icmp_error(
                    self,
                    bindings_ctx,
                    device,
                    original_src_ip,
                    original_dst_ip,
                    original_body,
                    err,
                )
            }
            Ipv6Proto::Proto(IpProto::Udp) => {
                <UdpIpTransportContext as IpTransportContext<Ipv6, _, _>>::receive_icmp_error(
                    self,
                    bindings_ctx,
                    device,
                    original_src_ip,
                    original_dst_ip,
                    original_body,
                    err,
                )
            }
            _ => <() as IpTransportContext<Ipv6, _, _>>::receive_icmp_error(
                self,
                bindings_ctx,
                device,
                original_src_ip,
                original_dst_ip,
                original_body,
                err,
            ),
        }
    }

    fn with_error_send_bucket_mut<O, F: FnOnce(&mut TokenBucket<BC::Instant>) -> O>(
        &mut self,
        cb: F,
    ) -> O {
        cb(&mut self.lock::<crate::lock_ordering::IcmpTokenBucket<Ipv6>>())
    }
}

impl<L, BC: BindingsContext> icmp::IcmpStateContext for CoreCtx<'_, BC, L> {}

impl<BT: BindingsTypes, L> IcmpEchoContextMarker for CoreCtx<'_, BT, L> {}

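// ICMP echo socket state: the socket set and per-socket state are reached
// through their respective lock levels, with per-socket locks taken via
// `adopt` on the socket ID.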
#[netstack3_macros::instantiate_ip_impl_block(I)]
impl<I, BC: BindingsContext, L: LockBefore<crate::lock_ordering::IcmpAllSocketsSet<I>>>
    IcmpEchoStateContext<I, BC> for CoreCtx<'_, BC, L>
{
    type SocketStateCtx<'a> =
        CoreCtx<'a, BC, WrapLockLevel<crate::lock_ordering::IcmpSocketState<I>>>;

    fn with_all_sockets_mut<O, F: FnOnce(&mut IcmpSocketSet<I, Self::WeakDeviceId, BC>) -> O>(
        &mut self,
        cb: F,
    ) -> O {
        cb(&mut self.write_lock::<crate::lock_ordering::IcmpAllSocketsSet<I>>())
    }

    fn with_all_sockets<O, F: FnOnce(&IcmpSocketSet<I, Self::WeakDeviceId, BC>) -> O>(
        &mut self,
        cb: F,
    ) -> O {
        cb(&self.read_lock::<crate::lock_ordering::IcmpAllSocketsSet<I>>())
    }

    fn with_socket_state<
        O,
        F: FnOnce(&mut Self::SocketStateCtx<'_>, &IcmpSocketState<I, Self::WeakDeviceId, BC>) -> O,
    >(
        &mut self,
        id: &IcmpSocketId<I, Self::WeakDeviceId, BC>,
        cb: F,
    ) -> O {
        let mut locked = self.adopt(id);
        let (socket_state, mut restricted) =
            locked.read_lock_with_and::<crate::lock_ordering::IcmpSocketState<I>, _>(|c| c.right());
        let mut restricted = restricted.cast_core_ctx();
        cb(&mut restricted, &socket_state)
    }

    fn with_socket_state_mut<
        O,
        F: FnOnce(&mut Self::SocketStateCtx<'_>, &mut IcmpSocketState<I, Self::WeakDeviceId, BC>) -> O,
    >(
        &mut self,
        id: &IcmpSocketId<I, Self::WeakDeviceId, BC>,
        cb: F,
    ) -> O {
        let mut locked = self.adopt(id);
        let (mut socket_state, mut restricted) = locked
            .write_lock_with_and::<crate::lock_ordering::IcmpSocketState<I>, _>(|c| c.right());
        let mut restricted = restricted.cast_core_ctx();
        cb(&mut restricted, &mut socket_state)
    }

    fn with_bound_state_context<O, F: FnOnce(&mut Self::SocketStateCtx<'_>) -> O>(
        &mut self,
        cb: F,
    ) -> O {
        cb(&mut self.cast_locked::<crate::lock_ordering::IcmpSocketState<I>>())
    }

    fn for_each_socket<
        F: FnMut(
            &mut Self::SocketStateCtx<'_>,
            &IcmpSocketId<I, Self::WeakDeviceId, BC>,
            &IcmpSocketState<I, Self::WeakDeviceId, BC>,
        ),
    >(
        &mut self,
        mut cb: F,
    ) {
        let (all_sockets, mut locked) =
            self.read_lock_and::<crate::lock_ordering::IcmpAllSocketsSet<I>>();
        all_sockets.keys().for_each(|id| {
            let id = IcmpSocketId::from(id.clone());
            let mut locked = locked.adopt(&id);
            let (socket_state, mut restricted) = locked
                .read_lock_with_and::<crate::lock_ordering::IcmpSocketState<I>, _>(|c| c.right());
            let mut restricted = restricted.cast_core_ctx();
            cb(&mut restricted, &id, &socket_state);
        });
    }
}

#[netstack3_macros::instantiate_ip_impl_block(I)]
impl<I, BC: BindingsContext, L: LockBefore<crate::lock_ordering::IcmpBoundMap<I>>>
    IcmpEchoBoundStateContext<I, BC> for CoreCtx<'_, BC, L>
{
    type IpSocketsCtx<'a> = CoreCtx<'a, BC, WrapLockLevel<crate::lock_ordering::IcmpBoundMap<I>>>;
    fn with_icmp_ctx_and_sockets_mut<
        O,
        F: FnOnce(
            &mut Self::IpSocketsCtx<'_>,
            &mut icmp_echo::BoundSockets<I, Self::WeakDeviceId, BC>,
        ) -> O,
    >(
        &mut self,
        cb: F,
    ) -> O {
        let (mut sockets, mut core_ctx) =
            self.write_lock_and::<crate::lock_ordering::IcmpBoundMap<I>>();
        cb(&mut core_ctx, &mut sockets)
    }
}

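// Lock-order plumbing: `DelegatedOrderedLockAccess` forwards lock access for
// each guarded type to the inner struct that actually owns it, and
// `LockLevelFor` names the data type guarded at each lock level.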
impl<I: IpLayerIpExt, BT: BindingsTypes> DelegatedOrderedLockAccess<IpPacketFragmentCache<I, BT>>
    for StackState<BT>
{
    type Inner = IpStateInner<I, DeviceId<BT>, BT>;
    fn delegate_ordered_lock_access(&self) -> &Self::Inner {
        self.inner_ip_state()
    }
}

impl<I: IpLayerIpExt, BT: BindingsTypes> LockLevelFor<StackState<BT>>
    for crate::lock_ordering::IpStateFragmentCache<I>
{
    type Data = IpPacketFragmentCache<I, BT>;
}

impl<I: IpLayerIpExt, BT: BindingsTypes> DelegatedOrderedLockAccess<PmtuCache<I, BT>>
    for StackState<BT>
{
    type Inner = IpStateInner<I, DeviceId<BT>, BT>;
    fn delegate_ordered_lock_access(&self) -> &Self::Inner {
        self.inner_ip_state()
    }
}

impl<I: IpLayerIpExt, BT: BindingsTypes> LockLevelFor<StackState<BT>>
    for crate::lock_ordering::IpStateRulesTable<I>
{
    type Data = RulesTable<I, DeviceId<BT>, BT>;
}

impl<I: IpLayerIpExt, BT: BindingsTypes> DelegatedOrderedLockAccess<RulesTable<I, DeviceId<BT>, BT>>
    for StackState<BT>
{
    type Inner = IpStateInner<I, DeviceId<BT>, BT>;
    fn delegate_ordered_lock_access(&self) -> &Self::Inner {
        self.inner_ip_state()
    }
}

impl<I: IpLayerIpExt, BT: BindingsTypes> LockLevelFor<StackState<BT>>
    for crate::lock_ordering::IpStatePmtuCache<I>
{
    type Data = PmtuCache<I, BT>;
}

impl<I: IpLayerIpExt, BT: BindingsTypes> LockLevelFor<StackState<BT>>
    for crate::lock_ordering::IpStateRoutingTables<I>
{
    type Data = HashMap<
        RoutingTableId<I, DeviceId<BT>, BT>,
        Primary<BaseRoutingTableState<I, DeviceId<BT>, BT>>,
    >;
}

impl<I: IpLayerIpExt, BT: BindingsTypes>
    DelegatedOrderedLockAccess<
        HashMap<
            RoutingTableId<I, DeviceId<BT>, BT>,
            Primary<BaseRoutingTableState<I, DeviceId<BT>, BT>>,
        >,
    > for StackState<BT>
{
    type Inner = IpStateInner<I, DeviceId<BT>, BT>;
    fn delegate_ordered_lock_access(&self) -> &Self::Inner {
        self.inner_ip_state()
    }
}

impl<I: IpLayerIpExt, BT: BindingsTypes> LockLevelFor<RoutingTableId<I, DeviceId<BT>, BT>>
    for crate::lock_ordering::IpStateRoutingTable<I>
{
    type Data = RoutingTable<I, DeviceId<BT>>;
}

impl<I: IpLayerIpExt, BT: BindingsTypes>
    DelegatedOrderedLockAccess<MulticastForwardingState<I, DeviceId<BT>, BT>> for StackState<BT>
{
    type Inner = IpStateInner<I, DeviceId<BT>, BT>;
    fn delegate_ordered_lock_access(&self) -> &Self::Inner {
        self.inner_ip_state()
    }
}

impl<I: IpLayerIpExt, BT: BindingsTypes> LockLevelFor<StackState<BT>>
    for crate::lock_ordering::IpMulticastForwardingState<I>
{
    type Data = MulticastForwardingState<I, DeviceId<BT>, BT>;
}

impl<I: IpLayerIpExt, BT: BindingsTypes>
    DelegatedOrderedLockAccess<RawIpSocketMap<I, WeakDeviceId<BT>, BT>> for StackState<BT>
{
    type Inner = IpStateInner<I, DeviceId<BT>, BT>;
    fn delegate_ordered_lock_access(&self) -> &Self::Inner {
        self.inner_ip_state()
    }
}

impl<I: IpLayerIpExt, BT: BindingsTypes> LockLevelFor<StackState<BT>>
    for crate::lock_ordering::AllRawIpSockets<I>
{
    type Data = RawIpSocketMap<I, WeakDeviceId<BT>, BT>;
}

impl<I: datagram::DualStackIpExt, BT: BindingsTypes>
    DelegatedOrderedLockAccess<icmp_echo::BoundSockets<I, WeakDeviceId<BT>, BT>>
    for StackState<BT>
{
    type Inner = IcmpSockets<I, WeakDeviceId<BT>, BT>;
    fn delegate_ordered_lock_access(&self) -> &Self::Inner {
        &self.transport.icmp_echo_state()
    }
}

impl<I: datagram::DualStackIpExt, BT: BindingsTypes> LockLevelFor<StackState<BT>>
    for crate::lock_ordering::IcmpBoundMap<I>
{
    type Data = icmp_echo::BoundSockets<I, WeakDeviceId<BT>, BT>;
}

impl<I: datagram::DualStackIpExt, BT: BindingsTypes>
    DelegatedOrderedLockAccess<IcmpSocketSet<I, WeakDeviceId<BT>, BT>> for StackState<BT>
{
    type Inner = IcmpSockets<I, WeakDeviceId<BT>, BT>;
    fn delegate_ordered_lock_access(&self) -> &Self::Inner {
        &self.transport.icmp_echo_state()
    }
}

impl<I: datagram::DualStackIpExt, BT: BindingsTypes> LockLevelFor<StackState<BT>>
    for crate::lock_ordering::IcmpAllSocketsSet<I>
{
    type Data = IcmpSocketSet<I, WeakDeviceId<BT>, BT>;
}

impl<I: datagram::DualStackIpExt, BT: BindingsTypes>
    DelegatedOrderedLockAccess<IpMarked<I, TokenBucket<BT::Instant>>> for StackState<BT>
{
    type Inner = IcmpState<I, BT>;
    fn delegate_ordered_lock_access(&self) -> &Self::Inner {
        self.inner_icmp_state()
    }
}

impl<I: datagram::DualStackIpExt, BT: BindingsTypes> LockLevelFor<StackState<BT>>
    for crate::lock_ordering::IcmpTokenBucket<I>
{
    type Data = IpMarked<I, TokenBucket<BT::Instant>>;
}

impl<I: datagram::DualStackIpExt, D: WeakDeviceIdentifier, BT: BindingsTypes>
    LockLevelFor<IcmpSocketId<I, D, BT>> for crate::lock_ordering::IcmpSocketState<I>
{
    type Data = IcmpSocketState<I, D, BT>;
}