fidl_next_protocol/fuchsia/channel.rs

// Copyright 2024 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! A transport implementation which uses Zircon channels.
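//!
//! Splitting a [`Channel`] yields a cloneable [`Sender`] and a single
//! [`Receiver`] that share the underlying handle. Dropping the last `Sender`
//! (or closing it explicitly) wakes any pending receive so it can observe the
//! closure.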

use core::mem::replace;
use core::pin::Pin;
use core::ptr::NonNull;
use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use core::task::{Context, Poll};
use std::sync::Arc;

use fidl_next_codec::decoder::InternalHandleDecoder;
use fidl_next_codec::encoder::InternalHandleEncoder;
use fidl_next_codec::fuchsia::{HandleDecoder, HandleEncoder};
use fidl_next_codec::{Chunk, DecodeError, Decoder, EncodeError, Encoder, CHUNK_SIZE};
use fuchsia_async::{RWHandle, ReadableHandle as _};
use futures::task::AtomicWaker;
use zx::sys::{
    zx_channel_read, zx_channel_write, ZX_ERR_BUFFER_TOO_SMALL, ZX_ERR_PEER_CLOSED,
    ZX_ERR_SHOULD_WAIT, ZX_OK,
};
use zx::{AsHandleRef as _, Channel, Handle, Status};

use crate::Transport;

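/// State shared by all `Sender`s and the `Receiver` of a split channel.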
struct Shared {
    is_closed: AtomicBool,
    sender_count: AtomicUsize,
    closed_waker: AtomicWaker,
    channel: RWHandle<Channel>,
    // TODO: recycle send/recv buffers to reduce allocations
}

impl Shared {
    fn new(channel: Channel) -> Self {
        Self {
            is_closed: AtomicBool::new(false),
            sender_count: AtomicUsize::new(1),
            closed_waker: AtomicWaker::new(),
            channel: RWHandle::new(channel),
        }
    }

    fn close(&self) {
        self.is_closed.store(true, Ordering::Relaxed);
        self.closed_waker.wake();
    }
}

/// A channel sender.
pub struct Sender {
    shared: Arc<Shared>,
}

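// Dropping the last `Sender` closes the shared state so that a pending
// receive wakes up and observes that no further messages can arrive.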
impl Drop for Sender {
    fn drop(&mut self) {
        let senders = self.shared.sender_count.fetch_sub(1, Ordering::Relaxed);
        if senders == 1 {
            self.shared.close();
        }
    }
}

impl Clone for Sender {
    fn clone(&self) -> Self {
        self.shared.sender_count.fetch_add(1, Ordering::Relaxed);
        Self { shared: self.shared.clone() }
    }
}

/// A channel buffer.
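///
/// Holds the encoded chunks and handles for a single message; it serves both
/// as the send buffer and as the backing storage for received messages.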
pub struct Buffer {
    handles: Vec<Handle>,
    chunks: Vec<Chunk>,
}

impl Buffer {
    fn new() -> Self {
        Self { handles: Vec::new(), chunks: Vec::new() }
    }
}

impl InternalHandleEncoder for Buffer {
    #[inline]
    fn __internal_handle_count(&self) -> usize {
        self.handles.len()
    }
}

impl Encoder for Buffer {
    #[inline]
    fn bytes_written(&self) -> usize {
        Encoder::bytes_written(&self.chunks)
    }

    #[inline]
    fn write_zeroes(&mut self, len: usize) {
        Encoder::write_zeroes(&mut self.chunks, len)
    }

    #[inline]
    fn write(&mut self, bytes: &[u8]) {
        Encoder::write(&mut self.chunks, bytes)
    }

    #[inline]
    fn rewrite(&mut self, pos: usize, bytes: &[u8]) {
        Encoder::rewrite(&mut self.chunks, pos, bytes)
    }
}

impl HandleEncoder for Buffer {
    fn push_handle(&mut self, handle: Handle) -> Result<(), EncodeError> {
        self.handles.push(handle);
        Ok(())
    }

    fn handles_pushed(&self) -> usize {
        self.handles.len()
    }
}

/// The state for a channel send future.
pub struct SendFutureState {
    buffer: Buffer,
}

/// A channel receiver.
pub struct Receiver {
    shared: Arc<Shared>,
}

/// The state for a channel receive future.
pub struct RecvFutureState {
    buffer: Option<Buffer>,
}

/// A channel receive buffer.
pub struct RecvBuffer {
    buffer: Buffer,
    chunks_taken: usize,
    handles_taken: usize,
}

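// Decoding consumes the received chunks and handles in order; `chunks_taken`
// and `handles_taken` record how far decoding has progressed so that `finish`
// can reject any leftover data or handles.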
unsafe impl Decoder for RecvBuffer {
    fn take_chunks_raw(&mut self, count: usize) -> Result<NonNull<Chunk>, DecodeError> {
        if count > self.buffer.chunks.len() - self.chunks_taken {
            return Err(DecodeError::InsufficientData);
        }

        let chunks = unsafe { self.buffer.chunks.as_mut_ptr().add(self.chunks_taken) };
        self.chunks_taken += count;

        unsafe { Ok(NonNull::new_unchecked(chunks)) }
    }

    fn finish(&mut self) -> Result<(), DecodeError> {
        if self.chunks_taken != self.buffer.chunks.len() {
            return Err(DecodeError::ExtraBytes {
                num_extra: (self.buffer.chunks.len() - self.chunks_taken) * CHUNK_SIZE,
            });
        }

        if self.handles_taken != self.buffer.handles.len() {
            return Err(DecodeError::ExtraHandles {
                num_extra: self.buffer.handles.len() - self.handles_taken,
            });
        }

        Ok(())
    }
}

impl InternalHandleDecoder for RecvBuffer {
    fn __internal_take_handles(&mut self, count: usize) -> Result<(), DecodeError> {
        if count > self.buffer.handles.len() - self.handles_taken {
            return Err(DecodeError::InsufficientHandles);
        }

        for i in self.handles_taken..self.handles_taken + count {
            let handle = replace(&mut self.buffer.handles[i], Handle::invalid());
            drop(handle);
        }
        self.handles_taken += count;

        Ok(())
    }

    fn __internal_handles_remaining(&self) -> usize {
        self.buffer.handles.len() - self.handles_taken
    }
}

impl HandleDecoder for RecvBuffer {
    fn take_handle(&mut self) -> Result<Handle, DecodeError> {
        if self.handles_taken >= self.buffer.handles.len() {
            return Err(DecodeError::InsufficientHandles);
        }

        let handle = replace(&mut self.buffer.handles[self.handles_taken], Handle::invalid());
        self.handles_taken += 1;

        Ok(handle)
    }

    fn handles_remaining(&mut self) -> usize {
        self.buffer.handles.len() - self.handles_taken
    }
}

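// Tie the pieces together: splitting a channel produces a `Sender`/`Receiver`
// pair over shared state, sends encode into a `Buffer` which is written with
// `zx_channel_write`, and receives read into a `RecvBuffer` via
// `zx_channel_read`.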
impl Transport for Channel {
    type Error = Status;

    fn split(self) -> (Self::Sender, Self::Receiver) {
        let shared = Arc::new(Shared::new(self));
        (Sender { shared: shared.clone() }, Receiver { shared })
    }

    type Sender = Sender;
    type SendBuffer = Buffer;
    type SendFutureState = SendFutureState;

    fn acquire(_: &Self::Sender) -> Self::SendBuffer {
        Buffer::new()
    }

    fn begin_send(_: &Self::Sender, buffer: Self::SendBuffer) -> Self::SendFutureState {
        SendFutureState { buffer }
    }

    fn poll_send(
        mut future_state: Pin<&mut Self::SendFutureState>,
        _: &mut Context<'_>,
        sender: &Self::Sender,
    ) -> Poll<Result<(), Self::Error>> {
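        // Channel writes never block, so the send completes (or fails)
        // immediately without registering the waker.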
        let result = unsafe {
            zx_channel_write(
                sender.shared.channel.get_ref().raw_handle(),
                0,
                future_state.buffer.chunks.as_ptr().cast::<u8>(),
                (future_state.buffer.chunks.len() * CHUNK_SIZE) as u32,
                future_state.buffer.handles.as_ptr().cast(),
                future_state.buffer.handles.len() as u32,
            )
        };

        if result == ZX_OK {
            // Handles were written to the channel, so we must not drop them.
            unsafe {
                future_state.buffer.handles.set_len(0);
            }
            Poll::Ready(Ok(()))
        } else {
            Poll::Ready(Err(Status::from_raw(result)))
        }
    }

    fn close(sender: &Self::Sender) {
        sender.shared.close();
    }

    type Receiver = Receiver;
    type RecvFutureState = RecvFutureState;
    type RecvBuffer = RecvBuffer;

    fn begin_recv(_: &mut Self::Receiver) -> Self::RecvFutureState {
        RecvFutureState { buffer: Some(Buffer::new()) }
    }

    fn poll_recv(
        mut future_state: Pin<&mut Self::RecvFutureState>,
        cx: &mut Context<'_>,
        receiver: &mut Self::Receiver,
    ) -> Poll<Result<Option<Self::RecvBuffer>, Self::Error>> {
        let buffer = future_state.buffer.as_mut().unwrap();

        let mut actual_bytes = 0;
        let mut actual_handles = 0;

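        // Read in a loop: grow the buffers and retry if they are too small, or
        // register for a wakeup if no message is available yet.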
        loop {
            let result = unsafe {
                zx_channel_read(
                    receiver.shared.channel.get_ref().raw_handle(),
                    0,
                    buffer.chunks.as_mut_ptr().cast(),
                    buffer.handles.as_mut_ptr().cast(),
                    (buffer.chunks.capacity() * CHUNK_SIZE) as u32,
                    buffer.handles.capacity() as u32,
                    &mut actual_bytes,
                    &mut actual_handles,
                )
            };

            match result {
                ZX_OK => {
                    // The kernel filled in `actual_bytes` bytes and
                    // `actual_handles` handles; fix up the vector lengths to
                    // match.
                    unsafe {
                        buffer.chunks.set_len(actual_bytes as usize / CHUNK_SIZE);
                        buffer.handles.set_len(actual_handles as usize);
                    }
                    return Poll::Ready(Ok(Some(RecvBuffer {
                        buffer: future_state.buffer.take().unwrap(),
                        chunks_taken: 0,
                        handles_taken: 0,
                    })));
                }
                ZX_ERR_PEER_CLOSED => return Poll::Ready(Ok(None)),
                ZX_ERR_BUFFER_TOO_SMALL => {
                    // The message at the front of the channel needs more room;
                    // grow the buffers to fit it and try again.
                    let min_chunks = (actual_bytes as usize).div_ceil(CHUNK_SIZE);
                    buffer.chunks.reserve(min_chunks - buffer.chunks.capacity());
                    buffer.handles.reserve(actual_handles as usize - buffer.handles.capacity());
                }
                ZX_ERR_SHOULD_WAIT => {
                    if matches!(receiver.shared.channel.need_readable(cx)?, Poll::Pending) {
                        // Register the waker before checking `is_closed` so a
                        // concurrent close either wakes this task or is
                        // observed by the check below.
                        receiver.shared.closed_waker.register(cx.waker());
                        if receiver.shared.is_closed.load(Ordering::Relaxed) {
                            return Poll::Ready(Ok(None));
                        }
                        return Poll::Pending;
                    }
                }
                raw => return Poll::Ready(Err(Status::from_raw(raw))),
            }
        }
    }
}

// These tests run the crate's shared transport test suite over a pair of
// Zircon channel endpoints.
#[cfg(test)]
mod tests {
    use fuchsia_async as fasync;
    use zx::Channel;

    use crate::testing::transport::*;

    #[fasync::run_singlethreaded(test)]
    async fn close_on_drop() {
        let (client_end, server_end) = Channel::create();
        test_close_on_drop(client_end, server_end).await;
    }

    #[fasync::run_singlethreaded(test)]
    async fn one_way() {
        let (client_end, server_end) = Channel::create();
        test_one_way(client_end, server_end).await;
    }

    #[fasync::run_singlethreaded(test)]
    async fn two_way() {
        let (client_end, server_end) = Channel::create();
        test_two_way(client_end, server_end).await;
    }

    #[fasync::run_singlethreaded(test)]
    async fn multiple_two_way() {
        let (client_end, server_end) = Channel::create();
        test_multiple_two_way(client_end, server_end).await;
    }

    #[fasync::run_singlethreaded(test)]
    async fn event() {
        let (client_end, server_end) = Channel::create();
        test_event(client_end, server_end).await;
    }
}