use core::iter::FromIterator;
use core::mem::{self, ManuallyDrop};
use core::ops::{Deref, RangeBounds};
use core::ptr::NonNull;
use core::{cmp, fmt, hash, ptr, slice, usize};

use alloc::{
    alloc::{dealloc, Layout},
    borrow::Borrow,
    boxed::Box,
    string::String,
    vec::Vec,
};

use crate::buf::IntoIter;
#[allow(unused)]
use crate::loom::sync::atomic::AtomicMut;
use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
use crate::{offset_from, Buf, BytesMut};

/// A cheaply cloneable and sliceable chunk of contiguous memory.
pub struct Bytes {
    ptr: *const u8,
    len: usize,
    // `data` plus `vtable` act as an inlined trait object describing the
    // backing storage (static slice, promotable vec/box, shared, or owned).
    data: AtomicPtr<()>,
    vtable: &'static Vtable,
}

pub(crate) struct Vtable {
    pub clone: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Bytes,
    pub to_vec: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Vec<u8>,
    pub to_mut: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> BytesMut,
    pub is_unique: unsafe fn(&AtomicPtr<()>) -> bool,
    pub drop: unsafe fn(&mut AtomicPtr<()>, *const u8, usize),
}

impl Bytes {
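    /// Creates a new empty `Bytes`.
    ///
    /// This does not allocate; the returned handle points at an empty static
    /// slice. A usage sketch (assumes the usual `bytes::Bytes` crate-root
    /// re-export):
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let b = Bytes::new();
    /// assert!(b.is_empty());
    /// ```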
    #[inline]
    #[cfg(not(all(loom, test)))]
    pub const fn new() -> Self {
        const EMPTY: &[u8] = &[];
        Bytes::from_static(EMPTY)
    }

    #[cfg(all(loom, test))]
    pub fn new() -> Self {
        const EMPTY: &[u8] = &[];
        Bytes::from_static(EMPTY)
    }

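    /// Creates a new `Bytes` from a static slice without copying or
    /// allocating.
    ///
    /// A usage sketch (assumes the usual `bytes::Bytes` crate-root
    /// re-export):
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// static DATA: &[u8] = b"hello";
    ///
    /// let b = Bytes::from_static(DATA);
    /// assert_eq!(&b[..], b"hello");
    /// ```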
    #[inline]
    #[cfg(not(all(loom, test)))]
    pub const fn from_static(bytes: &'static [u8]) -> Self {
        Bytes {
            ptr: bytes.as_ptr(),
            len: bytes.len(),
            data: AtomicPtr::new(ptr::null_mut()),
            vtable: &STATIC_VTABLE,
        }
    }

    #[cfg(all(loom, test))]
    pub fn from_static(bytes: &'static [u8]) -> Self {
        Bytes {
            ptr: bytes.as_ptr(),
            len: bytes.len(),
            data: AtomicPtr::new(ptr::null_mut()),
            vtable: &STATIC_VTABLE,
        }
    }

    fn new_empty_with_ptr(ptr: *const u8) -> Self {
        debug_assert!(!ptr.is_null());

        let ptr = without_provenance(ptr as usize);

        Bytes {
            ptr,
            len: 0,
            data: AtomicPtr::new(ptr::null_mut()),
            vtable: &STATIC_VTABLE,
        }
    }

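    /// Creates a new `Bytes` from an owner, an arbitrary value whose
    /// `AsRef<[u8]>` view backs the bytes.
    ///
    /// The owner is moved onto the heap and kept alive (via an internal
    /// reference count) until the last `Bytes` handle referring to it is
    /// dropped. A usage sketch (assumes the usual `bytes::Bytes` crate-root
    /// re-export):
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let owner: Vec<u8> = vec![1, 2, 3, 4];
    /// let b = Bytes::from_owner(owner);
    /// assert_eq!(&b[..], &[1, 2, 3, 4]);
    /// ```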
    pub fn from_owner<T>(owner: T) -> Self
    where
        T: AsRef<[u8]> + Send + 'static,
    {
        let owned = Box::into_raw(Box::new(Owned {
            lifetime: OwnedLifetime {
                ref_cnt: AtomicUsize::new(1),
                drop: owned_box_and_drop::<T>,
            },
            owner,
        }));

        let mut ret = Bytes {
            ptr: NonNull::dangling().as_ptr(),
            len: 0,
            data: AtomicPtr::new(owned.cast()),
            vtable: &OWNED_VTABLE,
        };

        let buf = unsafe { &*owned }.owner.as_ref();
        ret.ptr = buf.as_ptr();
        ret.len = buf.len();

        ret
    }

    #[inline]
    pub const fn len(&self) -> usize {
        self.len
    }

    #[inline]
    pub const fn is_empty(&self) -> bool {
        self.len == 0
    }

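    /// Returns `true` if this is the only outstanding handle to the
    /// underlying storage.
    ///
    /// Handles backed by static slices or created with `from_owner` always
    /// report `false`. A usage sketch (assumes the usual `bytes::Bytes`
    /// crate-root re-export):
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let a = Bytes::from(vec![1, 2, 3]);
    /// assert!(a.is_unique());
    ///
    /// let _b = a.clone();
    /// assert!(!a.is_unique());
    /// ```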
    pub fn is_unique(&self) -> bool {
        unsafe { (self.vtable.is_unique)(&self.data) }
    }

    pub fn copy_from_slice(data: &[u8]) -> Self {
        data.to_vec().into()
    }

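    /// Returns a `Bytes` covering the given subrange of `self`.
    ///
    /// This is `O(1)`: the returned handle shares the underlying storage with
    /// `self` rather than copying it. Panics if the range is out of bounds.
    /// A usage sketch (assumes the usual `bytes::Bytes` crate-root
    /// re-export):
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let a = Bytes::from(&b"hello world"[..]);
    /// let b = a.slice(2..5);
    ///
    /// assert_eq!(&b[..], b"llo");
    /// ```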
    pub fn slice(&self, range: impl RangeBounds<usize>) -> Self {
        use core::ops::Bound;

        let len = self.len();

        let begin = match range.start_bound() {
            Bound::Included(&n) => n,
            Bound::Excluded(&n) => n.checked_add(1).expect("out of range"),
            Bound::Unbounded => 0,
        };

        let end = match range.end_bound() {
            Bound::Included(&n) => n.checked_add(1).expect("out of range"),
            Bound::Excluded(&n) => n,
            Bound::Unbounded => len,
        };

        assert!(
            begin <= end,
            "range start must not be greater than end: {:?} <= {:?}",
            begin,
            end,
        );
        assert!(
            end <= len,
            "range end out of bounds: {:?} <= {:?}",
            end,
            len,
        );

        if end == begin {
            return Bytes::new();
        }

        let mut ret = self.clone();

        ret.len = end - begin;
        ret.ptr = unsafe { ret.ptr.add(begin) };

        ret
    }

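    /// Returns a `Bytes` corresponding to `subset`, which must be a slice of
    /// the memory that `self` points to.
    ///
    /// Like `slice`, this is `O(1)` and shares storage with `self`; it panics
    /// if `subset` is not contained within `self`. A usage sketch (assumes
    /// the usual `bytes::Bytes` crate-root re-export):
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let bytes = Bytes::from(&b"012345678"[..]);
    /// let as_slice = bytes.as_ref();
    /// let subset = &as_slice[2..6];
    ///
    /// assert_eq!(&bytes.slice_ref(subset)[..], b"2345");
    /// ```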
    pub fn slice_ref(&self, subset: &[u8]) -> Self {
        if subset.is_empty() {
            return Bytes::new();
        }

        let bytes_p = self.as_ptr() as usize;
        let bytes_len = self.len();

        let sub_p = subset.as_ptr() as usize;
        let sub_len = subset.len();

        assert!(
            sub_p >= bytes_p,
            "subset pointer ({:p}) is smaller than self pointer ({:p})",
            subset.as_ptr(),
            self.as_ptr(),
        );
        assert!(
            sub_p + sub_len <= bytes_p + bytes_len,
            "subset is out of bounds: self = ({:p}, {}), subset = ({:p}, {})",
            self.as_ptr(),
            bytes_len,
            subset.as_ptr(),
            sub_len,
        );

        let sub_offset = sub_p - bytes_p;

        self.slice(sub_offset..(sub_offset + sub_len))
    }

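    /// Splits the bytes into two at the given index.
    ///
    /// Afterwards `self` contains `[0, at)` and the returned `Bytes` contains
    /// `[at, len)`; no data is copied. Panics if `at > len`. A usage sketch
    /// (assumes the usual `bytes::Bytes` crate-root re-export):
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let mut a = Bytes::from(&b"hello world"[..]);
    /// let b = a.split_off(5);
    ///
    /// assert_eq!(&a[..], b"hello");
    /// assert_eq!(&b[..], b" world");
    /// ```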
    #[must_use = "consider Bytes::truncate if you don't need the other half"]
    pub fn split_off(&mut self, at: usize) -> Self {
        if at == self.len() {
            return Bytes::new_empty_with_ptr(self.ptr.wrapping_add(at));
        }

        if at == 0 {
            return mem::replace(self, Bytes::new_empty_with_ptr(self.ptr));
        }

        assert!(
            at <= self.len(),
            "split_off out of bounds: {:?} <= {:?}",
            at,
            self.len(),
        );

        let mut ret = self.clone();

        self.len = at;

        unsafe { ret.inc_start(at) };

        ret
    }

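    /// Splits the bytes into two at the given index, returning the front
    /// half.
    ///
    /// Afterwards `self` contains `[at, len)` and the returned `Bytes`
    /// contains `[0, at)`; no data is copied. Panics if `at > len`. A usage
    /// sketch (assumes the usual `bytes::Bytes` crate-root re-export):
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let mut a = Bytes::from(&b"hello world"[..]);
    /// let b = a.split_to(5);
    ///
    /// assert_eq!(&a[..], b" world");
    /// assert_eq!(&b[..], b"hello");
    /// ```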
    #[must_use = "consider Bytes::advance if you don't need the other half"]
    pub fn split_to(&mut self, at: usize) -> Self {
        if at == self.len() {
            let end_ptr = self.ptr.wrapping_add(at);
            return mem::replace(self, Bytes::new_empty_with_ptr(end_ptr));
        }

        if at == 0 {
            return Bytes::new_empty_with_ptr(self.ptr);
        }

        assert!(
            at <= self.len(),
            "split_to out of bounds: {:?} <= {:?}",
            at,
            self.len(),
        );

        let mut ret = self.clone();

        unsafe { self.inc_start(at) };

        ret.len = at;
        ret
    }

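    /// Shortens the buffer, keeping the first `len` bytes.
    ///
    /// Does nothing if `len` is greater than or equal to the current length.
    /// A usage sketch (assumes the usual `bytes::Bytes` crate-root
    /// re-export):
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let mut buf = Bytes::from(&b"hello world"[..]);
    /// buf.truncate(5);
    ///
    /// assert_eq!(&buf[..], b"hello");
    /// ```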
    #[inline]
    pub fn truncate(&mut self, len: usize) {
        if len < self.len {
            // The "promotable" vtables derive the buffer capacity from `ptr`
            // and `len`, so simply shrinking `len` would misreport the
            // allocation. Promote via `split_off` instead so the capacity is
            // recorded in a `Shared`.
            if self.vtable as *const Vtable == &PROMOTABLE_EVEN_VTABLE
                || self.vtable as *const Vtable == &PROMOTABLE_ODD_VTABLE
            {
                drop(self.split_off(len));
            } else {
                self.len = len;
            }
        }
    }

    #[inline]
    pub fn clear(&mut self) {
        self.truncate(0);
    }

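    /// Attempts to convert `self` into a `BytesMut`.
    ///
    /// Succeeds only when this is the only handle to the underlying buffer
    /// (see `is_unique`), in which case the buffer is reused rather than
    /// copied; otherwise `self` is returned unchanged in the `Err` variant.
    /// A usage sketch (assumes the usual `bytes::Bytes` crate-root
    /// re-export):
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let unique = Bytes::from(b"hello".to_vec());
    /// assert!(unique.try_into_mut().is_ok());
    ///
    /// let shared = Bytes::from(b"hello".to_vec());
    /// let _other = shared.clone();
    /// assert!(shared.try_into_mut().is_err());
    /// ```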
    pub fn try_into_mut(self) -> Result<BytesMut, Bytes> {
        if self.is_unique() {
            Ok(self.into())
        } else {
            Err(self)
        }
    }

    #[inline]
    pub(crate) unsafe fn with_vtable(
        ptr: *const u8,
        len: usize,
        data: AtomicPtr<()>,
        vtable: &'static Vtable,
    ) -> Bytes {
        Bytes {
            ptr,
            len,
            data,
            vtable,
        }
    }

    #[inline]
    fn as_slice(&self) -> &[u8] {
        unsafe { slice::from_raw_parts(self.ptr, self.len) }
    }

    #[inline]
    unsafe fn inc_start(&mut self, by: usize) {
        debug_assert!(self.len >= by, "internal: inc_start out of bounds");
        self.len -= by;
        self.ptr = self.ptr.add(by);
    }
}

unsafe impl Send for Bytes {}
unsafe impl Sync for Bytes {}

impl Drop for Bytes {
    #[inline]
    fn drop(&mut self) {
        unsafe { (self.vtable.drop)(&mut self.data, self.ptr, self.len) }
    }
}

impl Clone for Bytes {
    #[inline]
    fn clone(&self) -> Bytes {
        unsafe { (self.vtable.clone)(&self.data, self.ptr, self.len) }
    }
}

impl Buf for Bytes {
    #[inline]
    fn remaining(&self) -> usize {
        self.len()
    }

    #[inline]
    fn chunk(&self) -> &[u8] {
        self.as_slice()
    }

    #[inline]
    fn advance(&mut self, cnt: usize) {
        assert!(
            cnt <= self.len(),
            "cannot advance past `remaining`: {:?} <= {:?}",
            cnt,
            self.len(),
        );

        unsafe {
            self.inc_start(cnt);
        }
    }

    fn copy_to_bytes(&mut self, len: usize) -> Self {
        self.split_to(len)
    }
}

impl Deref for Bytes {
    type Target = [u8];

    #[inline]
    fn deref(&self) -> &[u8] {
        self.as_slice()
    }
}

impl AsRef<[u8]> for Bytes {
    #[inline]
    fn as_ref(&self) -> &[u8] {
        self.as_slice()
    }
}

impl hash::Hash for Bytes {
    fn hash<H>(&self, state: &mut H)
    where
        H: hash::Hasher,
    {
        self.as_slice().hash(state);
    }
}

impl Borrow<[u8]> for Bytes {
    fn borrow(&self) -> &[u8] {
        self.as_slice()
    }
}

impl IntoIterator for Bytes {
    type Item = u8;
    type IntoIter = IntoIter<Bytes>;

    fn into_iter(self) -> Self::IntoIter {
        IntoIter::new(self)
    }
}

impl<'a> IntoIterator for &'a Bytes {
    type Item = &'a u8;
    type IntoIter = core::slice::Iter<'a, u8>;

    fn into_iter(self) -> Self::IntoIter {
        self.as_slice().iter()
    }
}

impl FromIterator<u8> for Bytes {
    fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
        Vec::from_iter(into_iter).into()
    }
}

impl PartialEq for Bytes {
    fn eq(&self, other: &Bytes) -> bool {
        self.as_slice() == other.as_slice()
    }
}

impl PartialOrd for Bytes {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(other.as_slice())
    }
}

impl Ord for Bytes {
    fn cmp(&self, other: &Bytes) -> cmp::Ordering {
        self.as_slice().cmp(other.as_slice())
    }
}

impl Eq for Bytes {}

impl PartialEq<[u8]> for Bytes {
    fn eq(&self, other: &[u8]) -> bool {
        self.as_slice() == other
    }
}

impl PartialOrd<[u8]> for Bytes {
    fn partial_cmp(&self, other: &[u8]) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(other)
    }
}

impl PartialEq<Bytes> for [u8] {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for [u8] {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
    }
}

impl PartialEq<str> for Bytes {
    fn eq(&self, other: &str) -> bool {
        self.as_slice() == other.as_bytes()
    }
}

impl PartialOrd<str> for Bytes {
    fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(other.as_bytes())
    }
}

impl PartialEq<Bytes> for str {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for str {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
    }
}

impl PartialEq<Vec<u8>> for Bytes {
    fn eq(&self, other: &Vec<u8>) -> bool {
        *self == other[..]
    }
}

impl PartialOrd<Vec<u8>> for Bytes {
    fn partial_cmp(&self, other: &Vec<u8>) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(&other[..])
    }
}

impl PartialEq<Bytes> for Vec<u8> {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for Vec<u8> {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
    }
}

impl PartialEq<String> for Bytes {
    fn eq(&self, other: &String) -> bool {
        *self == other[..]
    }
}

impl PartialOrd<String> for Bytes {
    fn partial_cmp(&self, other: &String) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(other.as_bytes())
    }
}

impl PartialEq<Bytes> for String {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for String {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
    }
}

impl PartialEq<Bytes> for &[u8] {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for &[u8] {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
    }
}

impl PartialEq<Bytes> for &str {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for &str {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
    }
}

impl<'a, T: ?Sized> PartialEq<&'a T> for Bytes
where
    Bytes: PartialEq<T>,
{
    fn eq(&self, other: &&'a T) -> bool {
        *self == **other
    }
}

impl<'a, T: ?Sized> PartialOrd<&'a T> for Bytes
where
    Bytes: PartialOrd<T>,
{
    fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
        self.partial_cmp(&**other)
    }
}

impl Default for Bytes {
    #[inline]
    fn default() -> Bytes {
        Bytes::new()
    }
}

impl From<&'static [u8]> for Bytes {
    fn from(slice: &'static [u8]) -> Bytes {
        Bytes::from_static(slice)
    }
}

impl From<&'static str> for Bytes {
    fn from(slice: &'static str) -> Bytes {
        Bytes::from_static(slice.as_bytes())
    }
}

impl From<Vec<u8>> for Bytes {
    fn from(vec: Vec<u8>) -> Bytes {
        let mut vec = ManuallyDrop::new(vec);
        let ptr = vec.as_mut_ptr();
        let len = vec.len();
        let cap = vec.capacity();

        // A fully-used vec can take the cheaper `Box<[u8]>` route and avoid
        // allocating a `Shared` up front.
        if len == cap {
            let vec = ManuallyDrop::into_inner(vec);
            return Bytes::from(vec.into_boxed_slice());
        }

        let shared = Box::new(Shared {
            buf: ptr,
            cap,
            ref_cnt: AtomicUsize::new(1),
        });

        let shared = Box::into_raw(shared);
        debug_assert!(
            0 == (shared as usize & KIND_MASK),
            "internal: Box<Shared> should have an aligned pointer",
        );
        Bytes {
            ptr,
            len,
            data: AtomicPtr::new(shared as _),
            vtable: &SHARED_VTABLE,
        }
    }
}

impl From<Box<[u8]>> for Bytes {
    fn from(slice: Box<[u8]>) -> Bytes {
        // An empty `Box<[u8]>` has no real heap allocation, so fall back to
        // the static empty representation.
        if slice.is_empty() {
            return Bytes::new();
        }

        let len = slice.len();
        let ptr = Box::into_raw(slice) as *mut u8;

        if ptr as usize & 0x1 == 0 {
            // Even pointer: stash the KIND_VEC tag in the (free) low bit.
            let data = ptr_map(ptr, |addr| addr | KIND_VEC);
            Bytes {
                ptr,
                len,
                data: AtomicPtr::new(data.cast()),
                vtable: &PROMOTABLE_EVEN_VTABLE,
            }
        } else {
            // Odd pointer: its low bit already reads as KIND_VEC, so store it
            // untouched; the "odd" vtable knows not to mask it off.
            Bytes {
                ptr,
                len,
                data: AtomicPtr::new(ptr.cast()),
                vtable: &PROMOTABLE_ODD_VTABLE,
            }
        }
    }
}

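/// Converts `Bytes` back into a `BytesMut`, reusing the underlying buffer
/// where possible and copying otherwise.
///
/// A usage sketch (assumes the usual `bytes::{Bytes, BytesMut}` crate-root
/// re-exports):
///
/// ```
/// use bytes::{Bytes, BytesMut};
///
/// let bytes = Bytes::from(b"hello".to_vec());
/// let bytes_mut = BytesMut::from(bytes);
///
/// assert_eq!(&bytes_mut[..], b"hello");
/// ```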
impl From<Bytes> for BytesMut {
    fn from(bytes: Bytes) -> Self {
        let bytes = ManuallyDrop::new(bytes);
        unsafe { (bytes.vtable.to_mut)(&bytes.data, bytes.ptr, bytes.len) }
    }
}

impl From<String> for Bytes {
    fn from(s: String) -> Bytes {
        Bytes::from(s.into_bytes())
    }
}

impl From<Bytes> for Vec<u8> {
    fn from(bytes: Bytes) -> Vec<u8> {
        let bytes = ManuallyDrop::new(bytes);
        unsafe { (bytes.vtable.to_vec)(&bytes.data, bytes.ptr, bytes.len) }
    }
}

impl fmt::Debug for Vtable {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Vtable")
            .field("clone", &(self.clone as *const ()))
            .field("drop", &(self.drop as *const ()))
            .finish()
    }
}

const STATIC_VTABLE: Vtable = Vtable {
    clone: static_clone,
    to_vec: static_to_vec,
    to_mut: static_to_mut,
    is_unique: static_is_unique,
    drop: static_drop,
};

unsafe fn static_clone(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    let slice = slice::from_raw_parts(ptr, len);
    Bytes::from_static(slice)
}

unsafe fn static_to_vec(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
    let slice = slice::from_raw_parts(ptr, len);
    slice.to_vec()
}

unsafe fn static_to_mut(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
    let slice = slice::from_raw_parts(ptr, len);
    BytesMut::from(slice)
}

fn static_is_unique(_: &AtomicPtr<()>) -> bool {
    false
}

unsafe fn static_drop(_: &mut AtomicPtr<()>, _: *const u8, _: usize) {
    // nothing to drop for &'static [u8]
}

#[repr(C)]
struct OwnedLifetime {
    ref_cnt: AtomicUsize,
    drop: unsafe fn(*mut ()),
}

#[repr(C)]
struct Owned<T> {
    lifetime: OwnedLifetime,
    owner: T,
}

unsafe fn owned_box_and_drop<T>(ptr: *mut ()) {
    let b: Box<Owned<T>> = Box::from_raw(ptr as _);
    drop(b);
}

unsafe fn owned_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    let owned = data.load(Ordering::Relaxed);
    let ref_cnt = &(*owned.cast::<OwnedLifetime>()).ref_cnt;
    let old_cnt = ref_cnt.fetch_add(1, Ordering::Relaxed);
    if old_cnt > usize::MAX >> 1 {
        crate::abort()
    }

    Bytes {
        ptr,
        len,
        data: AtomicPtr::new(owned as _),
        vtable: &OWNED_VTABLE,
    }
}

unsafe fn owned_to_vec(_data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
    let slice = slice::from_raw_parts(ptr, len);
    slice.to_vec()
}

unsafe fn owned_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
    let bytes_mut = BytesMut::from_vec(owned_to_vec(data, ptr, len));
    owned_drop_impl(data.load(Ordering::Relaxed));
    bytes_mut
}

unsafe fn owned_is_unique(_data: &AtomicPtr<()>) -> bool {
    false
}

unsafe fn owned_drop_impl(owned: *mut ()) {
    let lifetime = owned.cast::<OwnedLifetime>();
    let ref_cnt = &(*lifetime).ref_cnt;

    let old_cnt = ref_cnt.fetch_sub(1, Ordering::Release);
    if old_cnt != 1 {
        return;
    }
    // Acquire load: synchronize with the `Release` decrements of other handles before dropping the owner.
    ref_cnt.load(Ordering::Acquire);

    let drop_fn = &(*lifetime).drop;
    drop_fn(owned)
}

unsafe fn owned_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) {
    let owned = data.load(Ordering::Relaxed);
    owned_drop_impl(owned);
}

static OWNED_VTABLE: Vtable = Vtable {
    clone: owned_clone,
    to_vec: owned_to_vec,
    to_mut: owned_to_mut,
    is_unique: owned_is_unique,
    drop: owned_drop,
};

static PROMOTABLE_EVEN_VTABLE: Vtable = Vtable {
    clone: promotable_even_clone,
    to_vec: promotable_even_to_vec,
    to_mut: promotable_even_to_mut,
    is_unique: promotable_is_unique,
    drop: promotable_even_drop,
};

static PROMOTABLE_ODD_VTABLE: Vtable = Vtable {
    clone: promotable_odd_clone,
    to_vec: promotable_odd_to_vec,
    to_mut: promotable_odd_to_mut,
    is_unique: promotable_is_unique,
    drop: promotable_odd_drop,
};

unsafe fn promotable_even_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    let shared = data.load(Ordering::Acquire);
    let kind = shared as usize & KIND_MASK;

    if kind == KIND_ARC {
        shallow_clone_arc(shared.cast(), ptr, len)
    } else {
        debug_assert_eq!(kind, KIND_VEC);
        let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK);
        shallow_clone_vec(data, shared, buf, ptr, len)
    }
}

unsafe fn promotable_to_vec(
    data: &AtomicPtr<()>,
    ptr: *const u8,
    len: usize,
    f: fn(*mut ()) -> *mut u8,
) -> Vec<u8> {
    let shared = data.load(Ordering::Acquire);
    let kind = shared as usize & KIND_MASK;

    if kind == KIND_ARC {
        shared_to_vec_impl(shared.cast(), ptr, len)
    } else {
        debug_assert_eq!(kind, KIND_VEC);

        let buf = f(shared);

        let cap = offset_from(ptr, buf) + len;

        ptr::copy(ptr, buf, len);

        Vec::from_raw_parts(buf, len, cap)
    }
}

unsafe fn promotable_to_mut(
    data: &AtomicPtr<()>,
    ptr: *const u8,
    len: usize,
    f: fn(*mut ()) -> *mut u8,
) -> BytesMut {
    let shared = data.load(Ordering::Acquire);
    let kind = shared as usize & KIND_MASK;

    if kind == KIND_ARC {
        shared_to_mut_impl(shared.cast(), ptr, len)
    } else {
        debug_assert_eq!(kind, KIND_VEC);

        let buf = f(shared);
        let off = offset_from(ptr, buf);
        let cap = off + len;
        let v = Vec::from_raw_parts(buf, cap, cap);

        let mut b = BytesMut::from_vec(v);
        b.advance_unchecked(off);
        b
    }
}

unsafe fn promotable_even_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
    promotable_to_vec(data, ptr, len, |shared| {
        ptr_map(shared.cast(), |addr| addr & !KIND_MASK)
    })
}

unsafe fn promotable_even_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
    promotable_to_mut(data, ptr, len, |shared| {
        ptr_map(shared.cast(), |addr| addr & !KIND_MASK)
    })
}

unsafe fn promotable_even_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) {
    data.with_mut(|shared| {
        let shared = *shared;
        let kind = shared as usize & KIND_MASK;

        if kind == KIND_ARC {
            release_shared(shared.cast());
        } else {
            debug_assert_eq!(kind, KIND_VEC);
            let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK);
            free_boxed_slice(buf, ptr, len);
        }
    });
}

unsafe fn promotable_odd_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    let shared = data.load(Ordering::Acquire);
    let kind = shared as usize & KIND_MASK;

    if kind == KIND_ARC {
        shallow_clone_arc(shared as _, ptr, len)
    } else {
        debug_assert_eq!(kind, KIND_VEC);
        shallow_clone_vec(data, shared, shared.cast(), ptr, len)
    }
}

unsafe fn promotable_odd_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
    promotable_to_vec(data, ptr, len, |shared| shared.cast())
}

unsafe fn promotable_odd_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
    promotable_to_mut(data, ptr, len, |shared| shared.cast())
}

unsafe fn promotable_odd_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) {
    data.with_mut(|shared| {
        let shared = *shared;
        let kind = shared as usize & KIND_MASK;

        if kind == KIND_ARC {
            release_shared(shared.cast());
        } else {
            debug_assert_eq!(kind, KIND_VEC);

            free_boxed_slice(shared.cast(), ptr, len);
        }
    });
}

unsafe fn promotable_is_unique(data: &AtomicPtr<()>) -> bool {
    let shared = data.load(Ordering::Acquire);
    let kind = shared as usize & KIND_MASK;

    if kind == KIND_ARC {
        let ref_cnt = (*shared.cast::<Shared>()).ref_cnt.load(Ordering::Relaxed);
        ref_cnt == 1
    } else {
        true
    }
}

unsafe fn free_boxed_slice(buf: *mut u8, offset: *const u8, len: usize) {
    let cap = offset_from(offset, buf) + len;
    dealloc(buf, Layout::from_size_align(cap, 1).unwrap())
}

struct Shared {
    buf: *mut u8,
    cap: usize,
    ref_cnt: AtomicUsize,
}

impl Drop for Shared {
    fn drop(&mut self) {
        unsafe { dealloc(self.buf, Layout::from_size_align(self.cap, 1).unwrap()) }
    }
}

// Compile-time assertion that `Shared` has even alignment, so the low bit of
// a `*mut Shared` is always free to hold the kind tag used below.
const _: [(); 0 - mem::align_of::<Shared>() % 2] = [];

static SHARED_VTABLE: Vtable = Vtable {
    clone: shared_clone,
    to_vec: shared_to_vec,
    to_mut: shared_to_mut,
    is_unique: shared_is_unique,
    drop: shared_drop,
};

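// Kind tag carried in the low bit of the `data` pointer of promotable
// handles: `KIND_VEC` while the handle still refers directly to the original
// vec/box buffer, `KIND_ARC` once it has been promoted to a ref-counted
// `Shared` allocation.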
const KIND_ARC: usize = 0b0;
const KIND_VEC: usize = 0b1;
const KIND_MASK: usize = 0b1;

unsafe fn shared_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    let shared = data.load(Ordering::Relaxed);
    shallow_clone_arc(shared as _, ptr, len)
}

unsafe fn shared_to_vec_impl(shared: *mut Shared, ptr: *const u8, len: usize) -> Vec<u8> {
    if (*shared)
        .ref_cnt
        .compare_exchange(1, 0, Ordering::AcqRel, Ordering::Relaxed)
        .is_ok()
    {
        let shared = *Box::from_raw(shared);
        let shared = ManuallyDrop::new(shared);
        let buf = shared.buf;
        let cap = shared.cap;

        ptr::copy(ptr, buf, len);

        Vec::from_raw_parts(buf, len, cap)
    } else {
        let v = slice::from_raw_parts(ptr, len).to_vec();
        release_shared(shared);
        v
    }
}

unsafe fn shared_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
    shared_to_vec_impl(data.load(Ordering::Relaxed).cast(), ptr, len)
}

unsafe fn shared_to_mut_impl(shared: *mut Shared, ptr: *const u8, len: usize) -> BytesMut {
    if (*shared).ref_cnt.load(Ordering::Acquire) == 1 {
        let shared = *Box::from_raw(shared);
        let shared = ManuallyDrop::new(shared);
        let buf = shared.buf;
        let cap = shared.cap;

        let off = offset_from(ptr, buf);
        let v = Vec::from_raw_parts(buf, len + off, cap);

        let mut b = BytesMut::from_vec(v);
        b.advance_unchecked(off);
        b
    } else {
        let v = slice::from_raw_parts(ptr, len).to_vec();
        release_shared(shared);
        BytesMut::from_vec(v)
    }
}

unsafe fn shared_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
    shared_to_mut_impl(data.load(Ordering::Relaxed).cast(), ptr, len)
}

pub(crate) unsafe fn shared_is_unique(data: &AtomicPtr<()>) -> bool {
    let shared = data.load(Ordering::Acquire);
    let ref_cnt = (*shared.cast::<Shared>()).ref_cnt.load(Ordering::Relaxed);
    ref_cnt == 1
}

unsafe fn shared_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) {
    data.with_mut(|shared| {
        release_shared(shared.cast());
    });
}

unsafe fn shallow_clone_arc(shared: *mut Shared, ptr: *const u8, len: usize) -> Bytes {
    let old_size = (*shared).ref_cnt.fetch_add(1, Ordering::Relaxed);

    if old_size > usize::MAX >> 1 {
        crate::abort();
    }

    Bytes {
        ptr,
        len,
        data: AtomicPtr::new(shared as _),
        vtable: &SHARED_VTABLE,
    }
}

#[cold]
unsafe fn shallow_clone_vec(
    atom: &AtomicPtr<()>,
    ptr: *const (),
    buf: *mut u8,
    offset: *const u8,
    len: usize,
) -> Bytes {
    let shared = Box::new(Shared {
        buf,
        cap: offset_from(offset, buf) + len,
        // Start the refcount at 2: one for this new handle, one for the
        // handle that triggered the promotion.
        ref_cnt: AtomicUsize::new(2),
    });

    let shared = Box::into_raw(shared);

    debug_assert!(
        0 == (shared as usize & KIND_MASK),
        "internal: Box<Shared> should have an aligned pointer",
    );

    match atom.compare_exchange(ptr as _, shared as _, Ordering::AcqRel, Ordering::Acquire) {
        Ok(actual) => {
            debug_assert!(actual as usize == ptr as usize);
            Bytes {
                ptr: offset,
                len,
                data: AtomicPtr::new(shared as _),
                vtable: &SHARED_VTABLE,
            }
        }
        Err(actual) => {
            // Another thread promoted first: discard our `Shared` without
            // freeing the buffer it points at, and clone the winner instead.
            let shared = Box::from_raw(shared);
            mem::forget(*shared);

            shallow_clone_arc(actual as _, offset, len)
        }
    }
}

unsafe fn release_shared(ptr: *mut Shared) {
    if (*ptr).ref_cnt.fetch_sub(1, Ordering::Release) != 1 {
        return;
    }

    // Acquire load: synchronize with the `Release` decrements above before freeing the buffer.
    (*ptr).ref_cnt.load(Ordering::Acquire);

    drop(Box::from_raw(ptr));
}

#[cfg(miri)]
fn ptr_map<F>(ptr: *mut u8, f: F) -> *mut u8
where
    F: FnOnce(usize) -> usize,
{
    let old_addr = ptr as usize;
    let new_addr = f(old_addr);
    let diff = new_addr.wrapping_sub(old_addr);
    ptr.wrapping_add(diff)
}

#[cfg(not(miri))]
fn ptr_map<F>(ptr: *mut u8, f: F) -> *mut u8
where
    F: FnOnce(usize) -> usize,
{
    let old_addr = ptr as usize;
    let new_addr = f(old_addr);
    new_addr as *mut u8
}

fn without_provenance(ptr: usize) -> *const u8 {
    core::ptr::null::<u8>().wrapping_add(ptr)
}

// Anchors for `compile_fail` doctests covering the `#[must_use]` attributes on `split_to`/`split_off`.
fn _split_to_must_use() {}

fn _split_off_must_use() {}

#[cfg(all(test, loom))]
mod fuzz {
    use loom::sync::Arc;
    use loom::thread;

    use super::Bytes;

    #[test]
    fn bytes_cloning_vec() {
        loom::model(|| {
            let a = Bytes::from(b"abcdefgh".to_vec());
            let addr = a.as_ptr() as usize;

            let a1 = Arc::new(a);
            let a2 = a1.clone();

            let t1 = thread::spawn(move || {
                let b: Bytes = (*a1).clone();
                assert_eq!(b.as_ptr() as usize, addr);
            });

            let t2 = thread::spawn(move || {
                let b: Bytes = (*a2).clone();
                assert_eq!(b.as_ptr() as usize, addr);
            });

            t1.join().unwrap();
            t2.join().unwrap();
        });
    }
}