Mirror of https://github.com/ntex-rs/ntex.git, synced 2025-04-04 13:27:39 +03:00
proper use of NonNull
This commit is contained in:
parent ace681ba74
commit 709cd3216b

5 changed files with 76 additions and 65 deletions
@@ -1,6 +1,6 @@
 # Changes

-## 0.1.3 (2021-06-27)
+## 0.1.4 (2021-06-27)

 * Reduce size of Option<Bytes> by using NonNull

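The changelog entry refers to Rust's niche optimization: `NonNull<T>` is guaranteed non-null, so `Option` can use the all-zero bit pattern for `None` and needs no separate discriminant. A minimal sketch of the effect, assuming a 64-bit target (illustrative only, not ntex-bytes code):

use std::mem::size_of;
use std::ptr::NonNull;
use std::sync::atomic::AtomicPtr;

fn main() {
    // NonNull can never be null, so Option reuses the null pattern for None.
    assert_eq!(size_of::<Option<NonNull<u8>>>(), 8);
    // Raw and atomic pointers may legally be null, so Option needs an extra word.
    assert_eq!(size_of::<Option<*mut u8>>(), 16);
    assert_eq!(size_of::<Option<AtomicPtr<u8>>>(), 16);
}
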
@@ -1,6 +1,6 @@
 [package]
 name = "ntex-bytes"
-version = "0.1.3"
+version = "0.1.4"
 license = "MIT"
 authors = ["Carl Lerche <me@carllerche.com>"]
 description = "Types and traits for working with bytes (bytes crate fork)"

@@ -296,8 +296,8 @@ pub struct BytesMut {
 struct Inner {
     // WARNING: Do not access the fields directly unless you know what you are
     // doing. Instead, use the fns. See implementation comment above.
-    arc: AtomicPtr<Shared>,
-    ptr: NonNull<u8>,
+    arc: NonNull<Shared>,
+    ptr: *mut u8,
     len: usize,
     cap: usize,
 }

@@ -307,10 +307,10 @@ struct Inner {
 struct Inner {
     // WARNING: Do not access the fields directly unless you know what you are
     // doing. Instead, use the fns. See implementation comment above.
-    ptr: NonNull<u8>,
+    ptr: *mut u8,
     len: usize,
     cap: usize,
-    arc: AtomicPtr<Shared>,
+    arc: NonNull<Shared>,
 }

 // Thread-safe reference-counted container for the shared storage. This mostly

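This is where the niche actually moves: `arc` becomes `NonNull<Shared>` and `ptr` goes back to a plain `*mut u8`. The `arc` word always carries a non-zero kind tag, so it is the field that can soundly promise "never zero", whereas the bytes of `ptr`, `len` and `cap` are reused for inline data (see the `as_raw()[0..len].copy_from_slice(src)` hunk below) and may legitimately be zero. A reduced model of why one non-nullable field keeps `Option<Bytes>` at 32 bytes, with stand-in types and a 64-bit target assumed:

use std::mem::size_of;
use std::ptr::NonNull;
use std::sync::atomic::AtomicPtr;

// `u8` stands in for `Shared`; these are not the real ntex-bytes types.
struct TaggedInner {
    arc: NonNull<u8>, // always non-zero: a kind tag is always set
    ptr: *mut u8,
    len: usize,
    cap: usize,
}

struct UntaggedInner {
    arc: AtomicPtr<u8>, // any bit pattern is valid: no niche anywhere
    ptr: *mut u8,
    len: usize,
    cap: usize,
}

fn main() {
    assert_eq!(size_of::<TaggedInner>(), 32);
    assert_eq!(size_of::<Option<TaggedInner>>(), 32); // None uses the zero niche
    assert_eq!(size_of::<Option<UntaggedInner>>(), 40); // extra discriminant word
}
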
@@ -1756,7 +1756,7 @@ impl<'a> From<&'a [u8]> for BytesMut {
 let mut inner: Inner = mem::MaybeUninit::uninit().assume_init();

 // Set inline mask
-inner.arc = AtomicPtr::new(KIND_INLINE as *mut Shared);
+inner.arc = NonNull::new_unchecked(KIND_INLINE as *mut Shared);
 inner.set_inline_len(len);
 inner.as_raw()[0..len].copy_from_slice(src);

@@ -1916,8 +1916,8 @@ impl Inner {
 // `arc` won't ever store a pointer. Instead, use it to
 // track the fact that the `Bytes` handle is backed by a
 // static buffer.
-arc: AtomicPtr::new(KIND_STATIC as *mut Shared),
-ptr: unsafe { NonNull::new_unchecked(ptr) },
+arc: unsafe { NonNull::new_unchecked(KIND_STATIC as *mut Shared) },
+ptr,
 len: bytes.len(),
 cap: bytes.len(),
 }

@@ -1935,8 +1935,8 @@ impl Inner {
 let arc = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC;

 Inner {
-    arc: AtomicPtr::new(arc as *mut Shared),
-    ptr: unsafe { NonNull::new_unchecked(ptr) },
+    arc: unsafe { NonNull::new_unchecked(arc as *mut Shared) },
+    ptr,
     len,
     cap,
 }

@@ -1949,7 +1949,7 @@ impl Inner {
 // Using uninitialized memory is ~30% faster
 #[allow(invalid_value, clippy::uninit_assumed_init)]
 let mut inner: Inner = mem::MaybeUninit::uninit().assume_init();
-inner.arc = AtomicPtr::new(KIND_INLINE as *mut Shared);
+inner.arc = NonNull::new_unchecked(KIND_INLINE as *mut Shared);
 inner
 }
 } else {

@@ -1964,7 +1964,7 @@ impl Inner {
 if self.is_inline() {
     slice::from_raw_parts(self.inline_ptr(), self.inline_len())
 } else {
-    slice::from_raw_parts(self.ptr.as_ptr(), self.len)
+    slice::from_raw_parts(self.ptr, self.len)
 }
 }
 }

@@ -1978,7 +1978,7 @@ impl Inner {
 if self.is_inline() {
     slice::from_raw_parts_mut(self.inline_ptr(), self.inline_len())
 } else {
-    slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len)
+    slice::from_raw_parts_mut(self.ptr, self.len)
 }
 }
 }

@@ -1992,7 +1992,7 @@ impl Inner {
 if self.is_inline() {
     slice::from_raw_parts_mut(self.inline_ptr(), INLINE_CAP)
 } else {
-    slice::from_raw_parts_mut(self.ptr.as_ptr(), self.cap)
+    slice::from_raw_parts_mut(self.ptr, self.cap)
 }
 }

@@ -2009,7 +2009,7 @@ impl Inner {
 } else {
     assert!(self.len < self.cap);
     unsafe {
-        *self.ptr.as_ptr().add(self.len) = n;
+        *self.ptr.add(self.len) = n;
     }
     self.len += 1;
 }

@@ -2035,8 +2035,7 @@ impl Inner {
 // This is undefind behavior due to a data race, but experimental
 // evidence shows that it works in practice (discussion:
 // https://internals.rust-lang.org/t/bit-wise-reasoning-for-atomic-accesses/8853).
-let p: *const usize = unsafe { mem::transmute(&self.arc) };
-(unsafe { *p } & INLINE_LEN_MASK) >> INLINE_LEN_OFFSET
+(self.arc.as_ptr() as usize & INLINE_LEN_MASK) >> INLINE_LEN_OFFSET
 }

 /// Set the length of the inline buffer. This is done by writing to the

@@ -2044,8 +2043,12 @@ impl Inner {
 #[inline]
 fn set_inline_len(&mut self, len: usize) {
     debug_assert!(len <= INLINE_CAP);
-    let p = self.arc.get_mut();
-    *p = ((*p as usize & !INLINE_LEN_MASK) | (len << INLINE_LEN_OFFSET)) as _;
+    self.arc = unsafe {
+        NonNull::new_unchecked(
+            ((self.arc.as_ptr() as usize & !INLINE_LEN_MASK)
+                | (len << INLINE_LEN_OFFSET)) as _,
+        )
+    };
 }

 /// slice.

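With `arc` now a plain tagged word rather than an `AtomicPtr`, reading and writing the inline length is ordinary bit twiddling on the pointer value, as the two hunks above show. A standalone sketch of that packing scheme; the mask, offset and tag values here are hypothetical, the real constants are defined elsewhere in bytes.rs:

// Hypothetical constants mirroring INLINE_LEN_MASK / INLINE_LEN_OFFSET.
const INLINE_LEN_MASK: usize = 0b1111_1100;
const INLINE_LEN_OFFSET: usize = 2;

// Pack a small length into the tag word without disturbing the other bits.
fn set_inline_len(word: usize, len: usize) -> usize {
    (word & !INLINE_LEN_MASK) | (len << INLINE_LEN_OFFSET)
}

// Recover the length from the tag word.
fn inline_len(word: usize) -> usize {
    (word & INLINE_LEN_MASK) >> INLINE_LEN_OFFSET
}

fn main() {
    let word = 0b01usize; // low bits hold the kind tag
    let word = set_inline_len(word, 5);
    assert_eq!(inline_len(word), 5);
    assert_eq!(word & 0b11, 0b01); // kind bits untouched
}
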
@@ -2112,10 +2115,10 @@ impl Inner {
 }

 unsafe {
-    ptr = NonNull::new_unchecked(self.ptr.as_ptr().add(self.len));
+    ptr = self.ptr.add(self.len);
 }
 if ptr == other.ptr && self.kind() == KIND_ARC && other.kind() == KIND_ARC {
-    debug_assert_eq!(self.arc.load(Acquire), other.arc.load(Acquire));
+    // debug_assert_eq!(self.arc.load(Acquire), other.arc.load(Acquire));
     // Contiguous blocks, just combine directly
     self.len += other.len;
     self.cap += other.cap;

@@ -2197,7 +2200,7 @@ impl Inner {
 // Updating the start of the view is setting `ptr` to point to the
 // new start and updating the `len` field to reflect the new length
 // of the view.
-self.ptr = NonNull::new_unchecked(self.ptr.as_ptr().add(start));
+self.ptr = self.ptr.add(start);

 if self.len >= start {
     self.len -= start;

@@ -2241,7 +2244,7 @@ impl Inner {
 } else {
     // Otherwise, the underlying buffer is potentially shared with other
     // handles, so the ref_count needs to be checked.
-    unsafe { (**self.arc.get_mut()).is_unique() }
+    unsafe { (*self.arc.as_ptr()).is_unique() }
 }
 }

@@ -2282,7 +2285,8 @@ impl Inner {
 // `compare_and_swap` that comes later in this function. The goal is
 // to ensure that if `arc` is currently set to point to a `Shared`,
 // that the current thread acquires the associated memory.
-let arc = self.arc.load(Acquire);
+let slf_arc: &AtomicPtr<Shared> = mem::transmute(&self.arc);
+let arc = slf_arc.load(Acquire);
 let kind = arc as usize & KIND_MASK;

 if kind == KIND_ARC {

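On the shared (KIND_ARC) paths the field still has to be read atomically, so rather than storing an `AtomicPtr` permanently, the commit reinterprets the `NonNull<Shared>` field as an `&AtomicPtr<Shared>` at the point of use. A minimal model of that pattern; it leans on the assumption that `NonNull<T>` and `AtomicPtr<T>` are both a single pointer-sized word, and the names here are illustrative, not the ntex-bytes internals:

use std::ptr::NonNull;
use std::sync::atomic::{AtomicPtr, Ordering::Acquire};

struct Cell {
    arc: NonNull<u8>,
}

fn load_arc(cell: &Cell) -> *mut u8 {
    // View the NonNull field as an AtomicPtr of the same size and load it
    // with Acquire ordering, mirroring the transmute in the hunk above.
    let atomic: &AtomicPtr<u8> = unsafe { std::mem::transmute(&cell.arc) };
    atomic.load(Acquire)
}

fn main() {
    let mut x = 7u8;
    let cell = Cell { arc: NonNull::new(&mut x as *mut u8).unwrap() };
    assert_eq!(load_arc(&cell), &mut x as *mut u8);
}
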
@@ -2303,7 +2307,7 @@ impl Inner {
 }

 Inner {
-    arc: AtomicPtr::new(arc),
+    arc: NonNull::new_unchecked(arc),
     ..*self
 }
 }

@@ -2343,14 +2347,15 @@ impl Inner {

 // The pointer should be aligned, so this assert should
 // always succeed.
-debug_assert!(0 == (shared as usize & 0b11));
+debug_assert!(0 == (shared as usize & KIND_MASK));

 // If there are no references to self in other threads,
 // expensive atomic operations can be avoided.
 if mut_self {
-    self.arc.store(shared, Relaxed);
+    let slf_arc: &AtomicPtr<Shared> = mem::transmute(&self.arc);
+    slf_arc.store(shared, Relaxed);
     return Inner {
-        arc: AtomicPtr::new(shared),
+        arc: NonNull::new_unchecked(shared),
         ..*self
     };
 }

@@ -2364,16 +2369,14 @@ impl Inner {
 // ordering will synchronize with the `compare_and_swap`
 // that happened in the other thread and the `Shared`
 // pointed to by `actual` will be visible.
-match self
-    .arc
-    .compare_exchange(arc as *mut Shared, shared, AcqRel, Acquire)
-{
+let slf_arc: &AtomicPtr<Shared> = mem::transmute(&self.arc);
+match slf_arc.compare_exchange(arc as *mut Shared, shared, AcqRel, Acquire) {
     Ok(actual) => {
         debug_assert!(actual as usize == arc);
         // The upgrade was successful, the new handle can be
         // returned.
         Inner {
-            arc: AtomicPtr::new(shared),
+            arc: NonNull::new_unchecked(shared),
             ..*self
         }
     }

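The same transmuted view drives the compare-exchange that upgrades a vector-backed handle to a shared one: whichever thread wins the race installs its freshly allocated `Shared`, and a loser adopts whatever pointer it finds instead (a real implementation would also free the losing allocation). A small sketch of that race-resolution step with hypothetical names, not the ntex-bytes API:

use std::sync::atomic::{AtomicPtr, Ordering::{AcqRel, Acquire}};

// Try to replace the old tag word with a freshly allocated shared state.
// On failure another thread already installed one, so use theirs.
fn promote(slot: &AtomicPtr<u32>, expected: *mut u32, ours: *mut u32) -> *mut u32 {
    match slot.compare_exchange(expected, ours, AcqRel, Acquire) {
        Ok(_) => ours,
        Err(theirs) => theirs,
    }
}

fn main() {
    let mut old = 1u32;
    let mut new = 2u32;
    let slot = AtomicPtr::new(&mut old as *mut u32);
    assert_eq!(promote(&slot, &mut old, &mut new), &mut new as *mut u32);
    assert_eq!(slot.load(Acquire), &mut new as *mut u32);
}
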
@@ -2421,13 +2424,13 @@ impl Inner {
 let mut v = Vec::with_capacity(new_cap);
 v.extend_from_slice(self.as_ref());

-self.ptr = unsafe { NonNull::new_unchecked(v.as_mut_ptr()) };
+self.ptr = v.as_mut_ptr();
 self.len = v.len();
 self.cap = v.capacity();

 // Since the minimum capacity is `INLINE_CAP`, don't bother encoding
 // the original capacity as INLINE_CAP
-self.arc = AtomicPtr::new(KIND_VEC as *mut Shared);
+self.arc = unsafe { NonNull::new_unchecked(KIND_VEC as *mut Shared) };

 mem::forget(v);
 return;

@@ -2449,9 +2452,9 @@ impl Inner {
 //
 // Just move the pointer back to the start after copying
 // data back.
-let base_ptr = self.ptr.as_ptr().offset(-(off as isize));
-ptr::copy(self.ptr.as_ptr(), base_ptr, self.len);
-self.ptr = NonNull::new_unchecked(base_ptr);
+let base_ptr = self.ptr.offset(-(off as isize));
+ptr::copy(self.ptr, base_ptr, self.len);
+self.ptr = base_ptr;
 self.uncoordinated_set_vec_pos(0, prev);

 // Length stays constant, but since we moved backwards we

@@ -2463,7 +2466,7 @@ impl Inner {
 v.reserve(additional);

 // Update the info
-self.ptr = NonNull::new_unchecked(v.as_mut_ptr().add(off));
+self.ptr = v.as_mut_ptr().add(off);
 self.len = v.len() - off;
 self.cap = v.capacity() - off;

@@ -2474,7 +2477,7 @@ impl Inner {
 }
 }

-let arc = *self.arc.get_mut();
+let arc = self.arc.as_ptr();

 debug_assert!(kind == KIND_ARC);

@@ -2502,9 +2505,9 @@ impl Inner {
 // The capacity is sufficient, reclaim the buffer
 let ptr = v.as_mut_ptr();

-ptr::copy(self.ptr.as_ptr(), ptr, len);
+ptr::copy(self.ptr, ptr, len);

-self.ptr = NonNull::new_unchecked(ptr);
+self.ptr = ptr;
 self.cap = v.capacity();

 return;

@@ -2536,13 +2539,13 @@ impl Inner {
 release_shared(arc);

 // Update self
-self.ptr = unsafe { NonNull::new_unchecked(v.as_mut_ptr()) };
+self.ptr = v.as_mut_ptr();
 self.len = v.len();
 self.cap = v.capacity();

 let arc = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC;

-self.arc = AtomicPtr::new(arc as *mut Shared);
+self.arc = unsafe { NonNull::new_unchecked(arc as *mut Shared) };

 // Forget the vector handle
 mem::forget(v);

@@ -2605,23 +2608,20 @@ impl Inner {

 #[cfg(target_endian = "little")]
 #[inline]
-fn imp(arc: &AtomicPtr<Shared>) -> usize {
-    unsafe {
-        let p: *const u8 = mem::transmute(arc);
-        (*p as usize) & KIND_MASK
-    }
+fn imp(arc: *mut Shared) -> usize {
+    (arc as usize) & KIND_MASK
 }

 #[cfg(target_endian = "big")]
 #[inline]
-fn imp(arc: &AtomicPtr<Shared>) -> usize {
+fn imp(arc: *mut Shared) -> usize {
     unsafe {
-        let p: *const usize = mem::transmute(arc);
+        let p: *const usize = arc as *const usize;
         *p & KIND_MASK
     }
 }

-imp(&self.arc)
+imp(self.arc.as_ptr())
 }

 #[inline]

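Because the kind tag lives directly in the `NonNull` value, `kind()` no longer transmutes and reads through a pointer; it just masks the low bits of the word, as the little-endian branch above shows. A sketch with hypothetical tag values (the real KIND_* constants are defined in bytes.rs and may differ):

const KIND_MASK: usize = 0b11;
const KIND_INLINE: usize = 0b01;
const KIND_STATIC: usize = 0b10;

// With a tagged pointer, the kind is just the low bits of the pointer value,
// so no atomic load is needed to classify the representation.
fn kind(arc: *mut u8) -> usize {
    (arc as usize) & KIND_MASK
}

fn main() {
    assert_eq!(kind(KIND_INLINE as *mut u8), KIND_INLINE);
    assert_eq!(kind(KIND_STATIC as *mut u8), KIND_STATIC);
}
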
@@ -2630,11 +2630,7 @@ impl Inner {
 // be called when in the KIND_VEC mode. This + the &mut self argument
 // guarantees that there is no possibility of concurrent calls to this
 // function.
-let prev = unsafe {
-    let p: &AtomicPtr<Shared> = &self.arc;
-    let p: *const usize = mem::transmute(p);
-    *p
-};
+let prev = self.arc.as_ptr() as usize;

 (prev >> VEC_POS_OFFSET, prev)
 }

@@ -2645,16 +2641,16 @@ impl Inner {
 debug_assert!(pos <= MAX_VEC_POS);

 unsafe {
-    let p: &mut AtomicPtr<Shared> = &mut self.arc;
-    let p: &mut usize = &mut *(p as *mut _ as *mut usize);
-    *p = (pos << VEC_POS_OFFSET) | (prev & NOT_VEC_POS_MASK);
+    self.arc = NonNull::new_unchecked(
+        ((pos << VEC_POS_OFFSET) | (prev & NOT_VEC_POS_MASK)) as *mut Shared,
+    );
 }
 }
 }

-fn rebuild_vec(ptr: NonNull<u8>, mut len: usize, mut cap: usize, off: usize) -> Vec<u8> {
+fn rebuild_vec(ptr: *mut u8, mut len: usize, mut cap: usize, off: usize) -> Vec<u8> {
 unsafe {
-    let ptr = ptr.as_ptr().offset(-(off as isize));
+    let ptr = ptr.offset(-(off as isize));
 len += off;
 cap += off;

@@ -2672,7 +2668,7 @@ impl Drop for Inner {
 // Vector storage, free the vector
 let _ = rebuild_vec(self.ptr, self.len, self.cap, off);
 } else if kind == KIND_ARC {
-    release_shared(*self.arc.get_mut());
+    release_shared(self.arc.as_ptr());
 }
 }
 }

@@ -18,6 +18,21 @@ fn is_send<T: Send>() {}
 fn test_size() {
     assert_eq!(32, std::mem::size_of::<Bytes>());
     assert_eq!(32, std::mem::size_of::<Option<Bytes>>());

+    let mut t = BytesMut::new();
+    t.extend_from_slice(
+        &b"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"[..],
+    );
+
+    let val = t.freeze();
+    assert!(val.is_inline());
+
+    let val = Some(val);
+    assert!(val.is_some());
+    assert_eq!(
+        val.as_ref().unwrap(),
+        &b"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"[..]
+    )
 }

 #[test]

@@ -49,7 +49,7 @@ ntex-router = "0.5.0"
 ntex-service = "0.1.9"
 ntex-macros = "0.1.3"
 ntex-util = "0.1.1"
-ntex-bytes = "0.1.3"
+ntex-bytes = "0.1.4"

 ahash = "0.7.4"
 base64 = "0.13"