Reduce size of Option<Bytes> by using NonNull

Nikolay Kim 2021-06-27 18:31:48 +06:00
parent 53e9487357
commit ace681ba74
5 changed files with 36 additions and 27 deletions
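
The size win comes from Rust's niche optimization: NonNull<u8> is guaranteed to be non-null, so the all-zero bit pattern becomes free to encode None and Option<Bytes> no longer needs a separate discriminant word. A minimal standalone sketch of the idea follows; RawInner, NicheInner and the placeholder Shared type are illustrative stand-ins for the crate's internal Inner struct, not code from this commit.

use std::mem::size_of;
use std::ptr::NonNull;
use std::sync::atomic::AtomicPtr;

struct Shared; // placeholder so the sketch compiles on its own

// Every bit pattern is valid here, so Option<RawInner> needs an extra tag.
#[allow(dead_code)]
struct RawInner {
    arc: AtomicPtr<Shared>,
    ptr: *mut u8,
    len: usize,
    cap: usize,
}

// NonNull<u8> can never be null; the compiler reuses that forbidden "null"
// pattern as the Option discriminant, so the wrapper adds no bytes.
#[allow(dead_code)]
struct NicheInner {
    arc: AtomicPtr<Shared>,
    ptr: NonNull<u8>,
    len: usize,
    cap: usize,
}

fn main() {
    // Wrapping the NonNull-based struct in Option costs nothing...
    assert_eq!(size_of::<NicheInner>(), size_of::<Option<NicheInner>>());
    // ...while the raw-pointer version grows to make room for a discriminant.
    assert!(size_of::<Option<RawInner>>() > size_of::<RawInner>());
}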

View file

@@ -1,5 +1,9 @@
 # Changes
 
+## 0.1.3 (2021-06-27)
+
+* Reduce size of Option<Bytes> by using NonNull
+
 ## 0.1.2 (2021-06-27)
 
 * Reserve space for put_slice

View file

@@ -1,6 +1,6 @@
 [package]
 name = "ntex-bytes"
-version = "0.1.2"
+version = "0.1.3"
 license = "MIT"
 authors = ["Carl Lerche <me@carllerche.com>"]
 description = "Types and traits for working with bytes (bytes crate fork)"
@@ -15,8 +15,6 @@ edition = "2018"
 serde = "1.0"
 bytes = "1.0.1"
-backtrace = "*"
 
 [dev-dependencies]
 serde_test = "1.0"
 serde_json = "1.0"

View file

@@ -3,7 +3,7 @@ use std::iter::{FromIterator, Iterator};
 use std::ops::{Deref, DerefMut, RangeBounds};
 use std::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release};
 use std::sync::atomic::{self, AtomicPtr, AtomicUsize};
-use std::{cmp, fmt, hash, mem, ptr, slice, usize};
+use std::{cmp, fmt, hash, mem, ptr, ptr::NonNull, slice, usize};
 use crate::{buf::IntoIter, buf::UninitSlice, debug, Buf, BufMut};
@@ -297,7 +297,7 @@ struct Inner {
     // WARNING: Do not access the fields directly unless you know what you are
     // doing. Instead, use the fns. See implementation comment above.
     arc: AtomicPtr<Shared>,
-    ptr: *mut u8,
+    ptr: NonNull<u8>,
     len: usize,
     cap: usize,
 }
@@ -307,7 +307,7 @@ struct Inner {
 struct Inner {
     // WARNING: Do not access the fields directly unless you know what you are
     // doing. Instead, use the fns. See implementation comment above.
-    ptr: *mut u8,
+    ptr: NonNull<u8>,
     len: usize,
     cap: usize,
     arc: AtomicPtr<Shared>,
@@ -1752,7 +1752,7 @@ impl<'a> From<&'a [u8]> for BytesMut {
             BytesMut::new()
         } else if len <= INLINE_CAP {
             unsafe {
-                #[allow(clippy::uninit_assumed_init)]
+                #[allow(invalid_value, clippy::uninit_assumed_init)]
                 let mut inner: Inner = mem::MaybeUninit::uninit().assume_init();
                 // Set inline mask
@@ -1917,7 +1917,7 @@ impl Inner {
             // track the fact that the `Bytes` handle is backed by a
             // static buffer.
             arc: AtomicPtr::new(KIND_STATIC as *mut Shared),
-            ptr,
+            ptr: unsafe { NonNull::new_unchecked(ptr) },
             len: bytes.len(),
             cap: bytes.len(),
         }
@@ -1936,7 +1936,7 @@ impl Inner {
         Inner {
             arc: AtomicPtr::new(arc as *mut Shared),
-            ptr,
+            ptr: unsafe { NonNull::new_unchecked(ptr) },
             len,
             cap,
         }
@@ -1947,7 +1947,7 @@ impl Inner {
         if capacity <= INLINE_CAP {
             unsafe {
                 // Using uninitialized memory is ~30% faster
-                #[allow(clippy::uninit_assumed_init)]
+                #[allow(invalid_value, clippy::uninit_assumed_init)]
                 let mut inner: Inner = mem::MaybeUninit::uninit().assume_init();
                 inner.arc = AtomicPtr::new(KIND_INLINE as *mut Shared);
                 inner
@@ -1964,7 +1964,7 @@ impl Inner {
             if self.is_inline() {
                 slice::from_raw_parts(self.inline_ptr(), self.inline_len())
             } else {
-                slice::from_raw_parts(self.ptr, self.len)
+                slice::from_raw_parts(self.ptr.as_ptr(), self.len)
             }
         }
     }
@@ -1978,7 +1978,7 @@ impl Inner {
             if self.is_inline() {
                 slice::from_raw_parts_mut(self.inline_ptr(), self.inline_len())
             } else {
-                slice::from_raw_parts_mut(self.ptr, self.len)
+                slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len)
             }
         }
     }
@@ -1992,7 +1992,7 @@ impl Inner {
         if self.is_inline() {
             slice::from_raw_parts_mut(self.inline_ptr(), INLINE_CAP)
         } else {
-            slice::from_raw_parts_mut(self.ptr, self.cap)
+            slice::from_raw_parts_mut(self.ptr.as_ptr(), self.cap)
         }
     }
@@ -2009,7 +2009,7 @@ impl Inner {
         } else {
             assert!(self.len < self.cap);
             unsafe {
-                *self.ptr.add(self.len) = n;
+                *self.ptr.as_ptr().add(self.len) = n;
             }
             self.len += 1;
         }
@@ -2112,7 +2112,7 @@ impl Inner {
         }
         unsafe {
-            ptr = self.ptr.add(self.len);
+            ptr = NonNull::new_unchecked(self.ptr.as_ptr().add(self.len));
         }
         if ptr == other.ptr && self.kind() == KIND_ARC && other.kind() == KIND_ARC {
             debug_assert_eq!(self.arc.load(Acquire), other.arc.load(Acquire));
@@ -2197,7 +2197,7 @@ impl Inner {
             // Updating the start of the view is setting `ptr` to point to the
             // new start and updating the `len` field to reflect the new length
             // of the view.
-            self.ptr = self.ptr.add(start);
+            self.ptr = NonNull::new_unchecked(self.ptr.as_ptr().add(start));
             if self.len >= start {
                 self.len -= start;
@@ -2421,7 +2421,7 @@ impl Inner {
             let mut v = Vec::with_capacity(new_cap);
             v.extend_from_slice(self.as_ref());
-            self.ptr = v.as_mut_ptr();
+            self.ptr = unsafe { NonNull::new_unchecked(v.as_mut_ptr()) };
             self.len = v.len();
             self.cap = v.capacity();
@@ -2449,9 +2449,9 @@ impl Inner {
                 //
                 // Just move the pointer back to the start after copying
                 // data back.
-                let base_ptr = self.ptr.offset(-(off as isize));
-                ptr::copy(self.ptr, base_ptr, self.len);
-                self.ptr = base_ptr;
+                let base_ptr = self.ptr.as_ptr().offset(-(off as isize));
+                ptr::copy(self.ptr.as_ptr(), base_ptr, self.len);
+                self.ptr = NonNull::new_unchecked(base_ptr);
                 self.uncoordinated_set_vec_pos(0, prev);
                 // Length stays constant, but since we moved backwards we
@@ -2463,7 +2463,7 @@ impl Inner {
             v.reserve(additional);
             // Update the info
-            self.ptr = v.as_mut_ptr().add(off);
+            self.ptr = NonNull::new_unchecked(v.as_mut_ptr().add(off));
             self.len = v.len() - off;
             self.cap = v.capacity() - off;
@@ -2502,9 +2502,9 @@ impl Inner {
                 // The capacity is sufficient, reclaim the buffer
                 let ptr = v.as_mut_ptr();
-                ptr::copy(self.ptr, ptr, len);
-                self.ptr = ptr;
+                ptr::copy(self.ptr.as_ptr(), ptr, len);
+                self.ptr = NonNull::new_unchecked(ptr);
                 self.cap = v.capacity();
                 return;
@@ -2536,7 +2536,7 @@ impl Inner {
             release_shared(arc);
             // Update self
-            self.ptr = v.as_mut_ptr();
+            self.ptr = unsafe { NonNull::new_unchecked(v.as_mut_ptr()) };
             self.len = v.len();
             self.cap = v.capacity();
@@ -2652,9 +2652,9 @@ impl Inner {
     }
 }
-fn rebuild_vec(ptr: *mut u8, mut len: usize, mut cap: usize, off: usize) -> Vec<u8> {
+fn rebuild_vec(ptr: NonNull<u8>, mut len: usize, mut cap: usize, off: usize) -> Vec<u8> {
     unsafe {
-        let ptr = ptr.offset(-(off as isize));
+        let ptr = ptr.as_ptr().offset(-(off as isize));
         len += off;
         cap += off;
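
Every hunk above follows the same mechanical pattern: wherever a raw *mut u8 used to be stored into ptr, it is now wrapped in NonNull::new_unchecked, which is unsafe because the caller promises the pointer is never null. A hedged sketch of the two constructors (the function and variable names here are illustrative, not part of the diff):

use std::ptr::NonNull;

fn wrap(v: &mut Vec<u8>) -> NonNull<u8> {
    // Checked constructor: returns None for a null pointer.
    let checked: Option<NonNull<u8>> = NonNull::new(v.as_mut_ptr());

    // Unchecked constructor, as used throughout this commit. A Vec's buffer
    // pointer is never null (even an empty Vec hands out a dangling but
    // non-null, aligned pointer), so the promise is upheld here.
    let unchecked = unsafe { NonNull::new_unchecked(v.as_mut_ptr()) };

    assert_eq!(checked, Some(unchecked));
    unchecked
}

fn main() {
    let mut buf = vec![1u8, 2, 3];
    let first = wrap(&mut buf);
    println!("buffer starts at {:p}", first);
}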

View file

@@ -13,6 +13,13 @@ fn inline_cap() -> usize {
 fn is_sync<T: Sync>() {}
 fn is_send<T: Send>() {}
+#[cfg(target_pointer_width = "64")]
+#[test]
+fn test_size() {
+    assert_eq!(32, std::mem::size_of::<Bytes>());
+    assert_eq!(32, std::mem::size_of::<Option<Bytes>>());
+}
 #[test]
 fn test_bounds() {
     is_sync::<Bytes>();
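
The numbers in the new test follow from the layout shown earlier: on a 64-bit target the non-inline Inner is four word-sized fields (arc, ptr, len, cap), i.e. 4 × 8 = 32 bytes, and the NonNull niche lets Option<Bytes> fit in the same 32 bytes. The standard library documents the underlying guarantee for Option<NonNull<T>> itself; a quick illustrative check (not part of the test file):

use std::mem::size_of;
use std::ptr::NonNull;

fn main() {
    // Documented guarantee: the Option wrapper around NonNull is free.
    assert_eq!(size_of::<Option<NonNull<u8>>>(), size_of::<NonNull<u8>>());
    // Four 8-byte fields on a 64-bit target: the 32 bytes the test asserts.
    #[cfg(target_pointer_width = "64")]
    assert_eq!(4 * size_of::<usize>(), 32);
}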

View file

@@ -49,7 +49,7 @@ ntex-router = "0.5.0"
 ntex-service = "0.1.9"
 ntex-macros = "0.1.3"
 ntex-util = "0.1.1"
-ntex-bytes = "0.1.0"
+ntex-bytes = "0.1.3"
 ahash = "0.7.4"
 base64 = "0.13"