From ace681ba749d6f59aad35b6cae6e8c5eec5251dd Mon Sep 17 00:00:00 2001 From: Nikolay Kim <fafhrd91@gmail.com> Date: Sun, 27 Jun 2021 18:31:48 +0600 Subject: [PATCH] Reduce size of Option<Bytes> by using NonNull --- ntex-bytes/CHANGELOG.md | 4 +++ ntex-bytes/Cargo.toml | 4 +-- ntex-bytes/src/bytes.rs | 46 +++++++++++++++++----------------- ntex-bytes/tests/test_bytes.rs | 7 ++++++ ntex/Cargo.toml | 2 +- 5 files changed, 36 insertions(+), 27 deletions(-) diff --git a/ntex-bytes/CHANGELOG.md b/ntex-bytes/CHANGELOG.md index 01c76ce7..80b08d9f 100644 --- a/ntex-bytes/CHANGELOG.md +++ b/ntex-bytes/CHANGELOG.md @@ -1,5 +1,9 @@ # Changes +## 0.1.3 (2021-06-27) + +* Reduce size of Option<Bytes> by using NonNull + ## 0.1.2 (2021-06-27) * Reserve space for put_slice diff --git a/ntex-bytes/Cargo.toml b/ntex-bytes/Cargo.toml index c778932e..c2d3bdd0 100644 --- a/ntex-bytes/Cargo.toml +++ b/ntex-bytes/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ntex-bytes" -version = "0.1.2" +version = "0.1.3" license = "MIT" authors = ["Carl Lerche <me@carllerche.com>"] description = "Types and traits for working with bytes (bytes crate fork)" @@ -15,8 +15,6 @@ edition = "2018" serde = "1.0" bytes = "1.0.1" -backtrace = "*" - [dev-dependencies] serde_test = "1.0" serde_json = "1.0" diff --git a/ntex-bytes/src/bytes.rs b/ntex-bytes/src/bytes.rs index 42761872..b59c4651 100644 --- a/ntex-bytes/src/bytes.rs +++ b/ntex-bytes/src/bytes.rs @@ -3,7 +3,7 @@ use std::iter::{FromIterator, Iterator}; use std::ops::{Deref, DerefMut, RangeBounds}; use std::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release}; use std::sync::atomic::{self, AtomicPtr, AtomicUsize}; -use std::{cmp, fmt, hash, mem, ptr, slice, usize}; +use std::{cmp, fmt, hash, mem, ptr, ptr::NonNull, slice, usize}; use crate::{buf::IntoIter, buf::UninitSlice, debug, Buf, BufMut}; @@ -297,7 +297,7 @@ struct Inner { // WARNING: Do not access the fields directly unless you know what you are // doing. Instead, use the fns. See implementation comment above. 
arc: AtomicPtr<Shared>, - ptr: *mut u8, + ptr: NonNull<u8>, len: usize, cap: usize, } @@ -307,7 +307,7 @@ struct Inner { // WARNING: Do not access the fields directly unless you know what you are // doing. Instead, use the fns. See implementation comment above. - ptr: *mut u8, + ptr: NonNull<u8>, len: usize, cap: usize, arc: AtomicPtr<Shared>, } @@ -1752,7 +1752,7 @@ impl<'a> From<&'a [u8]> for BytesMut { BytesMut::new() } else if len <= INLINE_CAP { unsafe { - #[allow(clippy::uninit_assumed_init)] + #[allow(invalid_value, clippy::uninit_assumed_init)] let mut inner: Inner = mem::MaybeUninit::uninit().assume_init(); // Set inline mask @@ -1917,7 +1917,7 @@ impl Inner { // track the fact that the `Bytes` handle is backed by a // static buffer. arc: AtomicPtr::new(KIND_STATIC as *mut Shared), - ptr, + ptr: unsafe { NonNull::new_unchecked(ptr) }, len: bytes.len(), cap: bytes.len(), } @@ -1936,7 +1936,7 @@ impl Inner { Inner { arc: AtomicPtr::new(arc as *mut Shared), - ptr, + ptr: unsafe { NonNull::new_unchecked(ptr) }, len, cap, } @@ -1947,7 +1947,7 @@ impl Inner { if capacity <= INLINE_CAP { unsafe { // Using uninitialized memory is ~30% faster - #[allow(clippy::uninit_assumed_init)] + #[allow(invalid_value, clippy::uninit_assumed_init)] let mut inner: Inner = mem::MaybeUninit::uninit().assume_init(); inner.arc = AtomicPtr::new(KIND_INLINE as *mut Shared); inner @@ -1964,7 +1964,7 @@ impl Inner { if self.is_inline() { slice::from_raw_parts(self.inline_ptr(), self.inline_len()) } else { - slice::from_raw_parts(self.ptr, self.len) + slice::from_raw_parts(self.ptr.as_ptr(), self.len) } } } @@ -1978,7 +1978,7 @@ impl Inner { if self.is_inline() { slice::from_raw_parts_mut(self.inline_ptr(), self.inline_len()) } else { - slice::from_raw_parts_mut(self.ptr, self.len) + slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len) } } } @@ -1992,7 +1992,7 @@ impl Inner { if self.is_inline() { slice::from_raw_parts_mut(self.inline_ptr(), INLINE_CAP) } else { - 
slice::from_raw_parts_mut(self.ptr, self.cap) + slice::from_raw_parts_mut(self.ptr.as_ptr(), self.cap) } } @@ -2009,7 +2009,7 @@ impl Inner { } else { assert!(self.len < self.cap); unsafe { - *self.ptr.add(self.len) = n; + *self.ptr.as_ptr().add(self.len) = n; } self.len += 1; } @@ -2112,7 +2112,7 @@ impl Inner { } unsafe { - ptr = self.ptr.add(self.len); + ptr = NonNull::new_unchecked(self.ptr.as_ptr().add(self.len)); } if ptr == other.ptr && self.kind() == KIND_ARC && other.kind() == KIND_ARC { debug_assert_eq!(self.arc.load(Acquire), other.arc.load(Acquire)); @@ -2197,7 +2197,7 @@ impl Inner { // Updating the start of the view is setting `ptr` to point to the // new start and updating the `len` field to reflect the new length // of the view. - self.ptr = self.ptr.add(start); + self.ptr = NonNull::new_unchecked(self.ptr.as_ptr().add(start)); if self.len >= start { self.len -= start; @@ -2421,7 +2421,7 @@ impl Inner { let mut v = Vec::with_capacity(new_cap); v.extend_from_slice(self.as_ref()); - self.ptr = v.as_mut_ptr(); + self.ptr = unsafe { NonNull::new_unchecked(v.as_mut_ptr()) }; self.len = v.len(); self.cap = v.capacity(); @@ -2449,9 +2449,9 @@ impl Inner { // // Just move the pointer back to the start after copying // data back. 
- let base_ptr = self.ptr.offset(-(off as isize)); - ptr::copy(self.ptr, base_ptr, self.len); - self.ptr = base_ptr; + let base_ptr = self.ptr.as_ptr().offset(-(off as isize)); + ptr::copy(self.ptr.as_ptr(), base_ptr, self.len); + self.ptr = NonNull::new_unchecked(base_ptr); self.uncoordinated_set_vec_pos(0, prev); // Length stays constant, but since we moved backwards we @@ -2463,7 +2463,7 @@ impl Inner { v.reserve(additional); // Update the info - self.ptr = v.as_mut_ptr().add(off); + self.ptr = NonNull::new_unchecked(v.as_mut_ptr().add(off)); self.len = v.len() - off; self.cap = v.capacity() - off; @@ -2502,9 +2502,9 @@ impl Inner { // The capacity is sufficient, reclaim the buffer let ptr = v.as_mut_ptr(); - ptr::copy(self.ptr, ptr, len); + ptr::copy(self.ptr.as_ptr(), ptr, len); - self.ptr = ptr; + self.ptr = NonNull::new_unchecked(ptr); self.cap = v.capacity(); return; @@ -2536,7 +2536,7 @@ impl Inner { release_shared(arc); // Update self - self.ptr = v.as_mut_ptr(); + self.ptr = unsafe { NonNull::new_unchecked(v.as_mut_ptr()) }; self.len = v.len(); self.cap = v.capacity(); @@ -2652,9 +2652,9 @@ impl Inner { } } -fn rebuild_vec(ptr: *mut u8, mut len: usize, mut cap: usize, off: usize) -> Vec<u8> { +fn rebuild_vec(ptr: NonNull<u8>, mut len: usize, mut cap: usize, off: usize) -> Vec<u8> { unsafe { - let ptr = ptr.as_ptr().offset(-(off as isize)); len += off; cap += off; diff --git a/ntex-bytes/tests/test_bytes.rs b/ntex-bytes/tests/test_bytes.rs index 29f2ac83..e395ab86 100644 --- a/ntex-bytes/tests/test_bytes.rs +++ b/ntex-bytes/tests/test_bytes.rs @@ -13,6 +13,13 @@ fn inline_cap() -> usize { fn is_sync<T: Sync>() {} fn is_send<T: Send>() {} +#[cfg(target_pointer_width = "64")] +#[test] +fn test_size() { + assert_eq!(32, std::mem::size_of::<Bytes>()); + assert_eq!(32, std::mem::size_of::<Option<Bytes>>()); +} + #[test] fn test_bounds() { is_sync::<Bytes>(); diff --git a/ntex/Cargo.toml b/ntex/Cargo.toml index 398b320a..960fe37d 100644 --- a/ntex/Cargo.toml +++ 
b/ntex/Cargo.toml @@ -49,7 +49,7 @@ ntex-router = "0.5.0" ntex-service = "0.1.9" ntex-macros = "0.1.3" ntex-util = "0.1.1" -ntex-bytes = "0.1.0" +ntex-bytes = "0.1.3" ahash = "0.7.4" base64 = "0.13"