Mirror of https://github.com/ntex-rs/ntex.git
Read frame timeout (#241)
Parent: e020bb5296
Commit: a32e25d72d
21 changed files with 598 additions and 156 deletions
@@ -35,3 +35,5 @@ ntex-util = { path = "ntex-util" }
ntex-glommio = { path = "ntex-glommio" }
ntex-tokio = { path = "ntex-tokio" }
ntex-async-std = { path = "ntex-async-std" }

ntex-h2 = { git = "https://github.com/ntex-rs/ntex-h2.git" }
@@ -1,5 +1,9 @@
# Changes

## [0.3.6] - 2023-11-xx

* Add DispatcherConfig type

## [0.3.5] - 2023-11-03

* Add Io::force_ready_ready() and Io::poll_force_ready_ready() methods
@@ -1,6 +1,6 @@
[package]
name = "ntex-io"
version = "0.3.5"
version = "0.3.6"
authors = ["ntex contributors <team@ntex.rs>"]
description = "Utilities for encoding and decoding frames"
keywords = ["network", "framework", "async", "futures"]
@@ -17,9 +17,9 @@ path = "src/lib.rs"

[dependencies]
ntex-codec = "0.6.2"
ntex-bytes = "0.1.19"
ntex-util = "0.3.2"
ntex-service = "1.2.6"
ntex-bytes = "0.1.20"
ntex-util = "0.3.4"
ntex-service = "1.2.7"

bitflags = "2.4"
log = "0.4"
@@ -4,13 +4,110 @@ use std::{cell::Cell, future, pin::Pin, rc::Rc, task::Context, task::Poll, time}
use ntex_bytes::Pool;
use ntex_codec::{Decoder, Encoder};
use ntex_service::{IntoService, Pipeline, Service};
use ntex_util::time::Seconds;
use ntex_util::time::{now, Seconds};
use ntex_util::{future::Either, ready, spawn};

use crate::{DispatchItem, IoBoxed, IoStatusUpdate, RecvError};

const ONE_SEC: time::Duration = time::Duration::from_secs(1);

type Response<U> = <U as Encoder>::Item;

#[derive(Clone, Debug)]
/// Shared dispatcher configuration
pub struct DispatcherConfig(Rc<DispatcherConfigInner>);

#[derive(Debug)]
struct DispatcherConfigInner {
    keepalive_timeout: Cell<Seconds>,
    disconnect_timeout: Cell<Seconds>,
    frame_read_rate: Cell<u16>,
    frame_read_timeout: Cell<Seconds>,
    frame_read_max_timeout: Cell<Seconds>,
}

impl Default for DispatcherConfig {
    fn default() -> Self {
        DispatcherConfig(Rc::new(DispatcherConfigInner {
            keepalive_timeout: Cell::new(Seconds(30)),
            disconnect_timeout: Cell::new(Seconds(1)),
            frame_read_rate: Cell::new(0),
            frame_read_timeout: Cell::new(Seconds::ZERO),
            frame_read_max_timeout: Cell::new(Seconds::ZERO),
        }))
    }
}

impl DispatcherConfig {
    #[inline]
    /// Get keep-alive timeout
    pub fn keepalive_timeout(&self) -> Seconds {
        self.0.keepalive_timeout.get()
    }

    #[inline]
    /// Get disconnect timeout
    pub fn disconnect_timeout(&self) -> Seconds {
        self.0.disconnect_timeout.get()
    }

    #[inline]
    /// Get frame read rate
    pub fn frame_read_rate(&self) -> Option<(Seconds, Seconds, u16)> {
        let to = self.0.frame_read_timeout.get();
        if to.is_zero() {
            None
        } else {
            Some((
                to,
                self.0.frame_read_max_timeout.get(),
                self.0.frame_read_rate.get(),
            ))
        }
    }

    /// Set keep-alive timeout in seconds.
    ///
    /// To disable timeout set value to 0.
    ///
    /// By default keep-alive timeout is set to 30 seconds.
    pub fn set_keepalive_timeout(&self, timeout: Seconds) -> &Self {
        self.0.keepalive_timeout.set(timeout);
        self
    }

    /// Set connection disconnect timeout.
    ///
    /// Defines a timeout for the disconnect procedure. If the disconnect procedure does not
    /// complete within this time, the connection is dropped.
    ///
    /// To disable timeout set value to 0.
    ///
    /// By default disconnect timeout is set to 1 second.
    pub fn set_disconnect_timeout(&self, timeout: Seconds) -> &Self {
        self.0.disconnect_timeout.set(timeout);
        self
    }

    /// Set read rate parameters for a single frame.
    ///
    /// Sets the timeout for reading a single frame. If the client keeps sending
    /// data (at least `rate` bytes within each period), the timeout is extended
    /// by one second, up to `max_timeout`.
    ///
    /// By default frame read rate is disabled.
    pub fn set_frame_read_rate(
        &self,
        timeout: Seconds,
        max_timeout: Seconds,
        rate: u16,
    ) -> &Self {
        self.0.frame_read_timeout.set(timeout);
        self.0.frame_read_max_timeout.set(max_timeout);
        self.0.frame_read_rate.set(rate);
        self
    }
}

pin_project_lite::pin_project! {
    /// Dispatcher is a future that reads frames from a bytes stream
    /// and passes them to the service.
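A minimal usage sketch of the configuration API introduced above, assuming an `io` value convertible to `IoBoxed`, a `codec` implementing `Encoder + Decoder`, and a `service` handling `DispatchItem` (all placeholders, not part of this diff):

    use ntex_io::{Dispatcher, DispatcherConfig};
    use ntex_util::time::Seconds;

    // shared config: 30s keep-alive, 1s disconnect timeout, and a frame read
    // rate of 256 bytes per second with a 5s cap for any single frame
    let cfg = DispatcherConfig::default()
        .set_keepalive_timeout(Seconds(30))
        .set_disconnect_timeout(Seconds(1))
        .set_frame_read_rate(Seconds(1), Seconds(5), 256)
        .clone();

    // replaces the now-deprecated Dispatcher::new(io, codec, service)
    let disp = Dispatcher::with_config(io, codec, service, &cfg);

The setters return `&Self`, so they can be chained and finished with `.clone()` to obtain an owned `DispatcherConfig`; the tests later in this diff use the same pattern.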
@@ -27,8 +124,9 @@ pin_project_lite::pin_project! {
bitflags::bitflags! {
    #[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
    struct Flags: u8 {
        const READY_ERR = 0b0001;
        const IO_ERR = 0b0010;
        const READY_ERR = 0b0001;
        const IO_ERR = 0b0010;
        const TIMEOUT = 0b0100;
    }
}

@@ -38,11 +136,14 @@ where
    U: Encoder + Decoder,
{
    st: Cell<DispatcherState>,
    ka_timeout: Cell<time::Duration>,
    error: Cell<Option<S::Error>>,
    flags: Cell<Flags>,
    shared: Rc<DispatcherShared<S, U>>,
    pool: Pool,
    cfg: DispatcherConfig,
    read_timeout: Cell<time::Instant>,
    read_max_timeout: Cell<time::Instant>,
    read_bytes: Cell<u32>,
}

pub(crate) struct DispatcherShared<S, U>
@@ -92,16 +193,20 @@ where
    U: Decoder + Encoder,
{
    /// Construct new `Dispatcher` instance.
    pub fn new<Io, F>(io: Io, codec: U, service: F) -> Dispatcher<S, U>
    pub fn with_config<Io, F>(
        io: Io,
        codec: U,
        service: F,
        cfg: &DispatcherConfig,
    ) -> Dispatcher<S, U>
    where
        IoBoxed: From<Io>,
        F: IntoService<S, DispatchItem<U>>,
    {
        let io = IoBoxed::from(io);
        let ka_timeout = Cell::new(Seconds(30).into());

        // register keepalive timer
        io.start_keepalive_timer(ka_timeout.get());
        io.start_keepalive_timer(cfg.keepalive_timeout().into());

        let pool = io.memory_pool().pool();
        let shared = Rc::new(DispatcherShared {
@@ -116,13 +221,27 @@ where
            inner: DispatcherInner {
                pool,
                shared,
                ka_timeout,
                cfg: cfg.clone(),
                error: Cell::new(None),
                flags: Cell::new(Flags::empty()),
                read_timeout: Cell::new(now()),
                read_max_timeout: Cell::new(now()),
                read_bytes: Cell::new(0),
                st: Cell::new(DispatcherState::Processing),
            },
        }
    }

    #[doc(hidden)]
    #[deprecated(since = "0.3.6", note = "Use Dispatcher::with_config() method")]
    /// Construct new `Dispatcher` instance.
    pub fn new<Io, F>(io: Io, codec: U, service: F) -> Dispatcher<S, U>
    where
        IoBoxed: From<Io>,
        F: IntoService<S, DispatchItem<U>>,
    {
        Self::with_config(io, codec, service, &DispatcherConfig::default())
    }
}

impl<S, U> Dispatcher<S, U>
@@ -130,21 +249,22 @@ where
    S: Service<DispatchItem<U>, Response = Option<Response<U>>>,
    U: Decoder + Encoder,
{
    #[doc(hidden)]
    #[deprecated(since = "0.3.6", note = "Use DispatcherConfig methods")]
    /// Set keep-alive timeout.
    ///
    /// To disable timeout set value to 0.
    ///
    /// By default keep-alive timeout is set to 30 seconds.
    pub fn keepalive_timeout(self, timeout: Seconds) -> Self {
        let ka_timeout = time::Duration::from(timeout);

        // register keepalive timer
        self.inner.shared.io.start_keepalive_timer(ka_timeout);
        self.inner.ka_timeout.set(ka_timeout);

        self.inner.shared.io.start_keepalive_timer(timeout.into());
        self.inner.cfg.set_keepalive_timeout(timeout);
        self
    }

    #[doc(hidden)]
    #[deprecated(since = "0.3.6", note = "Use DispatcherConfig methods")]
    /// Set connection disconnect timeout in seconds.
    ///
    /// Defines a timeout for the disconnect procedure. If a disconnect procedure does not complete
@@ -154,7 +274,7 @@ where
    ///
    /// By default disconnect timeout is set to 1 second.
    pub fn disconnect_timeout(self, val: Seconds) -> Self {
        self.inner.shared.io.set_disconnect_timeout(val.into());
        self.inner.shared.io.set_disconnect_timeout(val);
        self
    }
}
@@ -200,23 +320,31 @@ where
        loop {
            match slf.st.get() {
                DispatcherState::Processing => {
                    let item = match ready!(slf.poll_service(
                        &this.inner.shared.service,
                        cx,
                        io
                    )) {
                    let srv = ready!(slf.poll_service(&this.inner.shared.service, cx, io));
                    let item = match srv {
                        PollService::Ready => {
                            // decode incoming bytes if buffer is ready
                            match ready!(io.poll_recv(&slf.shared.codec, cx)) {
                                Ok(el) => {
                                    slf.update_keepalive();
                                    DispatchItem::Item(el)
                            match io.poll_recv_decode(&slf.shared.codec, cx) {
                                Ok(decoded) => {
                                    if let Some(el) = decoded.item {
                                        slf.update_keepalive();
                                        slf.remove_timeout();
                                        DispatchItem::Item(el)
                                    } else {
                                        slf.update_timeout(decoded.remains);
                                        return Poll::Pending;
                                    }
                                }
                                Err(RecvError::KeepAlive) => {
                                    log::trace!("keep-alive error, stopping dispatcher");
                                    slf.st.set(DispatcherState::Stop);
                                    DispatchItem::KeepAliveTimeout
                                }
                                Err(RecvError::Timeout) => {
                                    log::trace!("timeout error, stopping dispatcher");
                                    slf.st.set(DispatcherState::Stop);
                                    DispatchItem::ReadTimeout
                                }
                                Err(RecvError::Stop) => {
                                    log::trace!("dispatcher is instructed to stop");
                                    slf.st.set(DispatcherState::Stop);
@ -259,6 +387,8 @@ where
|
|||
}
|
||||
// handle write back-pressure
|
||||
DispatcherState::Backpressure => {
|
||||
slf.shared.io.stop_keepalive_timer();
|
||||
|
||||
let result =
|
||||
ready!(slf.poll_service(&this.inner.shared.service, cx, io));
|
||||
let item = match result {
|
||||
|
@ -277,6 +407,7 @@ where
|
|||
// call service
|
||||
let shared = slf.shared.clone();
|
||||
shared.inflight.set(shared.inflight.get() + 1);
|
||||
slf.update_keepalive();
|
||||
spawn(async move {
|
||||
let result = shared.service.call(item).await;
|
||||
shared.handle_result(result, &shared.io);
|
||||
|
@ -284,7 +415,7 @@ where
|
|||
}
|
||||
// drain service responses and shutdown io
|
||||
DispatcherState::Stop => {
|
||||
slf.unregister_keepalive();
|
||||
slf.shared.io.stop_keepalive_timer();
|
||||
|
||||
// service may rely on poll_ready for response results
|
||||
if !slf.flags.get().contains(Flags::READY_ERR) {
|
||||
|
@ -300,7 +431,8 @@ where
|
|||
match ready!(slf.shared.io.poll_status_update(cx)) {
|
||||
IoStatusUpdate::PeerGone(_)
|
||||
| IoStatusUpdate::Stop
|
||||
| IoStatusUpdate::KeepAlive => {
|
||||
| IoStatusUpdate::KeepAlive
|
||||
| IoStatusUpdate::Timeout => {
|
||||
slf.insert_flags(Flags::IO_ERR);
|
||||
continue;
|
||||
}
|
||||
|
@ -375,6 +507,11 @@ where
|
|||
self.st.set(DispatcherState::Stop);
|
||||
Poll::Ready(PollService::Item(DispatchItem::KeepAliveTimeout))
|
||||
}
|
||||
IoStatusUpdate::Timeout => {
|
||||
log::trace!("read timeout, stopping dispatcher during pause");
|
||||
self.st.set(DispatcherState::Stop);
|
||||
Poll::Ready(PollService::Item(DispatchItem::ReadTimeout))
|
||||
}
|
||||
IoStatusUpdate::Stop => {
|
||||
log::trace!("dispatcher is instructed to stop during pause");
|
||||
self.st.set(DispatcherState::Stop);
|
||||
|
@@ -410,13 +547,59 @@ where
    /// update keep-alive timer
    fn update_keepalive(&self) {
        self.shared.io.start_keepalive_timer(self.ka_timeout.get());
        self.shared
            .io
            .start_keepalive_timer(self.cfg.keepalive_timeout().into());
    }

    /// unregister keep-alive timer
    fn unregister_keepalive(&self) {
        self.shared.io.stop_keepalive_timer();
        self.ka_timeout.set(time::Duration::ZERO);
    fn remove_timeout(&self) {
        let mut flags = self.flags.get();

        if self.flags.get().contains(Flags::TIMEOUT) {
            flags.remove(Flags::TIMEOUT);
            self.flags.set(flags);
            self.shared.io.stop_timer(self.read_timeout.get());
        }
    }

    fn update_timeout(&self, remains: usize) {
        if let Some((period, max, rate)) = self.cfg.frame_read_rate() {
            let bytes = remains as u32;
            let mut flags = self.flags.get();

            if flags.contains(Flags::TIMEOUT) {
                // update existing timeout
                let delta = (bytes - self.read_bytes.get())
                    .try_into()
                    .unwrap_or(u16::MAX);

                if delta >= rate {
                    let n = now();
                    let to = self.read_timeout.get();
                    let next = to + ONE_SEC;
                    let new_timeout = if n >= next { ONE_SEC } else { next - n };

                    // max timeout
                    if max.is_zero() || (n + new_timeout) <= self.read_max_timeout.get() {
                        self.shared.io.stop_timer(to);
                        self.read_bytes.set(bytes);
                        self.read_timeout
                            .set(self.shared.io.start_timer(new_timeout));
                    }
                }
            } else if remains != 0 {
                // we got new data but not enough to parse single frame
                flags.insert(Flags::TIMEOUT);
                self.flags.set(flags);

                self.read_bytes.set(bytes);
                self.read_timeout
                    .set(self.shared.io.start_timer(period.into()));
                if !max.is_zero() {
                    self.read_max_timeout.set(now() + time::Duration::from(max));
                }
            }
        }
    }
}
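A rough reading of the update_timeout logic above, using the same parameters as the test_read_timeout test later in this diff (period 1s, max 2s, rate 2 bytes); illustrative only:

    use ntex_io::DispatcherConfig;
    use ntex_util::time::Seconds;

    // A partial frame arms a 1-second read timer. Each time at least 2 new
    // bytes arrive before the timer fires, the deadline is pushed out by
    // roughly one second, but never beyond 2 seconds after the frame started.
    let cfg = DispatcherConfig::default();
    cfg.set_frame_read_rate(Seconds(1), Seconds(2), 2);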
@ -424,9 +607,9 @@ where
|
|||
mod tests {
|
||||
use rand::Rng;
|
||||
use std::sync::{atomic::AtomicBool, atomic::Ordering::Relaxed, Arc, Mutex};
|
||||
use std::{cell::RefCell, time::Duration};
|
||||
use std::{cell::RefCell, io, time::Duration};
|
||||
|
||||
use ntex_bytes::{Bytes, PoolId, PoolRef};
|
||||
use ntex_bytes::{Bytes, BytesMut, PoolId, PoolRef};
|
||||
use ntex_codec::BytesCodec;
|
||||
use ntex_service::ServiceCtx;
|
||||
use ntex_util::{future::Ready, time::sleep, time::Millis, time::Seconds};
|
||||
|
@ -455,6 +638,32 @@ mod tests {
|
|||
}
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone)]
|
||||
struct BCodec(usize);
|
||||
|
||||
impl Encoder for BCodec {
|
||||
type Item = Bytes;
|
||||
type Error = io::Error;
|
||||
|
||||
fn encode(&self, item: Bytes, dst: &mut BytesMut) -> Result<(), Self::Error> {
|
||||
dst.extend_from_slice(&item[..]);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Decoder for BCodec {
|
||||
type Item = BytesMut;
|
||||
type Error = io::Error;
|
||||
|
||||
fn decode(&self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
|
||||
if src.len() < self.0 {
|
||||
Ok(None)
|
||||
} else {
|
||||
Ok(Some(src.split_to(self.0)))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, U> Dispatcher<S, U>
|
||||
where
|
||||
S: Service<DispatchItem<U>, Response = Option<Response<U>>> + 'static,
|
||||
|
@ -468,7 +677,9 @@ mod tests {
|
|||
) -> (Self, State) {
|
||||
let state = Io::new(io);
|
||||
let pool = state.memory_pool().pool();
|
||||
let ka_timeout = Cell::new(Seconds(1).into());
|
||||
let cfg = DispatcherConfig::default()
|
||||
.set_keepalive_timeout(Seconds(1))
|
||||
.clone();
|
||||
|
||||
let inner = State(state.get_ref());
|
||||
state.start_keepalive_timer(Duration::from_millis(500));
|
||||
|
@ -487,9 +698,12 @@ mod tests {
|
|||
error: Cell::new(None),
|
||||
flags: Cell::new(super::Flags::empty()),
|
||||
st: Cell::new(DispatcherState::Processing),
|
||||
read_timeout: Cell::new(time::Instant::now()),
|
||||
read_max_timeout: Cell::new(time::Instant::now()),
|
||||
read_bytes: Cell::new(0),
|
||||
pool,
|
||||
shared,
|
||||
ka_timeout,
|
||||
cfg,
|
||||
},
|
||||
},
|
||||
inner,
|
||||
|
@ -548,6 +762,7 @@ mod tests {
|
|||
}
|
||||
}),
|
||||
);
|
||||
#[allow(deprecated)]
|
||||
spawn(async move {
|
||||
let _ = disp.disconnect_timeout(Seconds(1)).await;
|
||||
});
|
||||
|
@ -747,7 +962,7 @@ mod tests {
|
|||
}),
|
||||
),
|
||||
);
|
||||
let disp = disp.keepalive_timeout(Seconds::ZERO);
|
||||
disp.inner.cfg.set_keepalive_timeout(Seconds::ZERO);
|
||||
let pool = PoolId::P10.pool_ref();
|
||||
pool.set_read_params(1024, 512);
|
||||
state.set_memory_pool(pool);
|
||||
|
@ -802,13 +1017,14 @@ mod tests {
|
|||
}
|
||||
}),
|
||||
);
|
||||
#[allow(deprecated)]
|
||||
spawn(async move {
|
||||
let _ = disp
|
||||
.keepalive_timeout(Seconds::ZERO)
|
||||
.keepalive_timeout(Seconds(1))
|
||||
.await;
|
||||
});
|
||||
state.0 .0.disconnect_timeout.set(Millis::ONE_SEC);
|
||||
state.0 .0.disconnect_timeout.set(Seconds(1));
|
||||
|
||||
let buf = client.read().await.unwrap();
|
||||
assert_eq!(buf, Bytes::from_static(b"GET /test HTTP/1\r\n\r\n"));
|
||||
|
@ -821,6 +1037,61 @@ mod tests {
|
|||
assert_eq!(&data.lock().unwrap().borrow()[..], &[0, 1]);
|
||||
}
|
||||
|
||||
#[ntex::test]
|
||||
async fn test_read_timeout() {
|
||||
let (client, server) = IoTest::create();
|
||||
client.remote_buffer_cap(1024);
|
||||
|
||||
let data = Arc::new(Mutex::new(RefCell::new(Vec::new())));
|
||||
let data2 = data.clone();
|
||||
|
||||
let (disp, state) = Dispatcher::debug(
|
||||
server,
|
||||
BCodec(8),
|
||||
ntex_service::fn_service(move |msg: DispatchItem<BCodec>| {
|
||||
let data = data2.clone();
|
||||
async move {
|
||||
match msg {
|
||||
DispatchItem::Item(bytes) => {
|
||||
data.lock().unwrap().borrow_mut().push(0);
|
||||
return Ok::<_, ()>(Some(bytes.freeze()));
|
||||
}
|
||||
DispatchItem::ReadTimeout => {
|
||||
data.lock().unwrap().borrow_mut().push(1);
|
||||
}
|
||||
_ => (),
|
||||
}
|
||||
Ok(None)
|
||||
}
|
||||
}),
|
||||
);
|
||||
spawn(async move {
|
||||
disp.inner
|
||||
.cfg
|
||||
.set_keepalive_timeout(Seconds::ZERO)
|
||||
.set_frame_read_rate(Seconds(1), Seconds(2), 2);
|
||||
let _ = disp.await;
|
||||
});
|
||||
|
||||
client.write("12345678");
|
||||
let buf = client.read().await.unwrap();
|
||||
assert_eq!(buf, Bytes::from_static(b"12345678"));
|
||||
|
||||
client.write("1");
|
||||
sleep(Millis(500)).await;
|
||||
assert!(!state.flags().contains(Flags::IO_STOPPING));
|
||||
client.write("23");
|
||||
sleep(Millis(500)).await;
|
||||
assert!(!state.flags().contains(Flags::IO_STOPPING));
|
||||
client.write("4");
|
||||
sleep(Millis(1100)).await;
|
||||
|
||||
// write side must be closed, dispatcher should fail with keep-alive
|
||||
assert!(state.flags().contains(Flags::IO_STOPPING));
|
||||
assert!(client.is_closed());
|
||||
assert_eq!(&data.lock().unwrap().borrow()[..], &[0, 1]);
|
||||
}
|
||||
|
||||
#[ntex::test]
|
||||
async fn test_unhandled_data() {
|
||||
let handled = Arc::new(AtomicBool::new(false));
|
||||
|
|
|
@ -97,14 +97,18 @@ impl Filter for Base {
|
|||
if flags.contains(Flags::IO_STOPPED) {
|
||||
Poll::Ready(WriteStatus::Terminate)
|
||||
} else if flags.intersects(Flags::IO_STOPPING) {
|
||||
Poll::Ready(WriteStatus::Shutdown(self.0 .0.disconnect_timeout.get()))
|
||||
Poll::Ready(WriteStatus::Shutdown(
|
||||
self.0 .0.disconnect_timeout.get().into(),
|
||||
))
|
||||
} else if flags.contains(Flags::IO_STOPPING_FILTERS)
|
||||
&& !flags.contains(Flags::IO_FILTERS_TIMEOUT)
|
||||
{
|
||||
flags.insert(Flags::IO_FILTERS_TIMEOUT);
|
||||
self.0.set_flags(flags);
|
||||
self.0 .0.write_task.register(cx.waker());
|
||||
Poll::Ready(WriteStatus::Timeout(self.0 .0.disconnect_timeout.get()))
|
||||
Poll::Ready(WriteStatus::Timeout(
|
||||
self.0 .0.disconnect_timeout.get().into(),
|
||||
))
|
||||
} else {
|
||||
self.0 .0.write_task.register(cx.waker());
|
||||
Poll::Ready(WriteStatus::Ready)
|
||||
|
|
|
@ -4,14 +4,14 @@ use std::{fmt, future::Future, hash, io, marker, mem, ops, pin::Pin, ptr, rc::Rc
|
|||
|
||||
use ntex_bytes::{PoolId, PoolRef};
|
||||
use ntex_codec::{Decoder, Encoder};
|
||||
use ntex_util::time::{now, Millis};
|
||||
use ntex_util::time::{now, Seconds};
|
||||
use ntex_util::{future::poll_fn, future::Either, task::LocalWaker};
|
||||
|
||||
use crate::buf::Stack;
|
||||
use crate::filter::{Base, Filter, Layer, NullFilter};
|
||||
use crate::seal::Sealed;
|
||||
use crate::tasks::{ReadContext, WriteContext};
|
||||
use crate::{FilterLayer, Handle, IoStatusUpdate, IoStream, RecvError};
|
||||
use crate::{Decoded, FilterLayer, Handle, IoStatusUpdate, IoStream, RecvError};
|
||||
|
||||
bitflags::bitflags! {
|
||||
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
|
||||
|
@ -45,9 +45,11 @@ bitflags::bitflags! {
|
|||
const DSP_STOP = 0b0001_0000_0000_0000;
|
||||
/// keep-alive timeout occurred
|
||||
const DSP_KEEPALIVE = 0b0010_0000_0000_0000;
|
||||
/// custom timeout occurred
|
||||
const DSP_TIMEOUT = 0b0100_0000_0000_0000;
|
||||
|
||||
/// keep-alive timeout started
|
||||
const KEEPALIVE = 0b0100_0000_0000_0000;
|
||||
const KEEPALIVE = 0b1000_0000_0000_0000;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -60,7 +62,7 @@ pub struct IoRef(pub(super) Rc<IoState>);
|
|||
pub(crate) struct IoState {
|
||||
pub(super) flags: Cell<Flags>,
|
||||
pub(super) pool: Cell<PoolRef>,
|
||||
pub(super) disconnect_timeout: Cell<Millis>,
|
||||
pub(super) disconnect_timeout: Cell<Seconds>,
|
||||
pub(super) error: Cell<Option<io::Error>>,
|
||||
pub(super) read_task: LocalWaker,
|
||||
pub(super) write_task: LocalWaker,
|
||||
|
@ -91,13 +93,18 @@ impl IoState {
|
|||
}
|
||||
}
|
||||
|
||||
pub(super) fn notify_keepalive(&self) {
|
||||
log::trace!("keep-alive timeout, notify dispatcher");
|
||||
pub(super) fn notify_timeout(&self, custom: bool) {
|
||||
let mut flags = self.flags.get();
|
||||
flags.remove(Flags::KEEPALIVE);
|
||||
if !flags.contains(Flags::DSP_KEEPALIVE) {
|
||||
flags.insert(Flags::DSP_KEEPALIVE);
|
||||
if custom {
|
||||
flags.insert(Flags::DSP_TIMEOUT);
|
||||
self.dispatch_task.wake();
|
||||
} else {
|
||||
log::trace!("keep-alive timeout, notify dispatcher");
|
||||
flags.remove(Flags::KEEPALIVE);
|
||||
if !flags.contains(Flags::DSP_KEEPALIVE) {
|
||||
flags.insert(Flags::DSP_KEEPALIVE);
|
||||
self.dispatch_task.wake();
|
||||
}
|
||||
}
|
||||
self.flags.set(flags);
|
||||
}
|
||||
|
@ -192,7 +199,7 @@ impl Io {
|
|||
pool: Cell::new(pool),
|
||||
flags: Cell::new(Flags::empty()),
|
||||
error: Cell::new(None),
|
||||
disconnect_timeout: Cell::new(Millis::ONE_SEC),
|
||||
disconnect_timeout: Cell::new(Seconds(1)),
|
||||
dispatch_task: LocalWaker::new(),
|
||||
read_task: LocalWaker::new(),
|
||||
write_task: LocalWaker::new(),
|
||||
|
@ -230,7 +237,7 @@ impl<F> Io<F> {
|
|||
|
||||
#[inline]
|
||||
/// Set io disconnect timeout in millis
|
||||
pub fn set_disconnect_timeout(&self, timeout: Millis) {
|
||||
pub fn set_disconnect_timeout(&self, timeout: Seconds) {
|
||||
self.0 .0.disconnect_timeout.set(timeout);
|
||||
}
|
||||
|
||||
|
@ -248,7 +255,7 @@ impl<F> Io<F> {
|
|||
| Flags::IO_STOPPING_FILTERS,
|
||||
),
|
||||
error: Cell::new(None),
|
||||
disconnect_timeout: Cell::new(Millis::ONE_SEC),
|
||||
disconnect_timeout: Cell::new(Seconds(1)),
|
||||
dispatch_task: LocalWaker::new(),
|
||||
read_task: LocalWaker::new(),
|
||||
write_task: LocalWaker::new(),
|
||||
|
@ -345,10 +352,9 @@ impl<F> Io<F> {
|
|||
loop {
|
||||
return match poll_fn(|cx| self.poll_recv(codec, cx)).await {
|
||||
Ok(item) => Ok(Some(item)),
|
||||
Err(RecvError::KeepAlive) => Err(Either::Right(io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
"Keep-alive",
|
||||
))),
|
||||
Err(RecvError::KeepAlive) | Err(RecvError::Timeout) => Err(Either::Right(
|
||||
io::Error::new(io::ErrorKind::Other, "Keep-alive"),
|
||||
)),
|
||||
Err(RecvError::Stop) => Err(Either::Right(io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
"Dispatcher stopped",
|
||||
|
@@ -515,36 +521,60 @@ impl<F> Io<F> {
    where
        U: Decoder,
    {
        match self.decode(codec) {
            Ok(Some(el)) => Poll::Ready(Ok(el)),
            Ok(None) => {
                let flags = self.flags();
                if flags.contains(Flags::IO_STOPPED) {
                    Poll::Ready(Err(RecvError::PeerGone(self.error())))
                } else if flags.contains(Flags::DSP_STOP) {
                    self.0 .0.remove_flags(Flags::DSP_STOP);
                    Poll::Ready(Err(RecvError::Stop))
                } else if flags.contains(Flags::DSP_KEEPALIVE) {
                    self.0 .0.remove_flags(Flags::DSP_KEEPALIVE);
                    Poll::Ready(Err(RecvError::KeepAlive))
                } else if flags.contains(Flags::WR_BACKPRESSURE) {
                    Poll::Ready(Err(RecvError::WriteBackpressure))
                } else {
                    match self.poll_read_ready(cx) {
                        Poll::Pending | Poll::Ready(Ok(Some(()))) => {
                            log::trace!("not enough data to decode next frame");
                            Poll::Pending
                        }
                        Poll::Ready(Err(e)) => {
                            Poll::Ready(Err(RecvError::PeerGone(Some(e))))
                        }
                        Poll::Ready(Ok(None)) => {
                            Poll::Ready(Err(RecvError::PeerGone(None)))
                        }
        let decoded = self.poll_recv_decode(codec, cx)?;

        if let Some(item) = decoded.item {
            Poll::Ready(Ok(item))
        } else {
            Poll::Pending
        }
    }

    #[doc(hidden)]
    #[inline]
    /// Decode codec item from incoming bytes stream.
    ///
    /// Wakes the read task and requests more data if there is not enough data for decoding.
    /// If an error is returned, this method does not register the waker for a later wake-up.
    pub fn poll_recv_decode<U>(
        &self,
        codec: &U,
        cx: &mut Context<'_>,
    ) -> Result<Decoded<U::Item>, RecvError<U>>
    where
        U: Decoder,
    {
        let decoded = self
            .decode_item(codec)
            .map_err(|err| RecvError::Decoder(err))?;

        if decoded.item.is_some() {
            Ok(decoded)
        } else {
            let flags = self.flags();
            if flags.contains(Flags::IO_STOPPED) {
                Err(RecvError::PeerGone(self.error()))
            } else if flags.contains(Flags::DSP_STOP) {
                self.0 .0.remove_flags(Flags::DSP_STOP);
                Err(RecvError::Stop)
            } else if flags.contains(Flags::DSP_KEEPALIVE) {
                self.0 .0.remove_flags(Flags::DSP_KEEPALIVE);
                Err(RecvError::KeepAlive)
            } else if flags.contains(Flags::DSP_TIMEOUT) {
                self.0 .0.remove_flags(Flags::DSP_TIMEOUT);
                Err(RecvError::Timeout)
            } else if flags.contains(Flags::WR_BACKPRESSURE) {
                Err(RecvError::WriteBackpressure)
            } else {
                match self.poll_read_ready(cx) {
                    Poll::Pending | Poll::Ready(Ok(Some(()))) => {
                        log::trace!("not enough data to decode next frame");
                        Ok(decoded)
                    }
                    Poll::Ready(Err(e)) => Err(RecvError::PeerGone(Some(e))),
                    Poll::Ready(Ok(None)) => Err(RecvError::PeerGone(None)),
                }
            }
            Err(err) => Poll::Ready(Err(RecvError::Decoder(err))),
        }
    }
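A sketch of how a caller might drive the new poll_recv_decode API from a poll function. The function name poll_next_frame and the use of BytesCodec as the codec are assumptions for illustration, not part of this change:

    use std::task::{Context, Poll};

    use ntex_bytes::BytesMut;
    use ntex_codec::BytesCodec;
    use ntex_io::IoBoxed;

    fn poll_next_frame(io: &IoBoxed, cx: &mut Context<'_>) -> Poll<Result<BytesMut, ()>> {
        match io.poll_recv_decode(&BytesCodec, cx) {
            Ok(decoded) => match decoded.item {
                // a full frame was decoded from the read buffer
                Some(frame) => Poll::Ready(Ok(frame)),
                // not enough bytes yet; `decoded.remains` reports how many are buffered
                None => Poll::Pending,
            },
            // keep-alive timeout, read timeout, stop request or peer disconnect
            Err(_err) => Poll::Ready(Err(())),
        }
    }

When Ok is returned with no item, poll_recv_decode has already asked the read task for more data, so returning Poll::Pending here is enough; the partial-byte count in decoded.remains is what the dispatcher feeds into update_timeout above.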
||||
|
||||
|
@ -626,6 +656,9 @@ impl<F> Io<F> {
|
|||
} else if flags.contains(Flags::DSP_KEEPALIVE) {
|
||||
self.0 .0.remove_flags(Flags::DSP_KEEPALIVE);
|
||||
Poll::Ready(IoStatusUpdate::KeepAlive)
|
||||
} else if flags.contains(Flags::DSP_TIMEOUT) {
|
||||
self.0 .0.remove_flags(Flags::DSP_TIMEOUT);
|
||||
Poll::Ready(IoStatusUpdate::Timeout)
|
||||
} else if flags.contains(Flags::WR_BACKPRESSURE) {
|
||||
Poll::Ready(IoStatusUpdate::WriteBackpressure)
|
||||
} else {
|
||||
|
@ -916,7 +949,7 @@ mod tests {
|
|||
let server = Io::new(server);
|
||||
assert!(server.eq(&server));
|
||||
|
||||
server.0 .0.notify_keepalive();
|
||||
server.0 .0.notify_timeout(false);
|
||||
let err = server.recv(&BytesCodec).await.err().unwrap();
|
||||
assert!(format!("{:?}", err).contains("Keep-alive"));
|
||||
|
||||
|
|
|
@ -3,7 +3,7 @@ use std::{any, fmt, hash, io, time};
|
|||
use ntex_bytes::{BytesVec, PoolRef};
|
||||
use ntex_codec::{Decoder, Encoder};
|
||||
|
||||
use super::{io::Flags, timer, types, Filter, IoRef, OnDisconnect, WriteBuf};
|
||||
use super::{io::Flags, timer, types, Decoded, Filter, IoRef, OnDisconnect, WriteBuf};
|
||||
|
||||
impl IoRef {
|
||||
#[inline]
|
||||
|
@@ -137,6 +137,25 @@ impl IoRef {
            .with_read_destination(self, |buf| codec.decode_vec(buf))
    }

    #[inline]
    /// Attempts to decode a frame from the read buffer
    pub fn decode_item<U>(
        &self,
        codec: &U,
    ) -> Result<Decoded<<U as Decoder>::Item>, <U as Decoder>::Error>
    where
        U: Decoder,
    {
        self.0.buffer.with_read_destination(self, |buf| {
            let len = buf.len();
            codec.decode_vec(buf).map(|item| Decoded {
                item,
                remains: buf.len(),
                consumed: len - buf.len(),
            })
        })
    }

    #[inline]
    /// Write bytes to a buffer and wake up write task
    pub fn write(&self, src: &[u8]) -> io::Result<()> {
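A small illustration of the bookkeeping carried by Decoded, using the fixed-length BCodec(8) test codec added elsewhere in this commit and a placeholder io_ref; the numbers in the comments are hypothetical:

    // with 10 bytes in the read buffer:
    //   decoded.item     == Some(<first 8 bytes>)
    //   decoded.consumed == 8   // buffer length before decode minus length after
    //   decoded.remains  == 2   // bytes still buffered
    // with only 5 bytes buffered the codec returns None, so:
    //   decoded.item == None, decoded.consumed == 0, decoded.remains == 5
    let decoded = io_ref.decode_item(&BCodec(8)).unwrap();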
||||
|
@ -190,12 +209,12 @@ impl IoRef {
|
|||
/// Start keep-alive timer
|
||||
pub fn start_keepalive_timer(&self, timeout: time::Duration) {
|
||||
if self.flags().contains(Flags::KEEPALIVE) {
|
||||
timer::unregister(self.0.keepalive.get(), self);
|
||||
timer::unregister(self.0.keepalive.get(), self, false);
|
||||
}
|
||||
if !timeout.is_zero() {
|
||||
log::debug!("start keep-alive timeout {:?}", timeout);
|
||||
self.0.insert_flags(Flags::KEEPALIVE);
|
||||
self.0.keepalive.set(timer::register(timeout, self));
|
||||
self.0.keepalive.set(timer::register(timeout, self, false));
|
||||
} else {
|
||||
self.0.remove_flags(Flags::KEEPALIVE);
|
||||
}
|
||||
|
@@ -206,10 +225,24 @@
    pub fn stop_keepalive_timer(&self) {
        if self.flags().contains(Flags::KEEPALIVE) {
            log::debug!("unregister keep-alive timeout");
            timer::unregister(self.0.keepalive.get(), self)
            timer::unregister(self.0.keepalive.get(), self, false)
        }
    }

    #[inline]
    /// Start custom timer
    pub fn start_timer(&self, timeout: time::Duration) -> time::Instant {
        log::debug!("start custom timeout: {:?}", timeout);
        timer::register(timeout, self, true)
    }

    #[inline]
    /// Stop custom timer
    pub fn stop_timer(&self, id: time::Instant) {
        log::debug!("unregister custom timeout");
        timer::unregister(id, self, true)
    }

    #[inline]
    /// Notify when io stream get disconnected
    pub fn on_disconnect(&self) -> OnDisconnect {
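A brief sketch of the new custom timer API, assuming an existing IoRef named io (placeholder). This is how the dispatcher above drives the frame read timeout:

    use std::time::Duration;

    // arm a one-shot timer; the returned Instant identifies the registration
    let handle = io.start_timer(Duration::from_secs(1));

    // if the frame completes in time, cancel the timer again
    io.stop_timer(handle);

    // otherwise the timer fires, the dispatcher sees RecvError::Timeout /
    // IoStatusUpdate::Timeout and emits DispatchItem::ReadTimeout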
||||
|
@ -305,6 +338,11 @@ mod tests {
|
|||
let buf = client.read().await.unwrap();
|
||||
assert_eq!(buf, Bytes::from_static(b"test"));
|
||||
|
||||
client.write(b"test");
|
||||
state.read_ready().await.unwrap();
|
||||
let buf = state.decode(&BytesCodec).unwrap().unwrap();
|
||||
assert_eq!(buf, Bytes::from_static(b"test"));
|
||||
|
||||
client.write_error(io::Error::new(io::ErrorKind::Other, "err"));
|
||||
let res = state.send(Bytes::from_static(b"test"), &BytesCodec).await;
|
||||
assert!(res.is_err());
|
||||
|
@ -409,13 +447,15 @@ mod tests {
|
|||
let write_order = Rc::new(RefCell::new(Vec::new()));
|
||||
|
||||
let (client, server) = IoTest::create();
|
||||
let io = Io::new(server).add_filter(Counter {
|
||||
let counter = Counter {
|
||||
idx: 1,
|
||||
in_bytes: in_bytes.clone(),
|
||||
out_bytes: out_bytes.clone(),
|
||||
read_order: read_order.clone(),
|
||||
write_order: write_order.clone(),
|
||||
});
|
||||
};
|
||||
format!("{:?}", counter);
|
||||
let io = Io::new(server).add_filter(counter);
|
||||
|
||||
client.remote_buffer_cap(1024);
|
||||
client.write(TEXT);
|
||||
|
|
|
@ -24,13 +24,13 @@ use ntex_codec::{Decoder, Encoder};
|
|||
use ntex_util::time::Millis;
|
||||
|
||||
pub use self::buf::{ReadBuf, WriteBuf};
|
||||
pub use self::dispatcher::Dispatcher;
|
||||
pub use self::dispatcher::{Dispatcher, DispatcherConfig};
|
||||
pub use self::filter::{Base, Filter, Layer};
|
||||
pub use self::framed::Framed;
|
||||
pub use self::io::{Io, IoRef, OnDisconnect};
|
||||
pub use self::seal::{IoBoxed, Sealed};
|
||||
pub use self::tasks::{ReadContext, WriteContext};
|
||||
pub use self::utils::{filter, seal};
|
||||
pub use self::utils::{filter, seal, Decoded};
|
||||
|
||||
/// Status for read task
|
||||
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
|
||||
|
@ -117,6 +117,8 @@ pub trait Handle {
|
|||
pub enum IoStatusUpdate {
|
||||
/// Keep-alive timeout occurred
|
||||
KeepAlive,
|
||||
/// Custom timeout occurred
|
||||
Timeout,
|
||||
/// Write backpressure is enabled
|
||||
WriteBackpressure,
|
||||
/// Stop io stream handling
|
||||
|
@ -130,6 +132,8 @@ pub enum IoStatusUpdate {
|
|||
pub enum RecvError<U: Decoder> {
|
||||
/// Keep-alive timeout occurred
|
||||
KeepAlive,
|
||||
/// Custom timeout occurred
|
||||
Timeout,
|
||||
/// Write backpressure is enabled
|
||||
WriteBackpressure,
|
||||
/// Stop io stream handling
|
||||
|
@ -149,6 +153,8 @@ pub enum DispatchItem<U: Encoder + Decoder> {
|
|||
WBackPressureDisabled,
|
||||
/// Keep alive timeout
|
||||
KeepAliveTimeout,
|
||||
/// Frame read timeout
|
||||
ReadTimeout,
|
||||
/// Decoder parse error
|
||||
DecoderError(<U as Decoder>::Error),
|
||||
/// Encoder parse error
|
||||
|
@ -176,6 +182,9 @@ where
|
|||
DispatchItem::KeepAliveTimeout => {
|
||||
write!(fmt, "DispatchItem::KeepAliveTimeout")
|
||||
}
|
||||
DispatchItem::ReadTimeout => {
|
||||
write!(fmt, "DispatchItem::ReadTimeout")
|
||||
}
|
||||
DispatchItem::EncoderError(ref e) => {
|
||||
write!(fmt, "DispatchItem::EncoderError({:?})", e)
|
||||
}
|
||||
|
@ -213,5 +222,6 @@ mod tests {
|
|||
assert!(
|
||||
format!("{:?}", T::KeepAliveTimeout).contains("DispatchItem::KeepAliveTimeout")
|
||||
);
|
||||
assert!(format!("{:?}", T::ReadTimeout).contains("DispatchItem::ReadTimeout"));
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,45 +1,76 @@
|
|||
use std::{cell::RefCell, collections::BTreeMap, rc::Rc, time::Duration, time::Instant};
|
||||
#![allow(clippy::mutable_key_type)]
|
||||
use std::collections::{BTreeMap, VecDeque};
|
||||
use std::{cell::RefCell, rc::Rc, time::Duration, time::Instant};
|
||||
|
||||
use ntex_util::time::{now, sleep, Millis};
|
||||
use ntex_util::{spawn, HashSet};
|
||||
|
||||
use crate::{io::IoState, IoRef};
|
||||
|
||||
const CAP: usize = 64;
|
||||
const SEC: Duration = Duration::from_secs(1);
|
||||
|
||||
thread_local! {
|
||||
static TIMER: Rc<RefCell<Inner>> = Rc::new(RefCell::new(
|
||||
Inner {
|
||||
running: false,
|
||||
cache: VecDeque::with_capacity(CAP),
|
||||
notifications: BTreeMap::default(),
|
||||
}));
|
||||
}
|
||||
|
||||
type Notifications = BTreeMap<Instant, (HashSet<Rc<IoState>>, HashSet<Rc<IoState>>)>;
|
||||
|
||||
struct Inner {
|
||||
running: bool,
|
||||
notifications: BTreeMap<Instant, HashSet<Rc<IoState>>>,
|
||||
cache: VecDeque<HashSet<Rc<IoState>>>,
|
||||
notifications: Notifications,
|
||||
}
|
||||
|
||||
impl Inner {
|
||||
fn unregister(&mut self, expire: Instant, io: &IoRef) {
|
||||
fn unregister(&mut self, expire: Instant, io: &IoRef, custom: bool) {
|
||||
if let Some(states) = self.notifications.get_mut(&expire) {
|
||||
states.remove(&io.0);
|
||||
if states.is_empty() {
|
||||
self.notifications.remove(&expire);
|
||||
if custom {
|
||||
states.1.remove(&io.0);
|
||||
} else {
|
||||
states.0.remove(&io.0);
|
||||
}
|
||||
if states.0.is_empty() && states.1.is_empty() {
|
||||
if let Some(items) = self.notifications.remove(&expire) {
|
||||
if self.cache.len() <= CAP {
|
||||
self.cache.push_back(items.0);
|
||||
self.cache.push_back(items.1);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn register(timeout: Duration, io: &IoRef) -> Instant {
|
||||
let expire = now() + timeout;
|
||||
|
||||
pub(crate) fn register(timeout: Duration, io: &IoRef, custom: bool) -> Instant {
|
||||
TIMER.with(|timer| {
|
||||
let mut inner = timer.borrow_mut();
|
||||
|
||||
inner
|
||||
.notifications
|
||||
.entry(expire)
|
||||
.or_default()
|
||||
.insert(io.0.clone());
|
||||
let expire = now() + timeout;
|
||||
|
||||
// search existing key
|
||||
let expire = if let Some((expire, _)) =
|
||||
inner.notifications.range(expire..expire + SEC).next()
|
||||
{
|
||||
*expire
|
||||
} else {
|
||||
let n0 = inner.cache.pop_front().unwrap_or_default();
|
||||
let n1 = inner.cache.pop_front().unwrap_or_default();
|
||||
inner.notifications.insert(expire, (n0, n1));
|
||||
expire
|
||||
};
|
||||
|
||||
let notifications = inner.notifications.get_mut(&expire).unwrap();
|
||||
if custom {
|
||||
notifications.1.insert(io.0.clone());
|
||||
} else {
|
||||
notifications.0.insert(io.0.clone());
|
||||
};
|
||||
|
||||
if !inner.running {
|
||||
inner.running = true;
|
||||
|
@ -57,8 +88,12 @@ pub(crate) fn register(timeout: Duration, io: &IoRef) -> Instant {
|
|||
while let Some(key) = i.notifications.keys().next() {
|
||||
let key = *key;
|
||||
if key <= now_time {
|
||||
for st in i.notifications.remove(&key).unwrap() {
|
||||
st.notify_keepalive();
|
||||
let mut items = i.notifications.remove(&key).unwrap();
|
||||
items.0.drain().for_each(|st| st.notify_timeout(false));
|
||||
items.1.drain().for_each(|st| st.notify_timeout(true));
|
||||
if i.cache.len() <= CAP {
|
||||
i.cache.push_back(items.0);
|
||||
i.cache.push_back(items.1);
|
||||
}
|
||||
} else {
|
||||
break;
|
||||
|
@ -75,21 +110,23 @@ pub(crate) fn register(timeout: Duration, io: &IoRef) -> Instant {
|
|||
drop(guard);
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
expire
|
||||
expire
|
||||
})
|
||||
}
|
||||
|
||||
struct TimerGuard(Rc<RefCell<Inner>>);
|
||||
|
||||
impl Drop for TimerGuard {
|
||||
fn drop(&mut self) {
|
||||
self.0.borrow_mut().running = false;
|
||||
let mut inner = self.0.borrow_mut();
|
||||
inner.running = false;
|
||||
inner.notifications.clear();
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn unregister(expire: Instant, io: &IoRef) {
|
||||
pub(crate) fn unregister(expire: Instant, io: &IoRef, custom: bool) {
|
||||
TIMER.with(|timer| {
|
||||
timer.borrow_mut().unregister(expire, io);
|
||||
timer.borrow_mut().unregister(expire, io, custom);
|
||||
})
|
||||
}
|
||||
|
|
|
@ -5,6 +5,15 @@ use ntex_util::future::Ready;
|
|||
|
||||
use crate::{Filter, FilterFactory, Io, IoBoxed, Layer};
|
||||
|
||||
/// Decoded item from buffer
|
||||
#[doc(hidden)]
|
||||
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
|
||||
pub struct Decoded<T> {
|
||||
pub item: Option<T>,
|
||||
pub remains: usize,
|
||||
pub consumed: usize,
|
||||
}
|
||||
|
||||
/// Service that converts any Io<F> stream to IoBoxed stream
|
||||
pub fn seal<F, S, C>(
|
||||
srv: S,
|
||||
|
@ -176,6 +185,14 @@ mod tests {
|
|||
.unwrap();
|
||||
|
||||
let _ = svc.call(Io::new(server)).await;
|
||||
|
||||
let (client, _) = IoTest::create();
|
||||
let io = Io::new(client);
|
||||
format!("{:?}", TestFilter);
|
||||
let mut s = Stack::new();
|
||||
s.add_layer();
|
||||
let _ = s.read_buf(&io, 0, 0, |b| TestFilter.process_read_buf(b));
|
||||
let _ = s.write_buf(&io, 0, |b| TestFilter.process_write_buf(b));
|
||||
}
|
||||
|
||||
#[ntex::test]
|
||||
|
|
|
@ -1,5 +1,9 @@
|
|||
# Changes
|
||||
|
||||
## [0.7.9] - 2023-11-10
|
||||
|
||||
* Update ntex io
|
||||
|
||||
## [0.7.8] - 2023-11-06
|
||||
|
||||
* Stopping Server does not release resources #233
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
[package]
|
||||
name = "ntex"
|
||||
version = "0.7.8"
|
||||
version = "0.7.9"
|
||||
authors = ["ntex contributors <team@ntex.rs>"]
|
||||
description = "Framework for composable network services"
|
||||
readme = "README.md"
|
||||
|
@ -52,13 +52,13 @@ ntex-codec = "0.6.2"
|
|||
ntex-connect = "0.3.2"
|
||||
ntex-http = "0.1.10"
|
||||
ntex-router = "0.5.2"
|
||||
ntex-service = "1.2.6"
|
||||
ntex-service = "1.2.7"
|
||||
ntex-macros = "0.1.3"
|
||||
ntex-util = "0.3.3"
|
||||
ntex-bytes = "0.1.19"
|
||||
ntex-h2 = "0.4.3"
|
||||
ntex-util = "0.3.4"
|
||||
ntex-bytes = "0.1.20"
|
||||
ntex-h2 = "0.4.4"
|
||||
ntex-rt = "0.4.10"
|
||||
ntex-io = "0.3.5"
|
||||
ntex-io = "0.3.6"
|
||||
ntex-tls = "0.3.2"
|
||||
ntex-tokio = { version = "0.3.0", optional = true }
|
||||
ntex-glommio = { version = "0.3.0", optional = true }
|
||||
|
|
|
@ -36,7 +36,7 @@ pub struct Connector {
|
|||
timeout: Millis,
|
||||
conn_lifetime: Duration,
|
||||
conn_keep_alive: Duration,
|
||||
disconnect_timeout: Millis,
|
||||
disconnect_timeout: Seconds,
|
||||
limit: usize,
|
||||
h2config: h2::Config,
|
||||
connector: BoxedConnector,
|
||||
|
@ -62,7 +62,7 @@ impl Connector {
|
|||
timeout: Millis(1_000),
|
||||
conn_lifetime: Duration::from_secs(75),
|
||||
conn_keep_alive: Duration::from_secs(15),
|
||||
disconnect_timeout: Millis(3_000),
|
||||
disconnect_timeout: Seconds(3),
|
||||
limit: 100,
|
||||
h2config: h2::Config::client(),
|
||||
};
|
||||
|
@ -171,7 +171,7 @@ impl Connector {
|
|||
/// To disable timeout set value to 0.
|
||||
///
|
||||
/// By default disconnect timeout is set to 3 seconds.
|
||||
pub fn disconnect_timeout<T: Into<Millis>>(mut self, timeout: T) -> Self {
|
||||
pub fn disconnect_timeout<T: Into<Seconds>>(mut self, timeout: T) -> Self {
|
||||
self.disconnect_timeout = timeout.into();
|
||||
self
|
||||
}
|
||||
|
@ -256,7 +256,7 @@ impl Connector {
|
|||
fn connector(
|
||||
connector: BoxedConnector,
|
||||
timeout: Millis,
|
||||
disconnect_timeout: Millis,
|
||||
disconnect_timeout: Seconds,
|
||||
) -> impl Service<Connect, Response = IoBoxed, Error = ConnectError> + fmt::Debug {
|
||||
TimeoutService::new(
|
||||
timeout,
|
||||
|
|
|
@ -172,6 +172,9 @@ impl Stream for PlStream {
|
|||
Err(RecvError::KeepAlive) => {
|
||||
Err(io::Error::new(io::ErrorKind::Other, "Keep-alive").into())
|
||||
}
|
||||
Err(RecvError::Timeout) => {
|
||||
Err(io::Error::new(io::ErrorKind::TimedOut, "Read timeout").into())
|
||||
}
|
||||
Err(RecvError::Stop) => {
|
||||
Err(io::Error::new(io::ErrorKind::Other, "Dispatcher stopped")
|
||||
.into())
|
||||
|
|
|
@ -7,7 +7,7 @@ use ntex_h2::{self as h2};
|
|||
use crate::http::uri::{Authority, Scheme, Uri};
|
||||
use crate::io::{types::HttpProtocol, IoBoxed};
|
||||
use crate::service::{Pipeline, PipelineCall, Service, ServiceCtx};
|
||||
use crate::time::{now, Millis};
|
||||
use crate::time::{now, Seconds};
|
||||
use crate::util::{ready, BoxFuture, ByteString, HashMap, HashSet};
|
||||
use crate::{channel::pool, rt::spawn, task::LocalWaker};
|
||||
|
||||
|
@ -57,7 +57,7 @@ where
|
|||
connector: T,
|
||||
conn_lifetime: Duration,
|
||||
conn_keep_alive: Duration,
|
||||
disconnect_timeout: Millis,
|
||||
disconnect_timeout: Seconds,
|
||||
limit: usize,
|
||||
h2config: h2::Config,
|
||||
) -> Self {
|
||||
|
@ -178,7 +178,7 @@ where
|
|||
pub(super) struct Inner {
|
||||
conn_lifetime: Duration,
|
||||
conn_keep_alive: Duration,
|
||||
disconnect_timeout: Millis,
|
||||
disconnect_timeout: Seconds,
|
||||
limit: usize,
|
||||
h2config: h2::Config,
|
||||
acquired: usize,
|
||||
|
@ -396,7 +396,7 @@ pin_project_lite::pin_project! {
|
|||
uri: Uri,
|
||||
tx: Option<Waiter>,
|
||||
guard: Option<OpenGuard>,
|
||||
disconnect_timeout: Millis,
|
||||
disconnect_timeout: Seconds,
|
||||
inner: Rc<RefCell<Inner>>,
|
||||
}
|
||||
}
|
||||
|
@ -612,9 +612,8 @@ mod tests {
|
|||
use std::{cell::RefCell, rc::Rc};
|
||||
|
||||
use super::*;
|
||||
use crate::{
|
||||
http::Uri, io as nio, service::fn_service, testing::Io, time::sleep, util::lazy,
|
||||
};
|
||||
use crate::time::{sleep, Millis};
|
||||
use crate::{http::Uri, io as nio, service::fn_service, testing::Io, util::lazy};
|
||||
|
||||
#[crate::rt_test]
|
||||
async fn test_basics() {
|
||||
|
@ -630,7 +629,7 @@ mod tests {
|
|||
}),
|
||||
Duration::from_secs(10),
|
||||
Duration::from_secs(10),
|
||||
Millis::ZERO,
|
||||
Seconds::ZERO,
|
||||
1,
|
||||
h2::Config::client(),
|
||||
)
|
||||
|
|
|
@ -110,7 +110,7 @@ where
|
|||
/// Construct new `Dispatcher` instance with outgoing messages stream.
|
||||
pub(in crate::http) fn new(io: Io<F>, config: Rc<DispatcherConfig<S, X, U>>) -> Self {
|
||||
let codec = Codec::new(config.timer.clone(), config.keep_alive_enabled());
|
||||
io.set_disconnect_timeout(config.client_disconnect.into());
|
||||
io.set_disconnect_timeout(config.client_disconnect);
|
||||
|
||||
// slow-request timer
|
||||
let flags = if config.client_timeout.is_zero() {
|
||||
|
@ -601,7 +601,7 @@ where
|
|||
log::trace!("dispatcher is instructed to stop");
|
||||
Poll::Ready(State::Stop)
|
||||
}
|
||||
Err(RecvError::KeepAlive) => {
|
||||
Err(RecvError::KeepAlive) | Err(RecvError::Timeout) => {
|
||||
// keep-alive timeout
|
||||
if !self.flags.contains(Flags::STARTED) {
|
||||
log::trace!("slow request timeout");
|
||||
|
@ -706,6 +706,7 @@ where
|
|||
Poll::Pending => false,
|
||||
Poll::Ready(
|
||||
IoStatusUpdate::KeepAlive
|
||||
| IoStatusUpdate::Timeout
|
||||
| IoStatusUpdate::Stop
|
||||
| IoStatusUpdate::PeerGone(_),
|
||||
) => true,
|
||||
|
@ -756,6 +757,12 @@ fn _poll_request_payload<F>(
|
|||
*slf_payload = None;
|
||||
io::Error::new(io::ErrorKind::Other, "Keep-alive").into()
|
||||
}
|
||||
RecvError::Timeout => {
|
||||
payload.1.set_error(PayloadError::EncodingCorrupted);
|
||||
*slf_payload = None;
|
||||
io::Error::new(io::ErrorKind::TimedOut, "Read timeout")
|
||||
.into()
|
||||
}
|
||||
RecvError::Stop => {
|
||||
payload.1.set_error(PayloadError::EncodingCorrupted);
|
||||
*slf_payload = None;
|
||||
|
|
|
@ -201,7 +201,7 @@ where
|
|||
X: 'static,
|
||||
U: 'static,
|
||||
{
|
||||
io.set_disconnect_timeout(config.client_disconnect.into());
|
||||
io.set_disconnect_timeout(config.client_disconnect);
|
||||
let ioref = io.get_ref();
|
||||
|
||||
let _ = server::handle_one(
|
||||
|
|
|
@ -711,7 +711,7 @@ where
|
|||
.lifetime(Seconds::ZERO)
|
||||
.keep_alive(Seconds(30))
|
||||
.timeout(Millis(30_000))
|
||||
.disconnect_timeout(Millis(5_000))
|
||||
.disconnect_timeout(Seconds(5))
|
||||
.openssl(builder.build())
|
||||
.finish()
|
||||
}
|
||||
|
|
|
@ -48,6 +48,9 @@ where
|
|||
DispatchItem::KeepAliveTimeout => {
|
||||
Either::Right(Ready::Err(WsError::KeepAlive))
|
||||
}
|
||||
DispatchItem::ReadTimeout => {
|
||||
Either::Right(Ready::Err(WsError::ReadTimeout))
|
||||
}
|
||||
DispatchItem::DecoderError(e) | DispatchItem::EncoderError(e) => {
|
||||
Either::Right(Ready::Err(WsError::Protocol(e)))
|
||||
}
|
||||
|
@ -97,11 +100,12 @@ where
|
|||
// create ws service
|
||||
let srv = factory.into_factory().create(sink.clone()).await?;
|
||||
|
||||
let cfg = crate::io::DispatcherConfig::default();
|
||||
cfg.set_keepalive_timeout(Seconds::ZERO);
|
||||
|
||||
// start websockets service dispatcher
|
||||
rt::spawn(async move {
|
||||
let res = crate::io::Dispatcher::new(io, codec, srv)
|
||||
.keepalive_timeout(Seconds::ZERO)
|
||||
.await;
|
||||
let res = crate::io::Dispatcher::with_config(io, codec, srv, &cfg).await;
|
||||
log::trace!("Ws handler is terminated: {:?}", res);
|
||||
});
|
||||
|
||||
|
|
|
@ -15,7 +15,9 @@ use crate::connect::{Connect, ConnectError, Connector};
|
|||
use crate::http::header::{self, HeaderMap, HeaderName, HeaderValue, AUTHORIZATION};
|
||||
use crate::http::{body::BodySize, client::ClientResponse, error::HttpError, h1};
|
||||
use crate::http::{ConnectionType, RequestHead, RequestHeadType, StatusCode, Uri};
|
||||
use crate::io::{Base, DispatchItem, Dispatcher, Filter, Io, Layer, Sealed};
|
||||
use crate::io::{
|
||||
Base, DispatchItem, Dispatcher, DispatcherConfig, Filter, Io, Layer, Sealed,
|
||||
};
|
||||
use crate::service::{apply_fn, into_service, IntoService, Pipeline, Service};
|
||||
use crate::time::{timeout, Millis, Seconds};
|
||||
use crate::{channel::mpsc, rt, util::Ready, ws};
|
||||
|
@ -31,8 +33,8 @@ pub struct WsClient<F, T> {
|
|||
max_size: usize,
|
||||
server_mode: bool,
|
||||
timeout: Millis,
|
||||
keepalive_timeout: Seconds,
|
||||
extra_headers: RefCell<Option<HeaderMap>>,
|
||||
config: DispatcherConfig,
|
||||
_t: marker::PhantomData<F>,
|
||||
}
|
||||
|
||||
|
@ -53,7 +55,7 @@ struct Inner<F, T> {
|
|||
max_size: usize,
|
||||
server_mode: bool,
|
||||
timeout: Millis,
|
||||
keepalive_timeout: Seconds,
|
||||
config: DispatcherConfig,
|
||||
_t: marker::PhantomData<F>,
|
||||
}
|
||||
|
||||
|
@ -136,7 +138,6 @@ where
|
|||
let max_size = self.max_size;
|
||||
let server_mode = self.server_mode;
|
||||
let to = self.timeout;
|
||||
let keepalive_timeout = self.keepalive_timeout;
|
||||
let mut headers = self.extra_headers.borrow_mut().take().unwrap_or_default();
|
||||
|
||||
// Generate a random key for the `Sec-WebSocket-Key` header.
|
||||
|
@ -248,7 +249,7 @@ where
|
|||
} else {
|
||||
ws::Codec::new().max_size(max_size).client_mode()
|
||||
},
|
||||
keepalive_timeout,
|
||||
self.config.clone(),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
@ -282,18 +283,22 @@ impl WsClientBuilder<Base, ()> {
|
|||
Err(e) => (Default::default(), Some(e.into())),
|
||||
};
|
||||
|
||||
let config = DispatcherConfig::default()
|
||||
.set_keepalive_timeout(Seconds(600))
|
||||
.clone();
|
||||
|
||||
WsClientBuilder {
|
||||
err,
|
||||
origin: None,
|
||||
protocols: None,
|
||||
inner: Some(Inner {
|
||||
head,
|
||||
config,
|
||||
connector: Connector::<Uri>::default(),
|
||||
addr: None,
|
||||
max_size: 65_536,
|
||||
server_mode: false,
|
||||
timeout: Millis(5_000),
|
||||
keepalive_timeout: Seconds(600),
|
||||
_t: marker::PhantomData,
|
||||
}),
|
||||
#[cfg(feature = "cookie")]
|
||||
|
@ -486,7 +491,7 @@ where
|
|||
/// By default keep-alive timeout is set to 600 seconds.
|
||||
pub fn keepalive_timeout(&mut self, timeout: Seconds) -> &mut Self {
|
||||
if let Some(parts) = parts(&mut self.inner, &self.err) {
|
||||
parts.keepalive_timeout = timeout;
|
||||
parts.config.set_keepalive_timeout(timeout);
|
||||
}
|
||||
self
|
||||
}
|
||||
|
@ -507,7 +512,7 @@ where
|
|||
max_size: inner.max_size,
|
||||
server_mode: inner.server_mode,
|
||||
timeout: inner.timeout,
|
||||
keepalive_timeout: inner.keepalive_timeout,
|
||||
config: inner.config,
|
||||
_t: marker::PhantomData,
|
||||
}),
|
||||
err: self.err.take(),
|
||||
|
@ -632,7 +637,7 @@ where
|
|||
max_size: inner.max_size,
|
||||
server_mode: inner.server_mode,
|
||||
timeout: inner.timeout,
|
||||
keepalive_timeout: inner.keepalive_timeout,
|
||||
config: inner.config,
|
||||
extra_headers: RefCell::new(None),
|
||||
_t: marker::PhantomData,
|
||||
})
|
||||
|
@ -673,7 +678,7 @@ pub struct WsConnection<F> {
|
|||
io: Io<F>,
|
||||
codec: ws::Codec,
|
||||
res: ClientResponse,
|
||||
keepalive_timeout: Seconds,
|
||||
config: DispatcherConfig,
|
||||
}
|
||||
|
||||
impl<F> WsConnection<F> {
|
||||
|
@ -681,13 +686,13 @@ impl<F> WsConnection<F> {
|
|||
io: Io<F>,
|
||||
res: ClientResponse,
|
||||
codec: ws::Codec,
|
||||
keepalive_timeout: Seconds,
|
||||
config: DispatcherConfig,
|
||||
) -> Self {
|
||||
Self {
|
||||
io,
|
||||
codec,
|
||||
res,
|
||||
keepalive_timeout,
|
||||
config,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -757,6 +762,7 @@ impl WsConnection<Sealed> {
|
|||
DispatchItem::WBackPressureEnabled
|
||||
| DispatchItem::WBackPressureDisabled => Ok(None),
|
||||
DispatchItem::KeepAliveTimeout => Err(WsError::KeepAlive),
|
||||
DispatchItem::ReadTimeout => Err(WsError::ReadTimeout),
|
||||
DispatchItem::DecoderError(e) | DispatchItem::EncoderError(e) => {
|
||||
Err(WsError::Protocol(e))
|
||||
}
|
||||
|
@ -765,9 +771,7 @@ impl WsConnection<Sealed> {
|
|||
},
|
||||
);
|
||||
|
||||
Dispatcher::new(self.io, self.codec, service)
|
||||
.keepalive_timeout(self.keepalive_timeout)
|
||||
.await
|
||||
Dispatcher::with_config(self.io, self.codec, service, &self.config).await
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -778,7 +782,7 @@ impl<F: Filter> WsConnection<F> {
|
|||
io: self.io.seal(),
|
||||
codec: self.codec,
|
||||
res: self.res,
|
||||
keepalive_timeout: self.keepalive_timeout,
|
||||
config: self.config,
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -17,6 +17,9 @@ pub enum WsError<E> {
|
|||
/// Keep-alive error
|
||||
#[error("Keep-alive error")]
|
||||
KeepAlive,
|
||||
/// Frame read timeout
|
||||
#[error("Frame read timeout")]
|
||||
ReadTimeout,
|
||||
/// Ws protocol level error
|
||||
#[error("Ws protocol level error")]
|
||||
Protocol(ProtocolError),
|
||||
|
|