Refactor async io support (#417)

This commit is contained in:
Nikolay Kim 2024-09-11 18:18:45 +05:00 committed by GitHub
parent db6d3a6e4c
commit 1d529fab3c
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
24 changed files with 872 additions and 2508 deletions

View file

@ -1,5 +1,9 @@
# Changes
## [2.5.0] - 2024-09-10
* Refactor async io support
## [2.3.1] - 2024-09-05
* Tune async io tasks support

View file

@ -1,6 +1,6 @@
[package]
name = "ntex-io"
version = "2.4.0"
version = "2.5.0"
authors = ["ntex contributors <team@ntex.rs>"]
description = "Utilities for encoding and decoding frames"
keywords = ["network", "framework", "async", "futures"]

View file

@ -152,27 +152,6 @@ impl Stack {
}
}
pub(crate) fn with_read_source<F, R>(&self, io: &IoRef, f: F) -> R
where
    F: FnOnce(&mut BytesVec) -> R,
{
    // Run `f` with the read-source buffer of the last filter level,
    // allocating a fresh buffer from the memory pool when none is cached.
    let item = self.get_last_level();
    let mut buf = item
        .0
        .take()
        .unwrap_or_else(|| io.memory_pool().get_read_buf());
    let result = f(&mut buf);
    // Put the buffer back into the slot only if it still holds data;
    // an empty buffer is returned to the pool instead.
    if buf.is_empty() {
        io.memory_pool().release_read_buf(buf);
    } else {
        item.0.set(Some(buf));
    }
    result
}
pub(crate) fn with_read_destination<F, R>(&self, io: &IoRef, f: F) -> R
where
F: FnOnce(&mut BytesVec) -> R,
@ -226,6 +205,17 @@ impl Stack {
self.get_last_level().1.take()
}
pub(crate) fn set_write_destination(&self, buf: BytesVec) -> Option<BytesVec> {
    // Install `buf` as the write destination of the last filter level.
    // If a destination buffer is already present, restore it and hand
    // `buf` back to the caller unchanged.
    let item = self.get_last_level();
    match item.1.take() {
        Some(existing) => {
            item.1.set(Some(existing));
            Some(buf)
        }
        None => {
            item.1.set(Some(buf));
            None
        }
    }
}
pub(crate) fn with_write_destination<F, R>(&self, io: &IoRef, f: F) -> R
where
F: FnOnce(&mut Option<BytesVec>) -> R,

View file

@ -93,26 +93,16 @@ impl Filter for Base {
#[inline]
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<WriteStatus> {
let mut flags = self.0.flags();
let flags = self.0.flags();
if flags.contains(Flags::IO_STOPPED) {
if flags.is_stopped() {
Poll::Ready(WriteStatus::Terminate)
} else {
self.0 .0.write_task.register(cx.waker());
if flags.intersects(Flags::IO_STOPPING) {
Poll::Ready(WriteStatus::Shutdown(
self.0 .0.disconnect_timeout.get().into(),
))
} else if flags.contains(Flags::IO_STOPPING_FILTERS)
&& !flags.contains(Flags::IO_FILTERS_TIMEOUT)
{
flags.insert(Flags::IO_FILTERS_TIMEOUT);
self.0.set_flags(flags);
Poll::Ready(WriteStatus::Timeout(
self.0 .0.disconnect_timeout.get().into(),
))
} else if flags.intersects(Flags::WR_PAUSED) {
if flags.contains(Flags::IO_STOPPING) {
Poll::Ready(WriteStatus::Shutdown)
} else if flags.contains(Flags::WR_PAUSED) {
Poll::Pending
} else {
Poll::Ready(WriteStatus::Ready)
@ -242,20 +232,13 @@ where
Poll::Pending => Poll::Pending,
Poll::Ready(WriteStatus::Ready) => res2,
Poll::Ready(WriteStatus::Terminate) => Poll::Ready(WriteStatus::Terminate),
Poll::Ready(WriteStatus::Shutdown(t)) => {
Poll::Ready(WriteStatus::Shutdown) => {
if res2 == Poll::Ready(WriteStatus::Terminate) {
Poll::Ready(WriteStatus::Terminate)
} else {
Poll::Ready(WriteStatus::Shutdown(t))
Poll::Ready(WriteStatus::Shutdown)
}
}
Poll::Ready(WriteStatus::Timeout(t)) => match res2 {
Poll::Ready(WriteStatus::Terminate) => Poll::Ready(WriteStatus::Terminate),
Poll::Ready(WriteStatus::Shutdown(t)) => {
Poll::Ready(WriteStatus::Shutdown(t))
}
_ => Poll::Ready(WriteStatus::Timeout(t)),
},
}
}
}

View file

@ -7,8 +7,6 @@ bitflags::bitflags! {
const IO_STOPPING = 0b0000_0000_0000_0010;
/// shutting down filters
const IO_STOPPING_FILTERS = 0b0000_0000_0000_0100;
/// initiate filters shutdown timeout in write task
const IO_FILTERS_TIMEOUT = 0b0000_0000_0000_1000;
/// pause io read
const RD_PAUSED = 0b0000_0000_0001_0000;
@ -36,6 +34,10 @@ bitflags::bitflags! {
}
impl Flags {
/// Check whether the io stream has been stopped.
pub(crate) fn is_stopped(&self) -> bool {
    // IO_STOPPED is a single bit, so `contains` == `intersects` here
    self.contains(Flags::IO_STOPPED)
}
/// Check whether the dispatcher is waiting for the write buffer to drain.
pub(crate) fn is_waiting_for_write(&self) -> bool {
    self.contains(Flags::BUF_W_MUST_FLUSH) || self.contains(Flags::BUF_W_BACKPRESSURE)
}

View file

@ -165,7 +165,7 @@ impl Io {
let inner = Rc::new(IoState {
filter: FilterPtr::null(),
pool: Cell::new(pool),
flags: Cell::new(Flags::empty()),
flags: Cell::new(Flags::WR_PAUSED),
error: Cell::new(None),
dispatch_task: LocalWaker::new(),
read_task: LocalWaker::new(),
@ -421,7 +421,7 @@ impl<F> Io<F> {
let st = self.st();
let mut flags = st.flags.get();
if flags.contains(Flags::IO_STOPPED) {
if flags.is_stopped() {
Poll::Ready(self.error().map(Err).unwrap_or(Ok(None)))
} else {
st.dispatch_task.register(cx.waker());
@ -531,7 +531,7 @@ impl<F> Io<F> {
} else {
let st = self.st();
let flags = st.flags.get();
if flags.contains(Flags::IO_STOPPED) {
if flags.is_stopped() {
Err(RecvError::PeerGone(self.error()))
} else if flags.contains(Flags::DSP_STOP) {
st.remove_flags(Flags::DSP_STOP);
@ -568,7 +568,7 @@ impl<F> Io<F> {
pub fn poll_flush(&self, cx: &mut Context<'_>, full: bool) -> Poll<io::Result<()>> {
let flags = self.flags();
if flags.contains(Flags::IO_STOPPED) {
if flags.is_stopped() {
Poll::Ready(self.error().map(Err).unwrap_or(Ok(())))
} else {
let st = self.st();
@ -595,7 +595,7 @@ impl<F> Io<F> {
let st = self.st();
let flags = st.flags.get();
if flags.intersects(Flags::IO_STOPPED) {
if flags.is_stopped() {
if let Some(err) = self.error() {
Poll::Ready(Err(err))
} else {
@ -700,7 +700,7 @@ impl<F> Drop for Io<F> {
if st.filter.is_set() {
// filter is unsafe and must be dropped explicitly,
// and won't be dropped without special attention
if !st.flags.get().contains(Flags::IO_STOPPED) {
if !st.flags.get().is_stopped() {
log::trace!(
"{}: Io is dropped, force stopping io streams {:?}",
st.tag.get(),
@ -884,7 +884,7 @@ pub struct OnDisconnect {
impl OnDisconnect {
pub(super) fn new(inner: Rc<IoState>) -> Self {
Self::new_inner(inner.flags.get().contains(Flags::IO_STOPPED), inner)
Self::new_inner(inner.flags.get().is_stopped(), inner)
}
fn new_inner(disconnected: bool, inner: Rc<IoState>) -> Self {
@ -909,7 +909,7 @@ impl OnDisconnect {
#[inline]
/// Check if connection is disconnected
pub fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<()> {
if self.token == usize::MAX || self.inner.flags.get().contains(Flags::IO_STOPPED) {
if self.token == usize::MAX || self.inner.flags.get().is_stopped() {
Poll::Ready(())
} else if let Some(on_disconnect) = self.inner.on_disconnect.take() {
on_disconnect[self.token].register(cx.waker());

View file

@ -14,12 +14,6 @@ impl IoRef {
self.0.flags.get()
}
#[inline]
/// Set flags
pub(crate) fn set_flags(&self, flags: Flags) {
self.0.flags.set(flags)
}
#[inline]
/// Get current filter
pub(crate) fn filter(&self) -> &dyn Filter {
@ -41,10 +35,6 @@ impl IoRef {
.intersects(Flags::IO_STOPPING | Flags::IO_STOPPED)
}
pub(crate) fn is_io_closed(&self) -> bool {
self.0.flags.get().intersects(Flags::IO_STOPPED)
}
#[inline]
/// Check if write back-pressure is enabled
pub fn is_wr_backpressure(&self) -> bool {

View file

@ -1,5 +1,6 @@
//! Utilities for abstracting io streams
#![deny(rust_2018_idioms, unreachable_pub, missing_debug_implementations)]
#![allow(async_fn_in_trait)]
use std::{
any::Any, any::TypeId, fmt, io as sio, io::Error as IoError, task::Context, task::Poll,
@ -20,8 +21,8 @@ mod tasks;
mod timer;
mod utils;
use ntex_bytes::BytesVec;
use ntex_codec::{Decoder, Encoder};
use ntex_util::time::Millis;
pub use self::buf::{ReadBuf, WriteBuf};
pub use self::dispatcher::{Dispatcher, DispatcherConfig};
@ -29,13 +30,27 @@ pub use self::filter::{Base, Filter, Layer};
pub use self::framed::Framed;
pub use self::io::{Io, IoRef, OnDisconnect};
pub use self::seal::{IoBoxed, Sealed};
pub use self::tasks::{ReadContext, WriteContext};
pub use self::tasks::{ReadContext, WriteContext, WriteContextBuf};
pub use self::timer::TimerHandle;
pub use self::utils::{seal, Decoded};
#[doc(hidden)]
pub use self::flags::Flags;
#[doc(hidden)]
pub trait AsyncRead {
async fn read(&mut self, buf: BytesVec) -> (BytesVec, sio::Result<usize>);
}
#[doc(hidden)]
pub trait AsyncWrite {
async fn write(&mut self, buf: &mut WriteContextBuf) -> sio::Result<()>;
async fn flush(&mut self) -> sio::Result<()>;
async fn shutdown(&mut self) -> sio::Result<()>;
}
/// Status for read task
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum ReadStatus {
@ -48,10 +63,8 @@ pub enum ReadStatus {
pub enum WriteStatus {
/// Write task is clear to proceed with write operation
Ready,
/// Initiate timeout for normal write operations, shutdown connection after timeout
Timeout(Millis),
/// Initiate graceful io shutdown operation with timeout
Shutdown(Millis),
/// Initiate graceful io shutdown operation
Shutdown,
/// Immediately terminate connection
Terminate,
}

View file

@ -1,16 +1,22 @@
use std::{future::poll_fn, future::Future, io, task::Context, task::Poll};
use std::{cell::Cell, fmt, future::poll_fn, io, task::Context, task::Poll};
use ntex_bytes::{BufMut, BytesVec, PoolRef};
use ntex_bytes::{BufMut, BytesVec};
use ntex_util::{future::lazy, future::select, future::Either, time::sleep, time::Sleep};
use crate::{Flags, IoRef, ReadStatus, WriteStatus};
use crate::{AsyncRead, AsyncWrite, Flags, IoRef, ReadStatus, WriteStatus};
#[derive(Debug)]
/// Context for io read task
pub struct ReadContext(IoRef);
pub struct ReadContext(IoRef, Cell<Option<Sleep>>);
impl fmt::Debug for ReadContext {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("ReadContext").field("io", &self.0).finish()
}
}
impl ReadContext {
pub(crate) fn new(io: &IoRef) -> Self {
Self(io.clone())
Self(io.clone(), Cell::new(None))
}
#[inline]
@ -19,15 +25,8 @@ impl ReadContext {
self.0.tag()
}
#[inline]
/// Check readiness for read operations
pub async fn ready(&self) -> ReadStatus {
poll_fn(|cx| self.0.filter().poll_read_ready(cx)).await
}
#[inline]
/// Wait when io get closed or preparing for close
pub async fn wait_for_close(&self) {
async fn wait_for_close(&self) {
poll_fn(|cx| {
let flags = self.0.flags();
@ -36,7 +35,7 @@ impl ReadContext {
} else {
self.0 .0.read_task.register(cx.waker());
if flags.contains(Flags::IO_STOPPING_FILTERS) {
shutdown_filters(&self.0);
self.shutdown_filters(cx);
}
Poll::Pending
}
@ -44,222 +43,169 @@ impl ReadContext {
.await
}
#[inline]
/// Check readiness for read operations
pub fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<ReadStatus> {
self.0.filter().poll_read_ready(cx)
}
/// Get read buffer
pub fn with_buf<F>(&self, f: F) -> Poll<()>
/// Handle read io operations
pub async fn handle<T>(&self, io: &mut T)
where
F: FnOnce(&mut BytesVec, usize, usize) -> Poll<io::Result<()>>,
T: AsyncRead,
{
let inner = &self.0 .0;
let (hw, lw) = self.0.memory_pool().read_params().unpack();
let (result, nbytes, total) = inner.buffer.with_read_source(&self.0, |buf| {
loop {
let result = poll_fn(|cx| self.0.filter().poll_read_ready(cx)).await;
if result == ReadStatus::Terminate {
log::trace!("{}: Read task is instructed to shutdown", self.tag());
break;
}
let mut buf = if inner.flags.get().is_read_buf_ready() {
// read buffer is still not read by dispatcher
// we cannot touch it
inner.pool.get().get_read_buf()
} else {
inner
.buffer
.get_read_source()
.unwrap_or_else(|| inner.pool.get().get_read_buf())
};
// make sure we've got room
let (hw, lw) = self.0.memory_pool().read_params().unpack();
let remaining = buf.remaining_mut();
if remaining <= lw {
buf.reserve(hw - remaining);
}
let total = buf.len();
// call provided callback
let result = f(buf, hw, lw);
let (buf, result) = match select(io.read(buf), self.wait_for_close()).await {
Either::Left(res) => res,
Either::Right(_) => {
log::trace!("{}: Read io is closed, stop read task", self.tag());
break;
}
};
// handle incoming data
let total2 = buf.len();
let nbytes = if total2 > total { total2 - total } else { 0 };
(result, nbytes, total2)
});
let total = total2;
// handle buffer changes
if nbytes > 0 {
let filter = self.0.filter();
let _ = filter
.process_read_buf(&self.0, &inner.buffer, 0, nbytes)
.and_then(|status| {
if status.nbytes > 0 {
// dest buffer has new data, wake up dispatcher
if inner.buffer.read_destination_size() >= hw {
log::trace!(
if let Some(mut first_buf) = inner.buffer.get_read_source() {
first_buf.extend_from_slice(&buf);
inner.buffer.set_read_source(&self.0, first_buf);
} else {
inner.buffer.set_read_source(&self.0, buf);
}
// handle buffer changes
if nbytes > 0 {
let filter = self.0.filter();
let res = match filter.process_read_buf(&self.0, &inner.buffer, 0, nbytes) {
Ok(status) => {
if status.nbytes > 0 {
// check read back-pressure
if hw < inner.buffer.read_destination_size() {
log::trace!(
"{}: Io read buffer is too large {}, enable read back-pressure",
self.0.tag(),
total
);
inner.insert_flags(Flags::BUF_R_READY | Flags::BUF_R_FULL);
} else {
inner.insert_flags(Flags::BUF_R_READY);
if nbytes >= hw {
// read task is paused because of read back-pressure
// but there is no new data in top most read buffer
// so we need to wake up read task to read more data
// otherwise read task would sleep forever
inner.read_task.wake();
inner.insert_flags(Flags::BUF_R_READY | Flags::BUF_R_FULL);
} else {
inner.insert_flags(Flags::BUF_R_READY);
}
}
log::trace!(
"{}: New {} bytes available, wakeup dispatcher",
self.0.tag(),
nbytes
);
inner.dispatch_task.wake();
} else {
if nbytes >= hw {
// read task is paused because of read back-pressure
// but there is no new data in top most read buffer
// so we need to wake up read task to read more data
// otherwise read task would sleep forever
inner.read_task.wake();
}
if inner.flags.get().contains(Flags::RD_NOTIFY) {
log::trace!(
"{}: New {} bytes available, wakeup dispatcher",
self.0.tag(),
nbytes
);
// dest buffer has new data, wake up dispatcher
inner.dispatch_task.wake();
} else if inner.flags.get().contains(Flags::RD_NOTIFY) {
// in case of "notify" we must wake up dispatch task
// if we read any data from source
inner.dispatch_task.wake();
}
}
// while reading, filter wrote some data
// in that case filters need to process write buffers
// and potentially wake write task
if status.need_write {
filter.process_write_buf(&self.0, &inner.buffer, 0)
} else {
Ok(())
// while reading, filter wrote some data
// in that case filters need to process write buffers
// and potentially wake write task
if status.need_write {
filter.process_write_buf(&self.0, &inner.buffer, 0)
} else {
Ok(())
}
}
})
.map_err(|err| {
Err(err) => Err(err),
};
if let Err(err) = res {
inner.dispatch_task.wake();
inner.io_stopped(Some(err));
inner.insert_flags(Flags::BUF_R_READY);
});
}
match result {
Poll::Ready(Ok(())) => {
inner.io_stopped(None);
Poll::Ready(())
}
Poll::Ready(Err(e)) => {
inner.io_stopped(Some(e));
Poll::Ready(())
}
Poll::Pending => {
if inner.flags.get().contains(Flags::IO_STOPPING_FILTERS) {
shutdown_filters(&self.0);
}
Poll::Pending
}
match result {
Ok(0) => {
log::trace!("{}: Tcp stream is disconnected", self.tag());
inner.io_stopped(None);
break;
}
Ok(_) => {
if inner.flags.get().contains(Flags::IO_STOPPING_FILTERS) {
lazy(|cx| self.shutdown_filters(cx)).await;
}
}
Err(err) => {
log::trace!("{}: Read task failed on io {:?}", self.tag(), err);
inner.io_stopped(Some(err));
break;
}
}
}
}
/// Get read buffer (async)
pub async fn with_buf_async<F, R>(&self, f: F) -> Poll<()>
where
F: FnOnce(BytesVec) -> R,
R: Future<Output = (BytesVec, io::Result<usize>)>,
{
let inner = &self.0 .0;
fn shutdown_filters(&self, cx: &mut Context<'_>) {
let st = &self.0 .0;
let filter = self.0.filter();
// // we already pushed new data to read buffer,
// // we have to wait for dispatcher to read data from buffer
// if inner.flags.get().is_read_buf_ready() {
// ntex_util::task::yield_to().await;
// }
let mut buf = if inner.flags.get().is_read_buf_ready() {
// read buffer is still not read by dispatcher
// we cannot touch it
inner.pool.get().get_read_buf()
} else {
inner
.buffer
.get_read_source()
.unwrap_or_else(|| inner.pool.get().get_read_buf())
};
// make sure we've got room
let (hw, lw) = self.0.memory_pool().read_params().unpack();
let remaining = buf.remaining_mut();
if remaining <= lw {
buf.reserve(hw - remaining);
}
let total = buf.len();
// call provided callback
let (buf, result) = f(buf).await;
let total2 = buf.len();
let nbytes = if total2 > total { total2 - total } else { 0 };
let total = total2;
if let Some(mut first_buf) = inner.buffer.get_read_source() {
first_buf.extend_from_slice(&buf);
inner.buffer.set_read_source(&self.0, first_buf);
} else {
inner.buffer.set_read_source(&self.0, buf);
}
// handle buffer changes
if nbytes > 0 {
let filter = self.0.filter();
let res = match filter.process_read_buf(&self.0, &inner.buffer, 0, nbytes) {
Ok(status) => {
if status.nbytes > 0 {
// check read back-pressure
if hw < inner.buffer.read_destination_size() {
log::trace!(
"{}: Io read buffer is too large {}, enable read back-pressure",
self.0.tag(),
total
);
inner.insert_flags(Flags::BUF_R_READY | Flags::BUF_R_FULL);
} else {
inner.insert_flags(Flags::BUF_R_READY);
}
log::trace!(
"{}: New {} bytes available, wakeup dispatcher",
self.0.tag(),
nbytes
);
// dest buffer has new data, wake up dispatcher
inner.dispatch_task.wake();
} else if inner.flags.get().contains(Flags::RD_NOTIFY) {
// in case of "notify" we must wake up dispatch task
// if we read any data from source
inner.dispatch_task.wake();
}
// while reading, filter wrote some data
// in that case filters need to process write buffers
// and potentially wake write task
if status.need_write {
filter.process_write_buf(&self.0, &inner.buffer, 0)
} else {
Ok(())
}
}
Err(err) => Err(err),
};
if let Err(err) = res {
inner.dispatch_task.wake();
inner.io_stopped(Some(err));
inner.insert_flags(Flags::BUF_R_READY);
match filter.shutdown(&self.0, &st.buffer, 0) {
Ok(Poll::Ready(())) => {
st.dispatch_task.wake();
st.insert_flags(Flags::IO_STOPPING);
}
}
Ok(Poll::Pending) => {
let flags = st.flags.get();
match result {
Ok(n) => {
if n == 0 {
inner.io_stopped(None);
Poll::Ready(())
// check read buffer, if buffer is not consumed it is unlikely
// that filter will properly complete shutdown
if flags.contains(Flags::RD_PAUSED)
|| flags.contains(Flags::BUF_R_FULL | Flags::BUF_R_READY)
{
st.dispatch_task.wake();
st.insert_flags(Flags::IO_STOPPING);
} else {
if inner.flags.get().contains(Flags::IO_STOPPING_FILTERS) {
shutdown_filters(&self.0);
// filter shutdown timeout
let timeout = self
.1
.take()
.unwrap_or_else(|| sleep(st.disconnect_timeout.get()));
if timeout.poll_elapsed(cx).is_ready() {
st.dispatch_task.wake();
st.insert_flags(Flags::IO_STOPPING);
} else {
self.1.set(Some(timeout));
}
Poll::Pending
}
}
Err(e) => {
inner.io_stopped(Some(e));
Poll::Ready(())
Err(err) => {
st.io_stopped(Some(err));
}
}
if let Err(err) = filter.process_write_buf(&self.0, &st.buffer, 0) {
st.io_stopped(Some(err));
}
}
}
@ -267,6 +213,13 @@ impl ReadContext {
/// Context for io write task
pub struct WriteContext(IoRef);
#[derive(Debug)]
/// Context buf for io write task
pub struct WriteContextBuf {
io: IoRef,
buf: Option<BytesVec>,
}
impl WriteContext {
pub(crate) fn new(io: &IoRef) -> Self {
Self(io.clone())
@ -278,104 +231,92 @@ impl WriteContext {
self.0.tag()
}
#[inline]
/// Return memory pool for this context
pub fn memory_pool(&self) -> PoolRef {
self.0.memory_pool()
}
#[inline]
/// Check readiness for write operations
pub async fn ready(&self) -> WriteStatus {
async fn ready(&self) -> WriteStatus {
poll_fn(|cx| self.0.filter().poll_write_ready(cx)).await
}
#[inline]
/// Check readiness for write operations
pub fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<WriteStatus> {
self.0.filter().poll_write_ready(cx)
/// Indicate that write io task is stopped
fn close(&self, err: Option<io::Error>) {
self.0 .0.io_stopped(err);
}
#[inline]
/// Check if io is closed
pub fn poll_close(&self, cx: &mut Context<'_>) -> Poll<()> {
if self.0.is_io_closed() {
Poll::Ready(())
} else {
self.0 .0.write_task.register(cx.waker());
Poll::Pending
}
}
/// Get write buffer
pub fn with_buf<F>(&self, f: F) -> Poll<io::Result<()>>
where
F: FnOnce(&mut Option<BytesVec>) -> Poll<io::Result<()>>,
{
let inner = &self.0 .0;
// call provided callback
let (result, len) = inner.buffer.with_write_destination(&self.0, |buf| {
let result = f(buf);
(result, buf.as_ref().map(|b| b.len()).unwrap_or(0))
});
// if write buffer is smaller than high watermark value, turn off back-pressure
let mut flags = inner.flags.get();
if len == 0 {
if flags.is_waiting_for_write() {
flags.waiting_for_write_is_done();
inner.dispatch_task.wake();
}
} else if flags.contains(Flags::BUF_W_BACKPRESSURE)
&& len < inner.pool.get().write_params_high() << 1
{
flags.remove(Flags::BUF_W_BACKPRESSURE);
inner.dispatch_task.wake();
}
match result {
Poll::Pending => flags.remove(Flags::WR_PAUSED),
Poll::Ready(Ok(())) => flags.insert(Flags::WR_PAUSED),
Poll::Ready(Err(_)) => {}
}
inner.flags.set(flags);
result
}
/// Get write buffer (async)
pub async fn with_buf_async<F, R>(&self, f: F) -> io::Result<()>
where
F: FnOnce(BytesVec) -> R,
R: Future<Output = io::Result<()>>,
{
let inner = &self.0 .0;
// running
let mut flags = inner.flags.get();
if flags.contains(Flags::WR_PAUSED) {
flags.remove(Flags::WR_PAUSED);
inner.flags.set(flags);
}
// buffer
let buf = inner.buffer.get_write_destination();
// call provided callback
let result = if let Some(buf) = buf {
if !buf.is_empty() {
f(buf).await
async fn when_stopped(&self) {
poll_fn(|cx| {
if self.0.flags().is_stopped() {
Poll::Ready(())
} else {
Ok(())
self.0 .0.write_task.register(cx.waker());
Poll::Pending
}
} else {
Ok(())
})
.await
}
/// Handle write io operations
///
/// Drives the write side of the io stream: waits until the filter chain
/// reports write readiness, then performs the corresponding io operation
/// on `io`. Returns when the stream is stopped, terminated, or an error
/// occurs (the error is recorded via `close`).
pub async fn handle<T>(&self, io: &mut T)
where
    T: AsyncWrite,
{
    // staging buffer shared with the io driver across iterations
    let mut buf = WriteContextBuf {
        io: self.0.clone(),
        buf: None,
    };
    loop {
        match self.ready().await {
            WriteStatus::Ready => {
                // write io stream; race the write against stream stop so the
                // task does not keep writing to an already-stopped stream
                match select(io.write(&mut buf), self.when_stopped()).await {
                    Either::Left(Ok(_)) => continue,
                    Either::Left(Err(e)) => self.close(Some(e)),
                    Either::Right(_) => return,
                }
            }
            WriteStatus::Shutdown => {
                log::trace!("{}: Write task is instructed to shutdown", self.tag());
                // graceful shutdown: flush remaining data, then shut the
                // stream down, bounded by the disconnect timeout
                let fut = async {
                    // write io stream
                    io.write(&mut buf).await?;
                    io.flush().await?;
                    io.shutdown().await?;
                    Ok(())
                };
                match select(sleep(self.0 .0.disconnect_timeout.get()), fut).await {
                    Either::Left(_) => self.close(None),
                    Either::Right(res) => self.close(res.err()),
                }
            }
            WriteStatus::Terminate => {
                log::trace!("{}: Write task is instructed to terminate", self.tag());
                // hard stop: skip flushing, just shut the stream down
                self.close(io.shutdown().await.err());
            }
        }
        // only the Ready/Ok arm loops (via `continue`); every other path
        // falls through here and ends the write task
        return;
    }
}
}
impl WriteContextBuf {
pub fn set(&mut self, mut buf: BytesVec) {
if buf.is_empty() {
self.io.memory_pool().release_write_buf(buf);
} else if let Some(b) = self.buf.take() {
buf.extend_from_slice(&b);
self.io.memory_pool().release_write_buf(b);
self.buf = Some(buf);
} else if let Some(b) = self.io.0.buffer.set_write_destination(buf) {
// write buffer is already set
self.buf = Some(b);
}
// if write buffer is smaller than high watermark value, turn off back-pressure
let inner = &self.io.0;
let len = self.buf.as_ref().map(|b| b.len()).unwrap_or_default()
+ inner.buffer.write_destination_size();
let mut flags = inner.flags.get();
let len = inner.buffer.write_destination_size();
if len == 0 {
if flags.is_waiting_for_write() {
@ -391,44 +332,13 @@ impl WriteContext {
inner.flags.set(flags);
inner.dispatch_task.wake();
}
result
}
#[inline]
/// Indicate that write io task is stopped
pub fn close(&self, err: Option<io::Error>) {
self.0 .0.io_stopped(err);
}
}
fn shutdown_filters(io: &IoRef) {
let st = &io.0;
let flags = st.flags.get();
if !flags.intersects(Flags::IO_STOPPED | Flags::IO_STOPPING) {
let filter = io.filter();
match filter.shutdown(io, &st.buffer, 0) {
Ok(Poll::Ready(())) => {
st.dispatch_task.wake();
st.insert_flags(Flags::IO_STOPPING);
}
Ok(Poll::Pending) => {
// check read buffer, if buffer is not consumed it is unlikely
// that filter will properly complete shutdown
if flags.contains(Flags::RD_PAUSED)
|| flags.contains(Flags::BUF_R_FULL | Flags::BUF_R_READY)
{
st.dispatch_task.wake();
st.insert_flags(Flags::IO_STOPPING);
}
}
Err(err) => {
st.io_stopped(Some(err));
}
}
if let Err(err) = filter.process_write_buf(io, &st.buffer, 0) {
st.io_stopped(Some(err));
pub fn take(&mut self) -> Option<BytesVec> {
if let Some(buf) = self.buf.take() {
Some(buf)
} else {
self.io.0.buffer.get_write_destination()
}
}
}

View file

@ -1,14 +1,13 @@
//! utilities and helpers for testing
#![allow(clippy::let_underscore_future)]
use std::future::{poll_fn, Future};
use std::sync::{Arc, Mutex};
use std::task::{ready, Context, Poll, Waker};
use std::{any, cell::RefCell, cmp, fmt, io, mem, net, pin::Pin, rc::Rc};
use std::task::{Context, Poll, Waker};
use std::{any, cell::RefCell, cmp, fmt, future::poll_fn, io, mem, net, rc::Rc};
use ntex_bytes::{Buf, BufMut, Bytes, BytesVec};
use ntex_util::time::{sleep, Millis, Sleep};
use ntex_util::time::{sleep, Millis};
use crate::{types, Handle, IoStream, ReadContext, ReadStatus, WriteContext, WriteStatus};
use crate::{types, Handle, IoStream, ReadContext, WriteContext, WriteContextBuf};
#[derive(Default)]
struct AtomicWaker(Arc<Mutex<RefCell<Option<Waker>>>>);
@ -356,14 +355,14 @@ impl IoStream for IoTest {
fn start(self, read: ReadContext, write: WriteContext) -> Option<Box<dyn Handle>> {
let io = Rc::new(self);
let _ = ntex_util::spawn(ReadTask {
io: io.clone(),
state: read,
let mut rio = Read(io.clone());
let _ = ntex_util::spawn(async move {
read.handle(&mut rio).await;
});
let _ = ntex_util::spawn(WriteTask {
io: io.clone(),
state: write,
st: IoWriteState::Processing(None),
let mut wio = Write(io.clone());
let _ = ntex_util::spawn(async move {
write.handle(&mut wio).await;
});
Some(Box::new(io))
@ -382,271 +381,97 @@ impl Handle for Rc<IoTest> {
}
/// Read io task
struct ReadTask {
io: Rc<IoTest>,
state: ReadContext,
}
struct Read(Rc<IoTest>);
impl Future for ReadTask {
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.as_ref();
this.state.with_buf(|buf, hw, lw| {
match this.state.poll_ready(cx) {
Poll::Ready(ReadStatus::Terminate) => {
log::trace!("read task is instructed to terminate");
Poll::Ready(Ok(()))
}
Poll::Ready(ReadStatus::Ready) => {
let io = &this.io;
// read data from socket
let mut new_bytes = 0;
loop {
// make sure we've got room
let remaining = buf.remaining_mut();
if remaining < lw {
buf.reserve(hw - remaining);
}
match io.poll_read_buf(cx, buf) {
Poll::Pending => {
log::trace!(
"no more data in io stream, read: {:?}",
new_bytes
);
break;
}
Poll::Ready(Ok(n)) => {
if n == 0 {
log::trace!("io stream is disconnected");
return Poll::Ready(Ok(()));
} else {
new_bytes += n;
if buf.len() >= hw {
log::trace!(
"high water mark pause reading, read: {:?}",
new_bytes
);
break;
}
}
}
Poll::Ready(Err(err)) => {
log::trace!("read task failed on io {:?}", err);
return Poll::Ready(Err(err));
}
}
}
Poll::Pending
}
Poll::Pending => Poll::Pending,
}
})
impl crate::AsyncRead for Read {
    /// Read bytes from the in-memory test stream into `buf`, returning the
    /// buffer back to the caller together with the io result.
    async fn read(&mut self, mut buf: BytesVec) -> (BytesVec, io::Result<usize>) {
        let stream = &self.0;
        let res = poll_fn(|cx| stream.poll_read_buf(cx, &mut buf)).await;
        (buf, res)
    }
}
#[derive(Debug)]
enum IoWriteState {
Processing(Option<Sleep>),
Shutdown(Option<Sleep>, Shutdown),
}
/// Write
struct Write(Rc<IoTest>);
#[derive(Debug)]
enum Shutdown {
None,
Flushed,
Stopping,
}
/// Write io task
struct WriteTask {
st: IoWriteState,
io: Rc<IoTest>,
state: WriteContext,
}
impl Future for WriteTask {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.as_mut().get_mut();
match this.st {
IoWriteState::Processing(ref mut delay) => {
match this.state.poll_ready(cx) {
Poll::Ready(WriteStatus::Ready) => {
// flush framed instance
match ready!(flush_io(&this.io, &this.state, cx)) {
Ok(()) => Poll::Pending,
Err(e) => {
this.state.close(Some(e));
Poll::Ready(())
}
}
}
Poll::Ready(WriteStatus::Timeout(time)) => {
if delay.is_none() {
*delay = Some(sleep(time));
}
self.poll(cx)
}
Poll::Ready(WriteStatus::Shutdown(time)) => {
log::trace!("write task is instructed to shutdown");
let timeout = if let Some(delay) = delay.take() {
delay
} else {
sleep(time)
};
this.st = IoWriteState::Shutdown(Some(timeout), Shutdown::None);
self.poll(cx)
}
Poll::Ready(WriteStatus::Terminate) => {
log::trace!("write task is instructed to terminate");
// shutdown WRITE side
this.io
.local
.lock()
.unwrap()
.borrow_mut()
.flags
.insert(IoTestFlags::CLOSED);
this.state.close(None);
Poll::Ready(())
}
Poll::Pending => Poll::Pending,
}
impl crate::AsyncWrite for Write {
async fn write(&mut self, buf: &mut WriteContextBuf) -> io::Result<()> {
poll_fn(|cx| {
if let Some(mut b) = buf.take() {
let result = write_io(&self.0, &mut b, cx);
buf.set(b);
result
} else {
Poll::Ready(Ok(()))
}
IoWriteState::Shutdown(ref mut delay, ref mut st) => {
// close WRITE side and wait for disconnect on read side.
// use disconnect timeout, otherwise it could hang forever.
loop {
match st {
Shutdown::None => {
// flush write buffer
match flush_io(&this.io, &this.state, cx) {
Poll::Ready(Ok(())) => {
*st = Shutdown::Flushed;
continue;
}
Poll::Ready(Err(err)) => {
log::trace!(
"write task is closed with err during flush {:?}",
err
);
this.state.close(Some(err));
return Poll::Ready(());
}
Poll::Pending => (),
}
}
Shutdown::Flushed => {
// shutdown WRITE side
this.io
.local
.lock()
.unwrap()
.borrow_mut()
.flags
.insert(IoTestFlags::CLOSED);
*st = Shutdown::Stopping;
continue;
}
Shutdown::Stopping => {
// read until 0 or err
let io = &this.io;
loop {
let mut buf = BytesVec::new();
match io.poll_read_buf(cx, &mut buf) {
Poll::Ready(Err(e)) => {
this.state.close(Some(e));
log::trace!("write task is stopped");
return Poll::Ready(());
}
Poll::Ready(Ok(0)) => {
this.state.close(None);
log::trace!("write task is stopped");
return Poll::Ready(());
}
Poll::Pending => break,
_ => (),
}
}
}
}
})
.await
}
// disconnect timeout
if let Some(ref delay) = delay {
if delay.poll_elapsed(cx).is_pending() {
return Poll::Pending;
}
}
log::trace!("write task is stopped after delay");
this.state.close(None);
return Poll::Ready(());
}
}
}
/// Flushing is a no-op for the in-memory test stream: `write` applies
/// bytes immediately, so there is nothing buffered to flush.
async fn flush(&mut self) -> io::Result<()> {
    Ok(())
}
/// Shut down the WRITE side of the test stream by flagging it CLOSED.
async fn shutdown(&mut self) -> io::Result<()> {
    let local = self.0.local.lock().unwrap();
    local.borrow_mut().flags.insert(IoTestFlags::CLOSED);
    Ok(())
}
}
/// Flush write buffer to underlying I/O stream.
pub(super) fn flush_io(
pub(super) fn write_io(
io: &IoTest,
state: &WriteContext,
buf: &mut BytesVec,
cx: &mut Context<'_>,
) -> Poll<io::Result<()>> {
state.with_buf(|buf| {
if let Some(buf) = buf {
let len = buf.len();
let len = buf.len();
if len != 0 {
log::trace!("flushing framed transport: {}", len);
if len != 0 {
log::trace!("flushing framed transport: {}", len);
let mut written = 0;
let result = loop {
break match io.poll_write_buf(cx, &buf[written..]) {
Poll::Ready(Ok(n)) => {
if n == 0 {
log::trace!(
"disconnected during flush, written {}",
written
);
Poll::Ready(Err(io::Error::new(
io::ErrorKind::WriteZero,
"failed to write frame to transport",
)))
} else {
written += n;
if written == len {
buf.clear();
Poll::Ready(Ok(()))
} else {
continue;
}
}
let mut written = 0;
let result = loop {
break match io.poll_write_buf(cx, &buf[written..]) {
Poll::Ready(Ok(n)) => {
if n == 0 {
log::trace!("disconnected during flush, written {}", written);
Poll::Ready(Err(io::Error::new(
io::ErrorKind::WriteZero,
"failed to write frame to transport",
)))
} else {
written += n;
if written == len {
buf.clear();
Poll::Ready(Ok(()))
} else {
continue;
}
Poll::Pending => {
// remove written data
buf.advance(written);
Poll::Pending
}
Poll::Ready(Err(e)) => {
log::trace!("error during flush: {}", e);
Poll::Ready(Err(e))
}
};
};
log::trace!("flushed {} bytes", written);
return result;
}
}
}
}
Poll::Pending => {
// remove written data
buf.advance(written);
Poll::Pending
}
Poll::Ready(Err(e)) => {
log::trace!("error during flush: {}", e);
Poll::Ready(Err(e))
}
};
};
log::trace!("flushed {} bytes", written);
result
} else {
Poll::Ready(Ok(()))
})
}
}
#[cfg(test)]