Mirror of https://github.com/ntex-rs/ntex.git (synced 2025-04-04 21:37:58 +03:00)

Compare commits: ntex-v2.11...master (58 commits)

Commits:

01d3a2440b, f5ee55d598, e4f24ee41f, f6fe9c3e10, 30928d019c, e9a1284151,
8f2d5056c9, f647ad2eac, 728ab919a3, b2915f4868, eb4ec4b3e1, 0d3f1293c9,
e903e65e27, eaec50d8a2, b32df88500, 5484009c92, bf6b1d6c79, e3f58cce27,
e904cf85f1, 3b58f5a111, 5621ca1898, 11734e8f1b, 5426790eb0, 7417ee3a4b,
1f71b200ad, f15c3203b1, a83ed4c3fa, ae5980cdd9, 5db953cea5, d3f9275f7a,
fe108f30c9, 14d2634e3d, 81eaf88752, 9a8a2b3216, ab5fb624b7, cfc32ed74f,
ecfc2936b5, 2db266ca0c, 12afaa00ea, db16b71c5f, dcc08b72d8, 9c78181c7b,
b8f8d637b0, 60a686b2f6, 47afec7351, 6a0aa33504, f520e88dd7, 8cfe0e50b1,
e6a25db7ee, 4e77e9ce24, 8ffa646af7, 59ffd17b91, 4c1bc3249b, 3e5211eb79,
415711222e, ae9d4ab331, 9b7d001f4f, bbbb7a393e

88 changed files with 3411 additions and 1983 deletions

.github/workflows/checks.yml (vendored, 4 lines changed)

@@ -12,7 +12,7 @@ jobs:
       with:
         toolchain: stable
     - run:
-        cargo check --tests --all --no-default-features --features="ntex/compio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws,ntex/brotli"
+        cargo check --tests --all --no-default-features --features="ntex/compio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws"

   clippy:
     name: Clippy
@@ -24,7 +24,7 @@ jobs:
       toolchain: stable
       components: clippy
     - run:
-        cargo clippy --tests --all --no-default-features --features="ntex/compio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws,ntex/brotli"
+        cargo clippy --tests --all --no-default-features --features="ntex/compio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws"

   fmt:
     name: Rustfmt

.github/workflows/cov.yml (vendored, 19 lines changed)

@@ -8,11 +8,6 @@ jobs:
     env:
       CARGO_TERM_COLOR: always
     steps:
-      - name: Free Disk Space
-        uses: jlumbroso/free-disk-space@main
-        with:
-          tool-cache: true
-
       - uses: actions/checkout@v4
       - name: Install Rust
         run: rustup update nightly
@@ -26,18 +21,20 @@ jobs:
       - name: Clean coverage results
         run: cargo llvm-cov clean --workspace

-      - name: Code coverage (glommio)
-        continue-on-error: true
-        run: cargo llvm-cov --no-report --all --no-default-features --features="ntex/glommio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws,ntex/brotli" -- --skip test_unhandled_data
+      - name: Code coverage (neon)
+        run: cargo llvm-cov --no-report --all --no-default-features --features="ntex/neon,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws"
+
+      - name: Code coverage (neon-uring)
+        run: cargo llvm-cov --no-report --all --no-default-features --features="ntex/neon-uring,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws"

       - name: Code coverage (tokio)
-        run: cargo llvm-cov --no-report --all --no-default-features --features="ntex/tokio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws,ntex/brotli"
+        run: cargo llvm-cov --no-report --all --no-default-features --features="ntex/tokio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws"

       - name: Code coverage (compio)
-        run: cargo llvm-cov --no-report --all --no-default-features --features="ntex/compio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws,ntex/brotli"
+        run: cargo llvm-cov --no-report --all --no-default-features --features="ntex/compio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws"

       - name: Generate coverage report
-        run: cargo llvm-cov report --lcov --output-path lcov.info --ignore-filename-regex="ntex-compio|ntex-tokio|ntex-glommio|ntex-async-std"
+        run: cargo llvm-cov report --lcov --output-path lcov.info --ignore-filename-regex="ntex-compio|ntex-tokio"

       - name: Upload coverage to Codecov
         uses: codecov/codecov-action@v4

.github/workflows/linux.yml (vendored, 25 lines changed)

@@ -16,11 +16,6 @@ jobs:
     runs-on: ubuntu-latest

    steps:
-      - name: Free Disk Space
-        uses: jlumbroso/free-disk-space@main
-        with:
-          tool-cache: true
-
       - uses: actions/checkout@v4

       - name: Install ${{ matrix.version }}
@@ -44,21 +39,25 @@ jobs:
           path: ~/.cargo/git
           key: ${{ matrix.version }}-x86_64-unknown-linux-gnu-cargo-index-trimmed-${{ hashFiles('**/Cargo.lock') }}

+      - name: Run tests (neon)
+        timeout-minutes: 40
+        run: |
+          cargo test --all --no-default-features --features="ntex/neon,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws"
+
+      - name: Run tests (neon-uring)
+        timeout-minutes: 40
+        run: |
+          cargo test --all --no-default-features --features="ntex/neon-uring,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws"
+
       - name: Run tests (tokio)
         timeout-minutes: 40
         run: |
-          cargo test --all --no-fail-fast --no-default-features --features="ntex/tokio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws,ntex/brotli"
+          cargo test --all --no-fail-fast --no-default-features --features="ntex/tokio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws"

       - name: Run tests (compio)
         timeout-minutes: 40
         run: |
-          cargo test --all --no-default-features --features="ntex/compio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws,ntex/brotli"
-
-      - name: Run tests (async-std)
-        timeout-minutes: 40
-        continue-on-error: true
-        run: |
-          cargo test --all --no-default-features --features="ntex/async-std,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws,ntex/brotli"
+          cargo test --all --no-default-features --features="ntex/compio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws"

       - name: Install cargo-cache
         continue-on-error: true

.github/workflows/osx.yml (vendored, 8 lines changed)

@@ -37,12 +37,16 @@ jobs:
           path: ~/.cargo/git
           key: ${{ matrix.version }}-aarch64-apple-darwin-cargo-index-trimmed-${{ hashFiles('**/Cargo.lock') }}

+      - name: Run tests (neon)
+        timeout-minutes: 40
+        run: cargo test --all --no-default-features --no-fail-fast --features="ntex/neon,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws"
+
       - name: Run tests (tokio)
-        run: cargo test --all --no-default-features --no-fail-fast --features="ntex/tokio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws,ntex/brotli"
+        run: cargo test --all --no-default-features --no-fail-fast --features="ntex/tokio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws"

       - name: Run tests (compio)
         timeout-minutes: 40
-        run: cargo test --all --no-default-features --no-fail-fast --features="ntex/compio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws,ntex/brotli"
+        run: cargo test --all --no-default-features --no-fail-fast --features="ntex/compio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws"

       - name: Install cargo-cache
         continue-on-error: true

.github/workflows/windows.yml (vendored, 4 lines changed)

@@ -63,8 +63,8 @@ jobs:

       - name: Run tests (tokio)
         run: |
-          cargo test --all --lib --no-default-features --no-fail-fast --features="ntex/tokio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws,ntex/brotli" -- --skip test_timer
+          cargo test --all --lib --no-default-features --no-fail-fast --features="ntex/tokio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws" -- --skip test_timer

       - name: Run tests (compio)
         run: |
-          cargo test --all --lib --no-default-features --no-fail-fast --features="ntex/compio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws,ntex/brotli" -- --skip test_timer
+          cargo test --all --lib --no-default-features --no-fail-fast --features="ntex/compio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws" -- --skip test_timer

Cargo.toml (36 lines changed)

@@ -15,12 +15,18 @@ members = [
     "ntex-macros",
     "ntex-util",

-    "ntex-async-std",
     "ntex-compio",
-    "ntex-glommio",
     "ntex-tokio",
 ]

+[workspace.package]
+authors = ["ntex contributors <team@ntex.rs>"]
+repository = "https://github.com/ntex-rs/ntex"
+documentation = "https://docs.rs/ntex/"
+license = "MIT OR Apache-2.0"
+edition = "2021"
+rust-version = "1.75"
+
 [patch.crates-io]
 ntex = { path = "ntex" }
 ntex-bytes = { path = "ntex-bytes" }
@@ -37,6 +43,28 @@ ntex-macros = { path = "ntex-macros" }
 ntex-util = { path = "ntex-util" }

 ntex-compio = { path = "ntex-compio" }
-ntex-glommio = { path = "ntex-glommio" }
 ntex-tokio = { path = "ntex-tokio" }
-ntex-async-std = { path = "ntex-async-std" }
+
+[workspace.dependencies]
+async-channel = "2"
+async-task = "4.5.0"
+atomic-waker = "1.1"
+core_affinity = "0.8"
+bitflags = "2"
+cfg_aliases = "0.2.1"
+cfg-if = "1.0.0"
+crossbeam-channel = "0.5.8"
+crossbeam-queue = "0.3.8"
+futures-util = "0.3.29"
+fxhash = "0.2"
+libc = "0.2.164"
+log = "0.4"
+io-uring = "0.7.4"
+oneshot = "0.1"
+polling = "3.7.4"
+nohash-hasher = "0.2.0"
+scoped-tls = "1.0.1"
+slab = "0.4.9"
+socket2 = "0.5.6"
+windows-sys = "0.52.0"
+thiserror = "1"

README.md

@@ -25,7 +25,7 @@
 ## Usage

 ntex supports multiple async runtimes, runtime must be selected as a feature. Available options are `compio`, `tokio`,
-`glommio` or `async-std`.
+`neon` or `neon-uring`.

 ```toml
 [dependencies]

ntex-async-std CHANGES.md (deleted, 45 lines)

@@ -1,45 +0,0 @@
# Changes

## [0.4.0] - 2024-01-09

* Release

## [0.4.0-b.0] - 2024-01-07

* Use "async fn" in trait for Service definition

## [0.3.2] - 2023-11-22

* Replace async-oneshot with oneshot

## [0.3.1] - 2023-11-12

* Optimize io read task

## [0.3.0] - 2023-06-22

* Release v0.3.0

## [0.3.0-beta.0] - 2023-06-16

* Migrate to ntex-service 1.2

## [0.2.2] - 2023-01-26

* Update io api usage

## [0.2.0] - 2023-01-04

* Release

## [0.2.0-beta.0] - 2022-12-28

* Migrate to ntex-service 1.0

## [0.1.1] - 2022-01-30

* Update to ntex-io 0.1.7

## [0.1.0] - 2022-01-03

* Initial release

ntex-async-std Cargo.toml (deleted, 24 lines)

@@ -1,24 +0,0 @@
[package]
name = "ntex-async-std"
version = "0.5.1"
authors = ["ntex contributors <team@ntex.rs>"]
description = "async-std intergration for ntex framework"
keywords = ["network", "framework", "async", "futures"]
homepage = "https://ntex.rs"
repository = "https://github.com/ntex-rs/ntex.git"
documentation = "https://docs.rs/ntex-rt-async-std/"
categories = ["network-programming", "asynchronous"]
license = "MIT OR Apache-2.0"
edition = "2021"

[lib]
name = "ntex_async_std"
path = "src/lib.rs"

[dependencies]
ntex-bytes = "0.1"
ntex-io = "2.5"
ntex-util = "2.0"
log = "0.4"
async-std = { version = "1", features = ["unstable"] }
oneshot = { version = "0.1", default-features = false, features = ["async"] }

ntex-async-std LICENSE-APACHE (deleted)

@@ -1 +0,0 @@
../LICENSE-APACHE

ntex-async-std LICENSE-MIT (deleted)

@@ -1 +0,0 @@
../LICENSE-MIT

ntex-async-std I/O adapters (deleted, 220 lines)

@@ -1,220 +0,0 @@
Removed in full: the async-std driver glue. It contained IoStream and Handle
implementations for the crate's TcpStream and UnixStream wrappers, Read/Write adapter
types implementing ntex_io::AsyncRead and ntex_io::AsyncWrite on top of async-std
sockets, and the flush_io / poll_read_buf helper functions.

ntex-async-std crate root (deleted, 64 lines)

@@ -1,64 +0,0 @@
Removed in full: the TcpStream/UnixStream newtypes, the tcp_connect / tcp_connect_in
and unix_connect / unix_connect_in constructors, the from_tcp_stream / from_unix_stream
conversions, and the signal re-exports.

ntex-async-std signals module (deleted, 50 lines)

@@ -1,50 +0,0 @@
Removed in full: the Signal enum (Hup, Int, Term, Quit), the oneshot-based signal()
registration helper, and a no-op Signals future.

ntex-glommio CHANGES.md (deleted, 57 lines)

@@ -1,57 +0,0 @@
# Changes

## [0.5.2] - 2024-09-xx

* Update to glommio v0.9

## [0.4.0] - 2024-01-09

* Release

## [0.4.0-b.0] - 2024-01-07

* Use "async fn" in trait for Service definition

## [0.3.1] - 2023-11-22

* Replace async-oneshot with oneshot

## [0.3.0] - 2023-06-22

* Release v0.3.0

## [0.3.0-beta.0] - 2023-06-16

* Migrate to ntex-service 1.2

## [0.2.4] - 2023-05-30

* Fix borrow mut panic #204

## [0.2.3] - 2023-04-11

* Chore upgrade glommio to 0.8

## [0.2.2] - 2023-01-26

* Update io api usage

## [0.2.0] - 2023-01-04

* Release

## [0.2.0-beta.0] - 2022-12-28

* Migrate to ntex-service 1.0

## [0.1.2] - 2022-02-20

* Upgrade to glommio 0.7

## [0.1.1] - 2022-01-30

* Update to ntex-io 0.1.7

## [0.1.0] - 2022-01-17

* Initial release

ntex-glommio Cargo.toml (deleted, 27 lines)

@@ -1,27 +0,0 @@
[package]
name = "ntex-glommio"
version = "0.5.2"
authors = ["ntex contributors <team@ntex.rs>"]
description = "glommio intergration for ntex framework"
keywords = ["network", "framework", "async", "futures"]
homepage = "https://ntex.rs"
repository = "https://github.com/ntex-rs/ntex.git"
documentation = "https://docs.rs/ntex-rt-glommio/"
categories = ["network-programming", "asynchronous"]
license = "MIT OR Apache-2.0"
edition = "2021"

[lib]
name = "ntex_glommio"
path = "src/lib.rs"

[dependencies]
ntex-bytes = "0.1"
ntex-io = "2.5"
ntex-util = "2.0"
futures-lite = "2.2"
log = "0.4"
oneshot = { version = "0.1", default-features = false, features = ["async"] }

[target.'cfg(target_os = "linux")'.dependencies]
glommio = "0.9"

ntex-glommio LICENSE-APACHE (deleted)

@@ -1 +0,0 @@
../LICENSE-APACHE

ntex-glommio LICENSE-MIT (deleted)

@@ -1 +0,0 @@
../LICENSE-MIT

ntex-glommio I/O adapters (deleted, 205 lines)

@@ -1,205 +0,0 @@
Removed in full: the glommio driver glue. It contained IoStream and Handle
implementations for the crate's TcpStream and UnixStream wrappers, Read/Write and
UnixRead/UnixWrite adapters implementing ntex_io::AsyncRead and ntex_io::AsyncWrite
over futures-lite streams, and the flush_io / poll_read_buf helper functions.

ntex-glommio crate root (deleted, 90 lines)

@@ -1,90 +0,0 @@
Removed in full: the linux-only net_impl module with TcpStream/UnixStream wrappers
around glommio sockets, the tcp_connect / tcp_connect_in and unix_connect /
unix_connect_in constructors, the from_tcp_stream / from_unix_stream conversions,
and the signal re-exports.

ntex-glommio signals module (deleted, 50 lines)

@@ -1,50 +0,0 @@
Removed in full: the Signal enum (Hup, Int, Term, Quit), the oneshot-based signal()
registration helper, and a no-op Signals future.

ntex-io/CHANGES.md

@@ -1,5 +1,17 @@
 # Changes

+## [2.11.1] - 2025-03-20
+
+* Add readiness check support
+
+## [2.11.0] - 2025-03-10
+
+* Add single io context
+
+## [2.10.0] - 2025-02-26
+
+* Impl Filter for Sealed #506
+
 ## [2.9.3] - 2025-01-21

 * Allow to access io write destination buffer
|
|||
[package]
|
||||
name = "ntex-io"
|
||||
version = "2.9.3"
|
||||
version = "2.11.1"
|
||||
authors = ["ntex contributors <team@ntex.rs>"]
|
||||
description = "Utilities for encoding and decoding frames"
|
||||
keywords = ["network", "framework", "async", "futures"]
|
||||
|
@ -28,4 +28,3 @@ pin-project-lite = "0.2"
|
|||
[dev-dependencies]
|
||||
ntex = "2"
|
||||
rand = "0.8"
|
||||
env_logger = "0.11"
|
||||
|
|
|

ntex-io: buffer stack (impl Stack)

@@ -152,6 +152,27 @@ impl Stack {
         }
     }

+    pub(crate) fn with_read_source<F, R>(&self, io: &IoRef, f: F) -> R
+    where
+        F: FnOnce(&mut BytesVec) -> R,
+    {
+        let item = self.get_last_level();
+        let mut rb = item.0.take();
+        if rb.is_none() {
+            rb = Some(io.memory_pool().get_read_buf());
+        }
+
+        let result = f(rb.as_mut().unwrap());
+        if let Some(b) = rb {
+            if b.is_empty() {
+                io.memory_pool().release_read_buf(b);
+            } else {
+                item.0.set(Some(b));
+            }
+        }
+        result
+    }
+
     pub(crate) fn with_read_destination<F, R>(&self, io: &IoRef, f: F) -> R
     where
         F: FnOnce(&mut BytesVec) -> R,

ntex-io: dispatcher tests

@@ -1244,6 +1244,8 @@ mod tests {
             sleep(Millis(50)).await;
             if let DispatchItem::Item(msg) = msg {
                 Ok::<_, ()>(Some(msg.freeze()))
+            } else if let DispatchItem::Disconnect(_) = msg {
+                Ok::<_, ()>(None)
             } else {
                 panic!()
             }
@ -25,6 +25,8 @@ bitflags::bitflags! {
|
|||
|
||||
/// write task paused
|
||||
const WR_PAUSED = 0b0000_0100_0000_0000;
|
||||
/// wait for write completion task
|
||||
const WR_TASK_WAIT = 0b0000_1000_0000_0000;
|
||||
|
||||
/// dispatcher is marked stopped
|
||||
const DSP_STOP = 0b0001_0000_0000_0000;
|
||||
|
@ -38,6 +40,10 @@ impl Flags {
|
|||
self.intersects(Flags::IO_STOPPED)
|
||||
}
|
||||
|
||||
pub(crate) fn is_task_waiting_for_write(&self) -> bool {
|
||||
self.contains(Flags::WR_TASK_WAIT)
|
||||
}
|
||||
|
||||
pub(crate) fn is_waiting_for_write(&self) -> bool {
|
||||
self.intersects(Flags::BUF_W_MUST_FLUSH | Flags::BUF_W_BACKPRESSURE)
|
||||
}
|
||||
|
@ -46,10 +52,18 @@ impl Flags {
|
|||
self.remove(Flags::BUF_W_MUST_FLUSH | Flags::BUF_W_BACKPRESSURE);
|
||||
}
|
||||
|
||||
pub(crate) fn task_waiting_for_write_is_done(&mut self) {
|
||||
self.remove(Flags::WR_TASK_WAIT);
|
||||
}
|
||||
|
||||
pub(crate) fn is_read_buf_ready(&self) -> bool {
|
||||
self.contains(Flags::BUF_R_READY)
|
||||
}
|
||||
|
||||
pub(crate) fn is_waiting_for_read(&self) -> bool {
|
||||
self.contains(Flags::RD_NOTIFY)
|
||||
}
|
||||
|
||||
pub(crate) fn cannot_read(self) -> bool {
|
||||
self.intersects(Flags::RD_PAUSED | Flags::BUF_R_FULL)
|
||||
}
|
||||
|
|
|

ntex-io: io module (Io)

@@ -10,7 +10,7 @@ use ntex_util::{future::Either, task::LocalWaker, time::Seconds};
 use crate::buf::Stack;
 use crate::filter::{Base, Filter, Layer, NullFilter};
 use crate::flags::Flags;
-use crate::seal::Sealed;
+use crate::seal::{IoBoxed, Sealed};
 use crate::tasks::{ReadContext, WriteContext};
 use crate::timer::TimerHandle;
 use crate::{Decoded, FilterLayer, Handle, IoStatusUpdate, IoStream, RecvError};
@@ -294,6 +294,12 @@ impl<F: Filter> Io<F> {
         Io(UnsafeCell::new(state), marker::PhantomData)
     }

+    #[inline]
+    /// Convert current io stream into boxed version
+    pub fn boxed(self) -> IoBoxed {
+        self.seal().into()
+    }
+
     #[inline]
     /// Map current filter with new one
     pub fn add_filter<U>(self, nf: U) -> Io<Layer<U, F>>
@@ -431,7 +437,7 @@ impl<F> Io<F> {
         } else {
             st.dispatch_task.register(cx.waker());

-            let ready = flags.contains(Flags::BUF_R_READY);
+            let ready = flags.is_read_buf_ready();
             if flags.cannot_read() {
                 flags.cleanup_read_flags();
                 st.read_task.wake();
@@ -552,24 +558,28 @@ impl<F> Io<F> {
         let st = self.st();
         let flags = self.flags();

-        if flags.is_stopped() {
-            Poll::Ready(Err(st.error_or_disconnected()))
-        } else {
-            let len = st.buffer.write_destination_size();
-            if len > 0 {
-                if full {
-                    st.insert_flags(Flags::BUF_W_MUST_FLUSH);
-                    st.dispatch_task.register(cx.waker());
-                    return Poll::Pending;
-                } else if len >= st.pool.get().write_params_high() << 1 {
-                    st.insert_flags(Flags::BUF_W_BACKPRESSURE);
-                    st.dispatch_task.register(cx.waker());
-                    return Poll::Pending;
-                }
-            }
-            st.remove_flags(Flags::BUF_W_MUST_FLUSH | Flags::BUF_W_BACKPRESSURE);
-            Poll::Ready(Ok(()))
-        }
+        let len = st.buffer.write_destination_size();
+        if len > 0 {
+            if full {
+                st.insert_flags(Flags::BUF_W_MUST_FLUSH);
+                st.dispatch_task.register(cx.waker());
+                return if flags.is_stopped() {
+                    Poll::Ready(Err(st.error_or_disconnected()))
+                } else {
+                    Poll::Pending
+                };
+            } else if len >= st.pool.get().write_params_high() << 1 {
+                st.insert_flags(Flags::BUF_W_BACKPRESSURE);
+                st.dispatch_task.register(cx.waker());
+                return if flags.is_stopped() {
+                    Poll::Ready(Err(st.error_or_disconnected()))
+                } else {
+                    Poll::Pending
+                };
+            }
+        }
+        st.remove_flags(Flags::BUF_W_MUST_FLUSH | Flags::BUF_W_BACKPRESSURE);
+        Poll::Ready(Ok(()))
     }

     #[inline]

ntex-io: crate root re-exports

@@ -29,7 +29,7 @@ pub use self::filter::{Base, Filter, Layer};
 pub use self::framed::Framed;
 pub use self::io::{Io, IoRef, OnDisconnect};
 pub use self::seal::{IoBoxed, Sealed};
-pub use self::tasks::{ReadContext, WriteContext, WriteContextBuf};
+pub use self::tasks::{IoContext, ReadContext, WriteContext, WriteContextBuf};
 pub use self::timer::TimerHandle;
 pub use self::utils::{seal, Decoded};

|
|||
/// Status for read task
|
||||
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
|
||||
pub enum ReadStatus {
|
||||
/// Read task is clear to proceed with read operation
|
||||
Ready,
|
||||
/// Terminate read task
|
||||
Terminate,
|
||||
}
|
||||
|
||||
|
|
|

ntex-io: seal module (Sealed, IoBoxed)

@@ -1,6 +1,7 @@
-use std::{fmt, ops};
+use std::{any::Any, any::TypeId, fmt, io, ops, task::Context, task::Poll};

-use crate::{filter::Filter, Io};
+use crate::filter::{Filter, FilterReadStatus};
+use crate::{buf::Stack, Io, IoRef, ReadStatus, WriteStatus};

 /// Sealed filter type
 pub struct Sealed(pub(crate) Box<dyn Filter>);
@@ -11,6 +12,44 @@ impl fmt::Debug for Sealed {
     }
 }

+impl Filter for Sealed {
+    #[inline]
+    fn query(&self, id: TypeId) -> Option<Box<dyn Any>> {
+        self.0.query(id)
+    }
+
+    #[inline]
+    fn process_read_buf(
+        &self,
+        io: &IoRef,
+        stack: &Stack,
+        idx: usize,
+        nbytes: usize,
+    ) -> io::Result<FilterReadStatus> {
+        self.0.process_read_buf(io, stack, idx, nbytes)
+    }
+
+    #[inline]
+    fn process_write_buf(&self, io: &IoRef, stack: &Stack, idx: usize) -> io::Result<()> {
+        self.0.process_write_buf(io, stack, idx)
+    }
+
+    #[inline]
+    fn shutdown(&self, io: &IoRef, stack: &Stack, idx: usize) -> io::Result<Poll<()>> {
+        self.0.shutdown(io, stack, idx)
+    }
+
+    #[inline]
+    fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<ReadStatus> {
+        self.0.poll_read_ready(cx)
+    }
+
+    #[inline]
+    fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<WriteStatus> {
+        self.0.poll_write_ready(cx)
+    }
+}
+
 #[derive(Debug)]
 /// Boxed `Io` object with erased filter type
 pub struct IoBoxed(Io<Sealed>);
@@ -25,12 +64,6 @@ impl IoBoxed {
     }
 }

-impl From<Io<Sealed>> for IoBoxed {
-    fn from(io: Io<Sealed>) -> Self {
-        Self(io)
-    }
-}
-
 impl<F: Filter> From<Io<F>> for IoBoxed {
     fn from(io: Io<F>) -> Self {
         Self(io.seal())
@@ -45,3 +78,9 @@ impl ops::Deref for IoBoxed {
         &self.0
     }
 }
+
+impl From<IoBoxed> for Io<Sealed> {
+    fn from(value: IoBoxed) -> Self {
+        value.0
+    }
+}
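
The seal changes above pair the existing `From<Io<F>> for IoBoxed` conversion with the new `Io::boxed` helper and a new `From<IoBoxed> for Io<Sealed>` impl. A minimal sketch of what those conversions allow, using only items re-exported from `ntex-io`; the `erase` and `unseal` helper functions are illustrative, not part of the diff:

```rust
use ntex_io::{Filter, Io, IoBoxed, Sealed};

// Erase the filter type: equivalent to `IoBoxed::from(io.seal())`,
// expressed through the `boxed` helper added in this changeset.
fn erase<F: Filter>(io: Io<F>) -> IoBoxed {
    io.boxed()
}

// Go back to a concrete `Io<Sealed>` through the new `From<IoBoxed>` impl.
fn unseal(io: IoBoxed) -> Io<Sealed> {
    io.into()
}
```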

ntex-io: tasks module (ReadContext / IoContext)

@@ -1,6 +1,6 @@
-use std::{cell::Cell, fmt, future::poll_fn, io, task::Context, task::Poll};
+use std::{cell::Cell, fmt, future::poll_fn, io, task::ready, task::Context, task::Poll};

-use ntex_bytes::{BufMut, BytesVec};
+use ntex_bytes::{Buf, BufMut, BytesVec};
 use ntex_util::{future::lazy, future::select, future::Either, time::sleep, time::Sleep};

 use crate::{AsyncRead, AsyncWrite, Flags, IoRef, ReadStatus, WriteStatus};
@@ -19,6 +19,13 @@ impl ReadContext {
         Self(io.clone(), Cell::new(None))
     }

+    #[doc(hidden)]
+    #[inline]
+    /// Io tag
+    pub fn context(&self) -> IoContext {
+        IoContext::new(&self.0)
+    }
+
     #[inline]
     /// Io tag
     pub fn tag(&self) -> &'static str {
@@ -121,7 +128,7 @@ impl ReadContext {
             );
             // dest buffer has new data, wake up dispatcher
             inner.dispatch_task.wake();
-        } else if inner.flags.get().contains(Flags::RD_NOTIFY) {
+        } else if inner.flags.get().is_waiting_for_read() {
             // in case of "notify" we must wake up dispatch task
             // if we read any data from source
             inner.dispatch_task.wake();
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Context for io read task
|
||||
pub struct IoContext(IoRef);
|
||||
|
||||
impl fmt::Debug for IoContext {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("IoContext").field("io", &self.0).finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl IoContext {
|
||||
pub(crate) fn new(io: &IoRef) -> Self {
|
||||
Self(io.clone())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
/// Io tag
|
||||
pub fn tag(&self) -> &'static str {
|
||||
self.0.tag()
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
/// Io flags
|
||||
pub fn flags(&self) -> crate::flags::Flags {
|
||||
self.0.flags()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
/// Check readiness for read operations
|
||||
pub fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<ReadStatus> {
|
||||
self.shutdown_filters();
|
||||
self.0.filter().poll_read_ready(cx)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
/// Check readiness for write operations
|
||||
pub fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<WriteStatus> {
|
||||
self.0.filter().poll_write_ready(cx)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
/// Get io error
|
||||
pub fn stopped(&self, e: Option<io::Error>) {
|
||||
self.0 .0.io_stopped(e);
|
||||
}
|
||||
|
||||
/// Wait when io get closed or preparing for close
|
||||
pub async fn shutdown(&self, flush_buf: bool) {
|
||||
let st = &self.0 .0;
|
||||
let mut timeout = None;
|
||||
|
||||
poll_fn(|cx| {
|
||||
let flags = self.0.flags();
|
||||
|
||||
if flags.intersects(Flags::IO_STOPPING | Flags::IO_STOPPED) {
|
||||
Poll::Ready(())
|
||||
} else {
|
||||
st.write_task.register(cx.waker());
|
||||
if flags.contains(Flags::IO_STOPPING_FILTERS) {
|
||||
if timeout.is_none() {
|
||||
timeout = Some(sleep(st.disconnect_timeout.get()));
|
||||
}
|
||||
if timeout.as_ref().unwrap().poll_elapsed(cx).is_ready() {
|
||||
st.dispatch_task.wake();
|
||||
st.insert_flags(Flags::IO_STOPPING);
|
||||
return Poll::Ready(());
|
||||
}
|
||||
}
|
||||
Poll::Pending
|
||||
}
|
||||
})
|
||||
.await;
|
||||
|
||||
if flush_buf && !self.0.flags().contains(Flags::WR_PAUSED) {
|
||||
st.insert_flags(Flags::WR_TASK_WAIT);
|
||||
|
||||
poll_fn(|cx| {
|
||||
let flags = self.0.flags();
|
||||
|
||||
if flags.intersects(Flags::WR_PAUSED | Flags::IO_STOPPED) {
|
||||
Poll::Ready(())
|
||||
} else {
|
||||
st.write_task.register(cx.waker());
|
||||
|
||||
if timeout.is_none() {
|
||||
timeout = Some(sleep(st.disconnect_timeout.get()));
|
||||
}
|
||||
if timeout.as_ref().unwrap().poll_elapsed(cx).is_ready() {
|
||||
Poll::Ready(())
|
||||
} else {
|
||||
Poll::Pending
|
||||
}
|
||||
}
|
||||
})
|
||||
.await;
|
||||
}
|
||||
}
|
||||
|
||||
/// Get read buffer
|
||||
pub fn get_read_buf(&self) -> Poll<BytesVec> {
|
||||
let inner = &self.0 .0;
|
||||
|
||||
if let Some(waker) = inner.read_task.take() {
|
||||
let mut cx = Context::from_waker(&waker);
|
||||
|
||||
if let Poll::Ready(ReadStatus::Ready) = self.0.filter().poll_read_ready(&mut cx)
|
||||
{
|
||||
let mut buf = if inner.flags.get().is_read_buf_ready() {
|
||||
// read buffer is still not read by dispatcher
|
||||
// we cannot touch it
|
||||
inner.pool.get().get_read_buf()
|
||||
} else {
|
||||
inner
|
||||
.buffer
|
||||
.get_read_source()
|
||||
.unwrap_or_else(|| inner.pool.get().get_read_buf())
|
||||
};
|
||||
|
||||
// make sure we've got room
|
||||
let (hw, lw) = self.0.memory_pool().read_params().unpack();
|
||||
let remaining = buf.remaining_mut();
|
||||
if remaining < lw {
|
||||
buf.reserve(hw - remaining);
|
||||
}
|
||||
return Poll::Ready(buf);
|
||||
}
|
||||
}
|
||||
|
||||
Poll::Pending
|
||||
}
|
||||
|
||||
pub fn release_read_buf(&self, buf: BytesVec) {
|
||||
let inner = &self.0 .0;
|
||||
if let Some(mut first_buf) = inner.buffer.get_read_source() {
|
||||
first_buf.extend_from_slice(&buf);
|
||||
inner.buffer.set_read_source(&self.0, first_buf);
|
||||
} else {
|
||||
inner.buffer.set_read_source(&self.0, buf);
|
||||
}
|
||||
}
|
||||
|
||||
/// Set read buffer
|
||||
pub fn set_read_buf(&self, result: io::Result<usize>, buf: BytesVec) -> Poll<()> {
|
||||
let inner = &self.0 .0;
|
||||
let (hw, _) = self.0.memory_pool().read_params().unpack();
|
||||
|
||||
if let Some(mut first_buf) = inner.buffer.get_read_source() {
|
||||
first_buf.extend_from_slice(&buf);
|
||||
inner.buffer.set_read_source(&self.0, first_buf);
|
||||
} else {
|
||||
inner.buffer.set_read_source(&self.0, buf);
|
||||
}
|
||||
|
||||
match result {
|
||||
Ok(0) => {
|
||||
inner.io_stopped(None);
|
||||
Poll::Ready(())
|
||||
}
|
||||
Ok(nbytes) => {
|
||||
let filter = self.0.filter();
|
||||
let res = filter
|
||||
.process_read_buf(&self.0, &inner.buffer, 0, nbytes)
|
||||
.and_then(|status| {
|
||||
if status.nbytes > 0 {
|
||||
// dest buffer has new data, wake up dispatcher
|
||||
if inner.buffer.read_destination_size() >= hw {
|
||||
log::trace!(
|
||||
"{}: Io read buffer is too large {}, enable read back-pressure",
|
||||
self.0.tag(),
|
||||
nbytes
|
||||
);
|
||||
inner.insert_flags(Flags::BUF_R_READY | Flags::BUF_R_FULL);
|
||||
} else {
|
||||
inner.insert_flags(Flags::BUF_R_READY);
|
||||
|
||||
if nbytes >= hw {
|
||||
// read task is paused because of read back-pressure
|
||||
// but there is no new data in top most read buffer
|
||||
// so we need to wake up read task to read more data
|
||||
// otherwise read task would sleep forever
|
||||
inner.read_task.wake();
|
||||
}
|
||||
}
|
||||
log::trace!(
|
||||
"{}: New {} bytes available, wakeup dispatcher",
|
||||
self.0.tag(),
|
||||
nbytes
|
||||
);
|
||||
if !inner.dispatch_task.wake_checked() {
|
||||
log::error!("Dispatcher waker is not registered");
|
||||
}
|
||||
} else {
|
||||
if nbytes >= hw {
|
||||
// read task is paused because of read back-pressure
|
||||
// but there is no new data in top most read buffer
|
||||
// so we need to wake up read task to read more data
|
||||
// otherwise read task would sleep forever
|
||||
inner.read_task.wake();
|
||||
}
|
||||
if inner.flags.get().is_waiting_for_read() {
|
||||
// in case of "notify" we must wake up dispatch task
|
||||
// if we read any data from source
|
||||
inner.dispatch_task.wake();
|
||||
}
|
||||
}
|
||||
|
||||
// while reading, filter wrote some data
|
||||
// in that case filters need to process write buffers
|
||||
// and potentialy wake write task
|
||||
if status.need_write {
|
||||
inner.write_task.wake();
|
||||
filter.process_write_buf(&self.0, &inner.buffer, 0)
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
});
|
||||
|
||||
if let Err(err) = res {
|
||||
inner.io_stopped(Some(err));
|
||||
Poll::Ready(())
|
||||
} else {
|
||||
self.shutdown_filters();
|
||||
Poll::Pending
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
inner.io_stopped(Some(e));
|
||||
Poll::Ready(())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Get write buffer
|
||||
pub fn get_write_buf(&self) -> Poll<BytesVec> {
|
||||
let inner = &self.0 .0;
|
||||
|
||||
// check write readiness
|
||||
if let Some(waker) = inner.write_task.take() {
|
||||
let ready = self
|
||||
.0
|
||||
.filter()
|
||||
.poll_write_ready(&mut Context::from_waker(&waker));
|
||||
let buf = if matches!(
|
||||
ready,
|
||||
Poll::Ready(WriteStatus::Ready | WriteStatus::Shutdown)
|
||||
) {
|
||||
inner.buffer.get_write_destination().and_then(|buf| {
|
||||
if buf.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(buf)
|
||||
}
|
||||
})
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
if let Some(buf) = buf {
|
||||
return Poll::Ready(buf);
|
||||
}
|
||||
}
|
||||
Poll::Pending
|
||||
}
|
||||
|
||||
pub fn release_write_buf(&self, mut buf: BytesVec) {
|
||||
let inner = &self.0 .0;
|
||||
|
||||
if let Some(b) = inner.buffer.get_write_destination() {
|
||||
buf.extend_from_slice(&b);
|
||||
self.0.memory_pool().release_write_buf(b);
|
||||
}
|
||||
inner.buffer.set_write_destination(buf);
|
||||
|
||||
// if write buffer is smaller than high watermark value, turn off back-pressure
|
||||
let len = inner.buffer.write_destination_size();
|
||||
let mut flags = inner.flags.get();
|
||||
|
||||
if len == 0 {
|
||||
if flags.is_waiting_for_write() {
|
||||
flags.waiting_for_write_is_done();
|
||||
inner.dispatch_task.wake();
|
||||
}
|
||||
flags.insert(Flags::WR_PAUSED);
|
||||
inner.flags.set(flags);
|
||||
} else if flags.contains(Flags::BUF_W_BACKPRESSURE)
|
||||
&& len < inner.pool.get().write_params_high() << 1
|
||||
{
|
||||
flags.remove(Flags::BUF_W_BACKPRESSURE);
|
||||
inner.flags.set(flags);
|
||||
inner.dispatch_task.wake();
|
||||
}
|
||||
inner.flags.set(flags);
|
||||
}
|
||||
|
||||
/// Set write buffer
|
||||
pub fn set_write_buf(&self, result: io::Result<usize>, mut buf: BytesVec) -> Poll<()> {
|
||||
let result = match result {
|
||||
Ok(0) => {
|
||||
log::trace!("{}: Disconnected during flush", self.tag());
|
||||
Err(io::Error::new(
|
||||
io::ErrorKind::WriteZero,
|
||||
"failed to write frame to transport",
|
||||
))
|
||||
}
|
||||
Ok(n) => {
|
||||
if n == buf.len() {
|
||||
buf.clear();
|
||||
Ok(0)
|
||||
} else {
|
||||
buf.advance(n);
|
||||
Ok(buf.len())
|
||||
}
|
||||
}
|
||||
Err(e) => Err(e),
|
||||
};
|
||||
|
||||
let inner = &self.0 .0;
|
||||
|
||||
// set buffer back
|
||||
let result = match result {
|
||||
Ok(0) => {
|
||||
// log::debug!("{}: WROTE ALL {:?}", self.0.tag(), inner.buffer.write_destination_size());
|
||||
self.0.memory_pool().release_write_buf(buf);
|
||||
Ok(inner.buffer.write_destination_size())
|
||||
}
|
||||
Ok(_) => {
|
||||
if let Some(b) = inner.buffer.get_write_destination() {
|
||||
buf.extend_from_slice(&b);
|
||||
self.0.memory_pool().release_write_buf(b);
|
||||
}
|
||||
let l = buf.len();
|
||||
// log::debug!("{}: WROTE SOME {:?}", self.0.tag(), l);
|
||||
inner.buffer.set_write_destination(buf);
|
||||
Ok(l)
|
||||
}
|
||||
Err(e) => Err(e),
|
||||
};
|
||||
|
||||
let mut flags = inner.flags.get();
|
||||
match result {
|
||||
Ok(0) => {
|
||||
// all data has been written
|
||||
flags.insert(Flags::WR_PAUSED);
|
||||
|
||||
if flags.is_task_waiting_for_write() {
|
||||
flags.task_waiting_for_write_is_done();
|
||||
inner.write_task.wake();
|
||||
}
|
||||
|
||||
if flags.is_waiting_for_write() {
|
||||
flags.waiting_for_write_is_done();
|
||||
inner.dispatch_task.wake();
|
||||
}
|
||||
inner.flags.set(flags);
|
||||
Poll::Ready(())
|
||||
}
|
||||
Ok(len) => {
|
||||
// if write buffer is smaller than high watermark value, turn off back-pressure
|
||||
if flags.contains(Flags::BUF_W_BACKPRESSURE)
|
||||
&& len < inner.pool.get().write_params_high() << 1
|
||||
{
|
||||
flags.remove(Flags::BUF_W_BACKPRESSURE);
|
||||
inner.flags.set(flags);
|
||||
inner.dispatch_task.wake();
|
||||
}
|
||||
Poll::Pending
|
||||
}
|
||||
Err(e) => {
|
||||
inner.io_stopped(Some(e));
|
||||
Poll::Ready(())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Get read buffer
|
||||
pub fn is_read_ready(&self) -> bool {
|
||||
// check read readiness
|
||||
if let Some(waker) = self.0 .0.read_task.take() {
|
||||
let mut cx = Context::from_waker(&waker);
|
||||
|
||||
if let Poll::Ready(ReadStatus::Ready) = self.0.filter().poll_read_ready(&mut cx)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
|
||||
pub fn with_read_buf<F>(&self, f: F) -> Poll<()>
|
||||
where
|
||||
F: FnOnce(&mut BytesVec) -> Poll<io::Result<usize>>,
|
||||
{
|
||||
let inner = &self.0 .0;
|
||||
let (hw, lw) = self.0.memory_pool().read_params().unpack();
|
||||
let result = inner.buffer.with_read_source(&self.0, |buf| {
|
||||
// make sure we've got room
|
||||
let remaining = buf.remaining_mut();
|
||||
if remaining < lw {
|
||||
buf.reserve(hw - remaining);
|
||||
}
|
||||
|
||||
f(buf)
|
||||
});
|
||||
|
||||
// handle buffer changes
|
||||
match result {
|
||||
Poll::Ready(Ok(0)) => {
|
||||
inner.io_stopped(None);
|
||||
Poll::Ready(())
|
||||
}
|
||||
Poll::Ready(Ok(nbytes)) => {
|
||||
let filter = self.0.filter();
|
||||
let _ = filter
|
||||
.process_read_buf(&self.0, &inner.buffer, 0, nbytes)
|
||||
.and_then(|status| {
|
||||
if status.nbytes > 0 {
|
||||
// dest buffer has new data, wake up dispatcher
|
||||
if inner.buffer.read_destination_size() >= hw {
|
||||
log::trace!(
|
||||
"{}: Io read buffer is too large {}, enable read back-pressure",
|
||||
self.0.tag(),
|
||||
nbytes
|
||||
);
|
||||
inner.insert_flags(Flags::BUF_R_READY | Flags::BUF_R_FULL);
|
||||
} else {
|
||||
inner.insert_flags(Flags::BUF_R_READY);
|
||||
|
||||
if nbytes >= hw {
|
||||
// read task is paused because of read back-pressure
|
||||
// but there is no new data in top most read buffer
|
||||
// so we need to wake up read task to read more data
|
||||
// otherwise read task would sleep forever
|
||||
inner.read_task.wake();
|
||||
}
|
||||
}
|
||||
log::trace!(
|
||||
"{}: New {} bytes available, wakeup dispatcher",
|
||||
self.0.tag(),
|
||||
nbytes
|
||||
);
|
||||
if !inner.dispatch_task.wake_checked() {
|
||||
log::error!("Dispatcher waker is not registered");
|
||||
}
|
||||
} else {
|
||||
if nbytes >= hw {
|
||||
// read task is paused because of read back-pressure
|
||||
// but there is no new data in top most read buffer
|
||||
// so we need to wake up read task to read more data
|
||||
// otherwise read task would sleep forever
|
||||
inner.read_task.wake();
|
||||
}
|
||||
if inner.flags.get().is_waiting_for_read() {
|
||||
// in case of "notify" we must wake up dispatch task
|
||||
// if we read any data from source
|
||||
inner.dispatch_task.wake();
|
||||
}
|
||||
}
|
||||
|
||||
// while reading, filter wrote some data
|
||||
// in that case filters need to process write buffers
|
||||
// and potentialy wake write task
|
||||
if status.need_write {
|
||||
filter.process_write_buf(&self.0, &inner.buffer, 0)
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
})
|
||||
.map_err(|err| {
|
||||
inner.dispatch_task.wake();
|
||||
inner.io_stopped(Some(err));
|
||||
inner.insert_flags(Flags::BUF_R_READY);
|
||||
});
|
||||
Poll::Pending
|
||||
}
|
||||
Poll::Ready(Err(e)) => {
|
||||
inner.io_stopped(Some(e));
|
||||
Poll::Ready(())
|
||||
}
|
||||
Poll::Pending => {
|
||||
self.shutdown_filters();
|
||||
Poll::Pending
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Get write buffer
|
||||
pub fn with_write_buf<F>(&self, f: F) -> Poll<()>
|
||||
where
|
||||
F: FnOnce(&BytesVec) -> Poll<io::Result<usize>>,
|
||||
{
|
||||
let inner = &self.0 .0;
|
||||
let result = inner.buffer.with_write_destination(&self.0, |buf| {
|
||||
let Some(buf) =
|
||||
buf.and_then(|buf| if buf.is_empty() { None } else { Some(buf) })
|
||||
else {
|
||||
return Poll::Ready(Ok(0));
|
||||
};
|
||||
|
||||
match ready!(f(buf)) {
|
||||
Ok(0) => {
|
||||
log::trace!("{}: Disconnected during flush", self.tag());
|
||||
Poll::Ready(Err(io::Error::new(
|
||||
io::ErrorKind::WriteZero,
|
||||
"failed to write frame to transport",
|
||||
)))
|
||||
}
|
||||
Ok(n) => {
|
||||
if n == buf.len() {
|
||||
buf.clear();
|
||||
Poll::Ready(Ok(0))
|
||||
} else {
|
||||
buf.advance(n);
|
||||
Poll::Ready(Ok(buf.len()))
|
||||
}
|
||||
}
|
||||
Err(e) => Poll::Ready(Err(e)),
|
||||
}
|
||||
});
|
||||
|
||||
let mut flags = inner.flags.get();
|
||||
|
||||
let result = match result {
|
||||
Poll::Pending => {
|
||||
flags.remove(Flags::WR_PAUSED);
|
||||
Poll::Pending
|
||||
}
|
||||
Poll::Ready(Ok(0)) => {
|
||||
// all data has been written
|
||||
flags.insert(Flags::WR_PAUSED);
|
||||
|
||||
if flags.is_task_waiting_for_write() {
|
||||
flags.task_waiting_for_write_is_done();
|
||||
inner.write_task.wake();
|
||||
}
|
||||
|
||||
if flags.is_waiting_for_write() {
|
||||
flags.waiting_for_write_is_done();
|
||||
inner.dispatch_task.wake();
|
||||
}
|
||||
Poll::Ready(())
|
||||
}
|
||||
Poll::Ready(Ok(len)) => {
|
||||
// if write buffer is smaller than high watermark value, turn off back-pressure
|
||||
if flags.contains(Flags::BUF_W_BACKPRESSURE)
|
||||
&& len < inner.pool.get().write_params_high() << 1
|
||||
{
|
||||
flags.remove(Flags::BUF_W_BACKPRESSURE);
|
||||
inner.dispatch_task.wake();
|
||||
}
|
||||
Poll::Pending
|
||||
}
|
||||
Poll::Ready(Err(e)) => {
|
||||
self.0 .0.io_stopped(Some(e));
|
||||
Poll::Ready(())
|
||||
}
|
||||
};
|
||||
|
||||
inner.flags.set(flags);
|
||||
result
|
||||
}
|
||||
|
||||
fn shutdown_filters(&self) {
|
||||
let io = &self.0;
|
||||
let st = &self.0 .0;
|
||||
if st.flags.get().contains(Flags::IO_STOPPING_FILTERS) {
|
||||
let flags = st.flags.get();
|
||||
|
||||
if !flags.intersects(Flags::IO_STOPPED | Flags::IO_STOPPING) {
|
||||
let filter = io.filter();
|
||||
match filter.shutdown(io, &st.buffer, 0) {
|
||||
Ok(Poll::Ready(())) => {
|
||||
st.dispatch_task.wake();
|
||||
st.insert_flags(Flags::IO_STOPPING);
|
||||
}
|
||||
Ok(Poll::Pending) => {
|
||||
// check read buffer, if buffer is not consumed it is unlikely
|
||||
// that filter will properly complete shutdown
|
||||
if flags.contains(Flags::RD_PAUSED)
|
||||
|| flags.contains(Flags::BUF_R_FULL | Flags::BUF_R_READY)
|
||||
{
|
||||
st.dispatch_task.wake();
|
||||
st.insert_flags(Flags::IO_STOPPING);
|
||||
}
|
||||
}
|
||||
Err(err) => {
|
||||
st.io_stopped(Some(err));
|
||||
}
|
||||
}
|
||||
if let Err(err) = filter.process_write_buf(io, &st.buffer, 0) {
|
||||
st.io_stopped(Some(err));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Clone for IoContext {
|
||||
fn clone(&self) -> Self {
|
||||
Self(self.0.clone())
|
||||
}
|
||||
}
|
||||
|
|
|
@ -27,7 +27,7 @@ where
|
|||
S: ServiceFactory<IoBoxed, C>,
|
||||
C: Clone,
|
||||
{
|
||||
chain_factory(fn_service(|io: Io<F>| Ready::Ok(IoBoxed::from(io))))
|
||||
chain_factory(fn_service(|io: Io<F>| Ready::Ok(io.boxed())))
|
||||
.map_init_err(|_| panic!())
|
||||
.and_then(srv)
|
||||
}
|
||||
|
|
|
@ -1,5 +1,9 @@
|
|||
# Changes
|
||||
|
||||
## [0.1.4] - 2025-03-14
|
||||
|
||||
* Enable env_logger for test macro
|
||||
|
||||
## [0.1.2] - 2021-02-25
|
||||
|
||||
* Export runtime from ntex crate
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
[package]
|
||||
name = "ntex-macros"
|
||||
version = "0.1.3"
|
||||
version = "0.1.4"
|
||||
description = "ntex proc macros"
|
||||
readme = "README.md"
|
||||
authors = ["ntex contributors <team@ntex.rs>"]
|
||||
|
@ -18,4 +18,3 @@ proc-macro2 = "^1"
|
|||
[dev-dependencies]
|
||||
ntex = "2"
|
||||
futures = "0.3"
|
||||
env_logger = "0.11"
|
||||
|
|
|
@ -262,6 +262,7 @@ pub fn rt_test(_: TokenStream, item: TokenStream) -> TokenStream {
|
|||
quote! {
|
||||
#(#attrs)*
|
||||
fn #name() #ret {
|
||||
ntex::util::enable_test_logging();
|
||||
ntex::rt::System::new("test")
|
||||
.block_on(async { #body })
|
||||
}
|
||||
|
@ -271,6 +272,7 @@ pub fn rt_test(_: TokenStream, item: TokenStream) -> TokenStream {
|
|||
#[test]
|
||||
#(#attrs)*
|
||||
fn #name() #ret {
|
||||
ntex::util::enable_test_logging();
|
||||
ntex::rt::System::new("test")
|
||||
.block_on(async { #body })
|
||||
}
|
||||
|
|
|
@ -1,5 +1,55 @@
|
|||
# Changes
|
||||
|
||||
## [2.5.10] - 2025-03-28
|
||||
|
||||
* Better closed sockets handling
|
||||
|
||||
## [2.5.9] - 2025-03-27
|
||||
|
||||
* Handle closed sockets
|
||||
|
||||
## [2.5.8] - 2025-03-25
|
||||
|
||||
* Update neon runtime
|
||||
|
||||
## [2.5.7] - 2025-03-21
|
||||
|
||||
* Simplify neon poll impl
|
||||
|
||||
## [2.5.6] - 2025-03-20
|
||||
|
||||
* Redesign neon poll support
|
||||
|
||||
## [2.5.5] - 2025-03-17
|
||||
|
||||
* Add check for required io-uring opcodes
|
||||
|
||||
* Handle io-uring cancelation
|
||||
|
||||
## [2.5.4] - 2025-03-15
|
||||
|
||||
* Close FD in various case for poll driver
|
||||
|
||||
## [2.5.3] - 2025-03-14
|
||||
|
||||
* Fix operation cancelation handling for poll driver
|
||||
|
||||
## [2.5.2] - 2025-03-14
|
||||
|
||||
* Fix operation cancelation handling for io-uring driver
|
||||
|
||||
## [2.5.1] - 2025-03-14
|
||||
|
||||
* Fix socket connect for io-uring driver
|
||||
|
||||
## [2.5.0] - 2025-03-12
|
||||
|
||||
* Add neon runtime support
|
||||
|
||||
* Drop glommio support
|
||||
|
||||
* Drop async-std support
|
||||
|
||||
## [2.4.0] - 2024-09-25
|
||||
|
||||
* Update to glommio v0.9
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
[package]
|
||||
name = "ntex-net"
|
||||
version = "2.4.0"
|
||||
version = "2.5.10"
|
||||
authors = ["ntex contributors <team@ntex.rs>"]
|
||||
description = "ntexwork utils for ntex framework"
|
||||
keywords = ["network", "framework", "async", "futures"]
|
||||
|
@ -24,28 +24,36 @@ tokio = ["ntex-rt/tokio", "ntex-tokio"]
|
|||
# compio runtime
|
||||
compio = ["ntex-rt/compio", "ntex-compio"]
|
||||
|
||||
# glommio runtime
|
||||
glommio = ["ntex-rt/glommio", "ntex-glommio"]
|
||||
# neon runtime
|
||||
neon = ["ntex-rt/neon", "ntex-neon", "slab", "socket2"]
|
||||
|
||||
# async-std runtime
|
||||
async-std = ["ntex-rt/async-std", "ntex-async-std"]
|
||||
polling = ["ntex-neon/polling", "dep:polling", "socket2"]
|
||||
io-uring = ["ntex-neon/io-uring", "dep:io-uring", "socket2"]
|
||||
|
||||
[dependencies]
|
||||
ntex-service = "3.3"
|
||||
ntex-bytes = "0.1"
|
||||
ntex-http = "0.1"
|
||||
ntex-io = "2.8"
|
||||
ntex-rt = "0.4.21"
|
||||
ntex-io = "2.11.1"
|
||||
ntex-rt = "0.4.25"
|
||||
ntex-util = "2.5"
|
||||
|
||||
ntex-tokio = { version = "0.5.3", optional = true }
|
||||
ntex-compio = { version = "0.2.4", optional = true }
|
||||
ntex-glommio = { version = "0.5.2", optional = true }
|
||||
ntex-async-std = { version = "0.5.1", optional = true }
|
||||
ntex-neon = { version = "0.1.15", optional = true }
|
||||
|
||||
log = "0.4"
|
||||
thiserror = "1"
|
||||
bitflags = { workspace = true }
|
||||
cfg-if = { workspace = true }
|
||||
log = { workspace = true }
|
||||
libc = { workspace = true }
|
||||
slab = { workspace = true, optional = true }
|
||||
socket2 = { workspace = true, optional = true, features = ["all"] }
|
||||
thiserror = { workspace = true }
|
||||
|
||||
# Linux specific dependencies
|
||||
[target.'cfg(target_os = "linux")'.dependencies]
|
||||
io-uring = { workspace = true, optional = true }
|
||||
polling = { workspace = true, optional = true }
|
||||
|
||||
[dev-dependencies]
|
||||
ntex = "2"
|
||||
env_logger = "0.11"
|
||||
|
|
|
@ -6,63 +6,18 @@ pub use ntex_tokio::{from_tcp_stream, tcp_connect, tcp_connect_in};
|
|||
#[cfg(all(unix, feature = "tokio"))]
|
||||
pub use ntex_tokio::{from_unix_stream, unix_connect, unix_connect_in};
|
||||
|
||||
#[cfg(all(
|
||||
feature = "compio",
|
||||
not(feature = "tokio"),
|
||||
not(feature = "async-std"),
|
||||
not(feature = "glommio")
|
||||
))]
|
||||
#[cfg(all(feature = "compio", not(feature = "tokio"), not(feature = "neon")))]
|
||||
pub use ntex_compio::{from_tcp_stream, tcp_connect, tcp_connect_in};
|
||||
|
||||
#[cfg(all(
|
||||
unix,
|
||||
feature = "compio",
|
||||
not(feature = "tokio"),
|
||||
not(feature = "async-std"),
|
||||
not(feature = "glommio")
|
||||
not(feature = "neon")
|
||||
))]
|
||||
pub use ntex_compio::{from_unix_stream, unix_connect, unix_connect_in};
|
||||
|
||||
#[cfg(all(
|
||||
feature = "async-std",
|
||||
not(feature = "tokio"),
|
||||
not(feature = "compio"),
|
||||
not(feature = "glommio")
|
||||
))]
|
||||
pub use ntex_async_std::{from_tcp_stream, tcp_connect, tcp_connect_in};
|
||||
|
||||
#[cfg(all(
|
||||
unix,
|
||||
feature = "async-std",
|
||||
not(feature = "tokio"),
|
||||
not(feature = "compio"),
|
||||
not(feature = "glommio")
|
||||
))]
|
||||
pub use ntex_async_std::{from_unix_stream, unix_connect, unix_connect_in};
|
||||
|
||||
#[cfg(all(
|
||||
feature = "glommio",
|
||||
not(feature = "tokio"),
|
||||
not(feature = "compio"),
|
||||
not(feature = "async-std")
|
||||
))]
|
||||
pub use ntex_glommio::{from_tcp_stream, tcp_connect, tcp_connect_in};
|
||||
|
||||
#[cfg(all(
|
||||
unix,
|
||||
feature = "glommio",
|
||||
not(feature = "tokio"),
|
||||
not(feature = "compio"),
|
||||
not(feature = "async-std")
|
||||
))]
|
||||
pub use ntex_glommio::{from_unix_stream, unix_connect, unix_connect_in};
|
||||
|
||||
#[cfg(all(
|
||||
not(feature = "tokio"),
|
||||
not(feature = "compio"),
|
||||
not(feature = "async-std"),
|
||||
not(feature = "glommio")
|
||||
))]
|
||||
#[cfg(all(not(feature = "tokio"), not(feature = "compio"), not(feature = "neon")))]
|
||||
mod no_rt {
|
||||
use ntex_io::Io;
|
||||
|
||||
|
@ -127,10 +82,5 @@ mod no_rt {
|
|||
}
|
||||
}
|
||||
|
||||
#[cfg(all(
|
||||
not(feature = "tokio"),
|
||||
not(feature = "compio"),
|
||||
not(feature = "async-std"),
|
||||
not(feature = "glommio")
|
||||
))]
|
||||
#[cfg(all(not(feature = "tokio"), not(feature = "compio"), not(feature = "neon")))]
|
||||
pub use no_rt::*;
|
||||
|
|
|
@ -197,7 +197,7 @@ impl<T: Address> Future for TcpConnectorResponse<T> {
|
|||
Poll::Ready(Ok(sock)) => {
|
||||
let req = this.req.take().unwrap();
|
||||
log::trace!(
|
||||
"{}: TCP connector - successfully connected to connecting to {:?} - {:?}",
|
||||
"{}: TCP connector - successfully connected to {:?} - {:?}",
|
||||
this.tag,
|
||||
req.host(),
|
||||
sock.query::<types::PeerAddr>().get()
|
||||
|
|
86
ntex-net/src/helpers.rs
Normal file
86
ntex-net/src/helpers.rs
Normal file
|
@ -0,0 +1,86 @@
|
|||
use std::{io, net::SocketAddr, os::fd::FromRawFd, path::Path};
|
||||
|
||||
use ntex_neon::syscall;
|
||||
use ntex_util::channel::oneshot::channel;
|
||||
use socket2::{Protocol, SockAddr, Socket, Type};
|
||||
|
||||
pub(crate) fn pool_io_err<T, E>(result: std::result::Result<T, E>) -> io::Result<T> {
|
||||
result.map_err(|_| io::Error::new(io::ErrorKind::Other, "Thread pool panic"))
|
||||
}
|
||||
|
||||
pub(crate) async fn connect(addr: SocketAddr) -> io::Result<Socket> {
|
||||
let addr = SockAddr::from(addr);
|
||||
let domain = addr.domain().into();
|
||||
connect_inner(addr, domain, Type::STREAM.into(), Protocol::TCP.into()).await
|
||||
}
|
||||
|
||||
pub(crate) async fn connect_unix(path: impl AsRef<Path>) -> io::Result<Socket> {
|
||||
let addr = SockAddr::unix(path)?;
|
||||
connect_inner(addr, socket2::Domain::UNIX.into(), Type::STREAM.into(), 0).await
|
||||
}
|
||||
|
||||
async fn connect_inner(
|
||||
addr: SockAddr,
|
||||
domain: i32,
|
||||
socket_type: i32,
|
||||
protocol: i32,
|
||||
) -> io::Result<Socket> {
|
||||
#[allow(unused_mut)]
|
||||
let mut ty = socket_type;
|
||||
#[cfg(any(
|
||||
target_os = "android",
|
||||
target_os = "dragonfly",
|
||||
target_os = "freebsd",
|
||||
target_os = "fuchsia",
|
||||
target_os = "hurd",
|
||||
target_os = "illumos",
|
||||
target_os = "linux",
|
||||
target_os = "netbsd",
|
||||
target_os = "openbsd",
|
||||
))]
|
||||
{
|
||||
ty |= libc::SOCK_CLOEXEC;
|
||||
}
|
||||
|
||||
let fd = ntex_rt::spawn_blocking(move || syscall!(libc::socket(domain, ty, protocol)))
|
||||
.await
|
||||
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))
|
||||
.and_then(pool_io_err)?;
|
||||
|
||||
let (sender, rx) = channel();
|
||||
|
||||
crate::rt_impl::connect::ConnectOps::current().connect(fd, addr, sender)?;
|
||||
|
||||
rx.await
|
||||
.map_err(|_| io::Error::new(io::ErrorKind::Other, "IO Driver is gone"))
|
||||
.and_then(|item| item)?;
|
||||
|
||||
Ok(unsafe { Socket::from_raw_fd(fd) })
|
||||
}
|
||||
|
||||
pub(crate) fn prep_socket(sock: Socket) -> io::Result<Socket> {
|
||||
#[cfg(not(any(
|
||||
target_os = "android",
|
||||
target_os = "dragonfly",
|
||||
target_os = "freebsd",
|
||||
target_os = "fuchsia",
|
||||
target_os = "hurd",
|
||||
target_os = "illumos",
|
||||
target_os = "linux",
|
||||
target_os = "netbsd",
|
||||
target_os = "openbsd",
|
||||
target_os = "espidf",
|
||||
target_os = "vita",
|
||||
)))]
|
||||
sock.set_cloexec(true)?;
|
||||
#[cfg(any(
|
||||
target_os = "ios",
|
||||
target_os = "macos",
|
||||
target_os = "tvos",
|
||||
target_os = "watchos",
|
||||
))]
|
||||
sock.set_nosigpipe(true)?;
|
||||
sock.set_nonblocking(true)?;
|
||||
|
||||
Ok(sock)
|
||||
}
|
|
@ -1,5 +1,6 @@
|
|||
//! Utility for async runtime abstraction
|
||||
#![deny(rust_2018_idioms, unreachable_pub, missing_debug_implementations)]
|
||||
#![allow(unused_variables, dead_code)]
|
||||
|
||||
mod compat;
|
||||
pub mod connect;
|
||||
|
@ -7,4 +8,25 @@ pub mod connect;
|
|||
pub use ntex_io::Io;
|
||||
pub use ntex_rt::{spawn, spawn_blocking};
|
||||
|
||||
pub use self::compat::*;
|
||||
cfg_if::cfg_if! {
|
||||
if #[cfg(all(feature = "neon", target_os = "linux", feature = "io-uring"))] {
|
||||
#[path = "rt_uring/mod.rs"]
|
||||
mod rt_impl;
|
||||
pub use self::rt_impl::{
|
||||
from_tcp_stream, from_unix_stream, tcp_connect, tcp_connect_in, unix_connect,
|
||||
unix_connect_in,
|
||||
};
|
||||
} else if #[cfg(all(unix, feature = "neon"))] {
|
||||
#[path = "rt_polling/mod.rs"]
|
||||
mod rt_impl;
|
||||
pub use self::rt_impl::{
|
||||
from_tcp_stream, from_unix_stream, tcp_connect, tcp_connect_in, unix_connect,
|
||||
unix_connect_in,
|
||||
};
|
||||
} else {
|
||||
pub use self::compat::*;
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(all(unix, feature = "neon"))]
|
||||
mod helpers;
|
||||
|
|
111
ntex-net/src/rt_polling/connect.rs
Normal file
111
ntex-net/src/rt_polling/connect.rs
Normal file
|
@ -0,0 +1,111 @@
|
|||
use std::os::fd::{AsRawFd, RawFd};
|
||||
use std::{cell::RefCell, io, rc::Rc, task::Poll};
|
||||
|
||||
use ntex_neon::driver::{DriverApi, Event, Handler};
|
||||
use ntex_neon::{syscall, Runtime};
|
||||
use ntex_util::channel::oneshot::Sender;
|
||||
use slab::Slab;
|
||||
use socket2::SockAddr;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub(crate) struct ConnectOps(Rc<ConnectOpsInner>);
|
||||
|
||||
#[derive(Debug)]
|
||||
enum Change {
|
||||
Event(Event),
|
||||
Error(io::Error),
|
||||
}
|
||||
|
||||
struct ConnectOpsBatcher {
|
||||
inner: Rc<ConnectOpsInner>,
|
||||
}
|
||||
|
||||
struct Item {
|
||||
fd: RawFd,
|
||||
sender: Sender<io::Result<()>>,
|
||||
}
|
||||
|
||||
struct ConnectOpsInner {
|
||||
api: DriverApi,
|
||||
connects: RefCell<Slab<Item>>,
|
||||
}
|
||||
|
||||
impl ConnectOps {
|
||||
pub(crate) fn current() -> Self {
|
||||
Runtime::value(|rt| {
|
||||
let mut inner = None;
|
||||
rt.driver().register(|api| {
|
||||
let ops = Rc::new(ConnectOpsInner {
|
||||
api,
|
||||
connects: RefCell::new(Slab::new()),
|
||||
});
|
||||
inner = Some(ops.clone());
|
||||
Box::new(ConnectOpsBatcher { inner: ops })
|
||||
});
|
||||
|
||||
ConnectOps(inner.unwrap())
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) fn connect(
|
||||
&self,
|
||||
fd: RawFd,
|
||||
addr: SockAddr,
|
||||
sender: Sender<io::Result<()>>,
|
||||
) -> io::Result<usize> {
|
||||
let result = syscall!(break libc::connect(fd, addr.as_ptr(), addr.len()));
|
||||
|
||||
if let Poll::Ready(res) = result {
|
||||
res?;
|
||||
}
|
||||
|
||||
let item = Item { fd, sender };
|
||||
let id = self.0.connects.borrow_mut().insert(item);
|
||||
|
||||
self.0.api.attach(fd, id as u32, Some(Event::writable(0)));
|
||||
Ok(id)
|
||||
}
|
||||
}
|
||||
|
||||
impl Handler for ConnectOpsBatcher {
|
||||
fn event(&mut self, id: usize, event: Event) {
|
||||
log::debug!("connect-fd is readable {:?}", id);
|
||||
|
||||
let mut connects = self.inner.connects.borrow_mut();
|
||||
|
||||
if connects.contains(id) {
|
||||
let item = connects.remove(id);
|
||||
if event.writable {
|
||||
let mut err: libc::c_int = 0;
|
||||
let mut err_len = std::mem::size_of::<libc::c_int>() as libc::socklen_t;
|
||||
|
||||
let res = syscall!(libc::getsockopt(
|
||||
item.fd.as_raw_fd(),
|
||||
libc::SOL_SOCKET,
|
||||
libc::SO_ERROR,
|
||||
&mut err as *mut _ as *mut _,
|
||||
&mut err_len
|
||||
));
|
||||
|
||||
let res = if err == 0 {
|
||||
res.map(|_| ())
|
||||
} else {
|
||||
Err(io::Error::from_raw_os_error(err))
|
||||
};
|
||||
|
||||
self.inner.api.detach(item.fd, id as u32);
|
||||
let _ = item.sender.send(res);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn error(&mut self, id: usize, err: io::Error) {
|
||||
let mut connects = self.inner.connects.borrow_mut();
|
||||
|
||||
if connects.contains(id) {
|
||||
let item = connects.remove(id);
|
||||
let _ = item.sender.send(Err(err));
|
||||
self.inner.api.detach(item.fd, id as u32);
|
||||
}
|
||||
}
|
||||
}
|
368
ntex-net/src/rt_polling/driver.rs
Normal file
368
ntex-net/src/rt_polling/driver.rs
Normal file
|
@ -0,0 +1,368 @@
|
|||
use std::os::fd::{AsRawFd, RawFd};
|
||||
use std::{cell::Cell, cell::RefCell, future::Future, io, mem, rc::Rc, task, task::Poll};
|
||||
|
||||
use ntex_neon::driver::{DriverApi, Event, Handler};
|
||||
use ntex_neon::{syscall, Runtime};
|
||||
use slab::Slab;
|
||||
|
||||
use ntex_bytes::BufMut;
|
||||
use ntex_io::IoContext;
|
||||
|
||||
pub(crate) struct StreamCtl<T> {
|
||||
id: u32,
|
||||
inner: Rc<StreamOpsInner<T>>,
|
||||
}
|
||||
|
||||
bitflags::bitflags! {
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
struct Flags: u8 {
|
||||
const RD = 0b0000_0001;
|
||||
const WR = 0b0000_0010;
|
||||
}
|
||||
}
|
||||
|
||||
struct StreamItem<T> {
|
||||
io: Option<T>,
|
||||
fd: RawFd,
|
||||
flags: Flags,
|
||||
ref_count: u16,
|
||||
context: IoContext,
|
||||
}
|
||||
|
||||
pub(crate) struct StreamOps<T>(Rc<StreamOpsInner<T>>);
|
||||
|
||||
struct StreamOpsHandler<T> {
|
||||
inner: Rc<StreamOpsInner<T>>,
|
||||
}
|
||||
|
||||
struct StreamOpsInner<T> {
|
||||
api: DriverApi,
|
||||
delayd_drop: Cell<bool>,
|
||||
feed: RefCell<Vec<u32>>,
|
||||
streams: Cell<Option<Box<Slab<StreamItem<T>>>>>,
|
||||
}
|
||||
|
||||
impl<T> StreamItem<T> {
|
||||
fn tag(&self) -> &'static str {
|
||||
self.context.tag()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: AsRawFd + 'static> StreamOps<T> {
|
||||
pub(crate) fn current() -> Self {
|
||||
Runtime::value(|rt| {
|
||||
let mut inner = None;
|
||||
rt.driver().register(|api| {
|
||||
let ops = Rc::new(StreamOpsInner {
|
||||
api,
|
||||
feed: RefCell::new(Vec::new()),
|
||||
delayd_drop: Cell::new(false),
|
||||
streams: Cell::new(Some(Box::new(Slab::new()))),
|
||||
});
|
||||
inner = Some(ops.clone());
|
||||
Box::new(StreamOpsHandler { inner: ops })
|
||||
});
|
||||
|
||||
StreamOps(inner.unwrap())
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) fn register(&self, io: T, context: IoContext) -> StreamCtl<T> {
|
||||
let fd = io.as_raw_fd();
|
||||
let stream = self.0.with(move |streams| {
|
||||
let item = StreamItem {
|
||||
fd,
|
||||
context,
|
||||
io: Some(io),
|
||||
ref_count: 1,
|
||||
flags: Flags::empty(),
|
||||
};
|
||||
StreamCtl {
|
||||
id: streams.insert(item) as u32,
|
||||
inner: self.0.clone(),
|
||||
}
|
||||
});
|
||||
|
||||
self.0.api.attach(
|
||||
fd,
|
||||
stream.id,
|
||||
Some(Event::new(0, false, false).with_interrupt()),
|
||||
);
|
||||
stream
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Clone for StreamOps<T> {
|
||||
fn clone(&self) -> Self {
|
||||
Self(self.0.clone())
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Handler for StreamOpsHandler<T> {
|
||||
fn event(&mut self, id: usize, ev: Event) {
|
||||
self.inner.with(|streams| {
|
||||
if !streams.contains(id) {
|
||||
return;
|
||||
}
|
||||
let item = &mut streams[id];
|
||||
if item.io.is_none() {
|
||||
return;
|
||||
}
|
||||
log::debug!("{}: FD event {:?} event: {:?}", item.tag(), id, ev);
|
||||
|
||||
// handle HUP
|
||||
if ev.is_interrupt() {
|
||||
item.context.stopped(None);
|
||||
close(id as u32, item, &self.inner.api, None, true);
|
||||
return;
|
||||
}
|
||||
|
||||
let mut renew_ev = Event::new(0, false, false).with_interrupt();
|
||||
|
||||
if ev.readable {
|
||||
let res = item.context.with_read_buf(|buf| {
|
||||
let chunk = buf.chunk_mut();
|
||||
let result = task::ready!(syscall!(
|
||||
break libc::read(item.fd, chunk.as_mut_ptr() as _, chunk.len())
|
||||
));
|
||||
if let Ok(size) = result {
|
||||
log::debug!("{}: data {:?}, s: {:?}", item.tag(), item.fd, size);
|
||||
unsafe { buf.advance_mut(size) };
|
||||
}
|
||||
Poll::Ready(result)
|
||||
});
|
||||
|
||||
if res.is_pending() && item.context.is_read_ready() {
|
||||
renew_ev.readable = true;
|
||||
item.flags.insert(Flags::RD);
|
||||
} else {
|
||||
item.flags.remove(Flags::RD);
|
||||
}
|
||||
} else if item.flags.contains(Flags::RD) {
|
||||
renew_ev.readable = true;
|
||||
}
|
||||
|
||||
if ev.writable {
|
||||
let result = item.context.with_write_buf(|buf| {
|
||||
log::debug!("{}: write {:?} s: {:?}", item.tag(), item.fd, buf.len());
|
||||
syscall!(break libc::write(item.fd, buf[..].as_ptr() as _, buf.len()))
|
||||
});
|
||||
if result.is_pending() {
|
||||
renew_ev.writable = true;
|
||||
item.flags.insert(Flags::WR);
|
||||
} else {
|
||||
item.flags.remove(Flags::WR);
|
||||
}
|
||||
} else if item.flags.contains(Flags::WR) {
|
||||
renew_ev.writable = true;
|
||||
}
|
||||
|
||||
self.inner.api.modify(item.fd, id as u32, renew_ev);
|
||||
|
||||
// delayed drops
|
||||
if self.inner.delayd_drop.get() {
|
||||
for id in self.inner.feed.borrow_mut().drain(..) {
|
||||
let item = &mut streams[id as usize];
|
||||
item.ref_count -= 1;
|
||||
if item.ref_count == 0 {
|
||||
let mut item = streams.remove(id as usize);
|
||||
log::debug!(
|
||||
"{}: Drop ({}), {:?}, has-io: {}",
|
||||
item.tag(),
|
||||
id,
|
||||
item.fd,
|
||||
item.io.is_some()
|
||||
);
|
||||
close(id, &mut item, &self.inner.api, None, true);
|
||||
}
|
||||
}
|
||||
self.inner.delayd_drop.set(false);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
fn error(&mut self, id: usize, err: io::Error) {
|
||||
self.inner.with(|streams| {
|
||||
if let Some(item) = streams.get_mut(id) {
|
||||
log::debug!(
|
||||
"{}: FD is failed ({}) {:?}, err: {:?}",
|
||||
item.tag(),
|
||||
id,
|
||||
item.fd,
|
||||
err
|
||||
);
|
||||
close(id as u32, item, &self.inner.api, Some(err), false);
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> StreamOpsInner<T> {
|
||||
fn with<F, R>(&self, f: F) -> R
|
||||
where
|
||||
F: FnOnce(&mut Slab<StreamItem<T>>) -> R,
|
||||
{
|
||||
let mut streams = self.streams.take().unwrap();
|
||||
let result = f(&mut streams);
|
||||
self.streams.set(Some(streams));
|
||||
result
|
||||
}
|
||||
}
|
||||
|
||||
fn close<T>(
|
||||
id: u32,
|
||||
item: &mut StreamItem<T>,
|
||||
api: &DriverApi,
|
||||
error: Option<io::Error>,
|
||||
shutdown: bool,
|
||||
) -> Option<ntex_rt::JoinHandle<io::Result<i32>>> {
|
||||
if let Some(io) = item.io.take() {
|
||||
log::debug!("{}: Closing ({}), {:?}", item.tag(), id, item.fd);
|
||||
mem::forget(io);
|
||||
if let Some(err) = error {
|
||||
item.context.stopped(Some(err));
|
||||
}
|
||||
let fd = item.fd;
|
||||
api.detach(fd, id);
|
||||
Some(ntex_rt::spawn_blocking(move || {
|
||||
if shutdown {
|
||||
let _ = syscall!(libc::shutdown(fd, libc::SHUT_RDWR));
|
||||
}
|
||||
syscall!(libc::close(fd))
|
||||
}))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> StreamCtl<T> {
|
||||
pub(crate) fn close(self) -> impl Future<Output = io::Result<()>> {
|
||||
let id = self.id as usize;
|
||||
let fut = self.inner.with(|streams| {
|
||||
let item = &mut streams[id];
|
||||
close(self.id, item, &self.inner.api, None, false)
|
||||
});
|
||||
async move {
|
||||
if let Some(fut) = fut {
|
||||
fut.await
|
||||
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))
|
||||
.and_then(crate::helpers::pool_io_err)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn with_io<F, R>(&self, f: F) -> R
|
||||
where
|
||||
F: FnOnce(Option<&T>) -> R,
|
||||
{
|
||||
self.inner
|
||||
.with(|streams| f(streams[self.id as usize].io.as_ref()))
|
||||
}
|
||||
|
||||
pub(crate) fn modify(&self, rd: bool, wr: bool) {
|
||||
self.inner.with(|streams| {
|
||||
let item = &mut streams[self.id as usize];
|
||||
|
||||
log::debug!(
|
||||
"{}: Modify interest ({}), {:?} rd: {:?}, wr: {:?}",
|
||||
item.tag(),
|
||||
self.id,
|
||||
item.fd,
|
||||
rd,
|
||||
wr
|
||||
);
|
||||
|
||||
let mut event = Event::new(0, false, false).with_interrupt();
|
||||
|
||||
if rd {
|
||||
if item.flags.contains(Flags::RD) {
|
||||
event.readable = true;
|
||||
} else {
|
||||
let res = item.context.with_read_buf(|buf| {
|
||||
let chunk = buf.chunk_mut();
|
||||
let result = task::ready!(syscall!(
|
||||
break libc::read(item.fd, chunk.as_mut_ptr() as _, chunk.len())
|
||||
));
|
||||
if let Ok(size) = result {
|
||||
log::debug!(
|
||||
"{}: read {:?}, s: {:?}",
|
||||
item.tag(),
|
||||
item.fd,
|
||||
size
|
||||
);
|
||||
unsafe { buf.advance_mut(size) };
|
||||
}
|
||||
Poll::Ready(result)
|
||||
});
|
||||
|
||||
if res.is_pending() && item.context.is_read_ready() {
|
||||
event.readable = true;
|
||||
item.flags.insert(Flags::RD);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if wr {
|
||||
if item.flags.contains(Flags::WR) {
|
||||
event.writable = true;
|
||||
} else {
|
||||
let result = item.context.with_write_buf(|buf| {
|
||||
log::debug!(
|
||||
"{}: Writing ({}), buf: {:?}",
|
||||
item.tag(),
|
||||
self.id,
|
||||
buf.len()
|
||||
);
|
||||
syscall!(
|
||||
break libc::write(item.fd, buf[..].as_ptr() as _, buf.len())
|
||||
)
|
||||
});
|
||||
|
||||
if result.is_pending() {
|
||||
event.writable = true;
|
||||
item.flags.insert(Flags::WR);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
self.inner.api.modify(item.fd, self.id, event);
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Clone for StreamCtl<T> {
|
||||
fn clone(&self) -> Self {
|
||||
self.inner.with(|streams| {
|
||||
streams[self.id as usize].ref_count += 1;
|
||||
Self {
|
||||
id: self.id,
|
||||
inner: self.inner.clone(),
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Drop for StreamCtl<T> {
|
||||
fn drop(&mut self) {
|
||||
if let Some(mut streams) = self.inner.streams.take() {
|
||||
let id = self.id as usize;
|
||||
streams[id].ref_count -= 1;
|
||||
if streams[id].ref_count == 0 {
|
||||
let mut item = streams.remove(id);
|
||||
log::debug!(
|
||||
"{}: Drop io ({}), {:?}, has-io: {}",
|
||||
item.tag(),
|
||||
self.id,
|
||||
item.fd,
|
||||
item.io.is_some()
|
||||
);
|
||||
close(self.id, &mut item, &self.inner.api, None, true);
|
||||
}
|
||||
self.inner.streams.set(Some(streams));
|
||||
} else {
|
||||
self.inner.delayd_drop.set(true);
|
||||
self.inner.feed.borrow_mut().push(self.id);
|
||||
}
|
||||
}
|
||||
}
|
101
ntex-net/src/rt_polling/io.rs
Normal file
101
ntex-net/src/rt_polling/io.rs
Normal file
|
@ -0,0 +1,101 @@
|
|||
use std::{any, future::poll_fn, task::Poll};
|
||||
|
||||
use ntex_io::{
|
||||
types, Handle, IoContext, IoStream, ReadContext, ReadStatus, WriteContext, WriteStatus,
|
||||
};
|
||||
use ntex_rt::spawn;
|
||||
use socket2::Socket;
|
||||
|
||||
use super::driver::{StreamCtl, StreamOps};
|
||||
|
||||
impl IoStream for super::TcpStream {
|
||||
fn start(self, read: ReadContext, _: WriteContext) -> Option<Box<dyn Handle>> {
|
||||
let io = self.0;
|
||||
let context = read.context();
|
||||
let ctl = StreamOps::current().register(io, context.clone());
|
||||
let ctl2 = ctl.clone();
|
||||
spawn(async move { run(ctl, context).await });
|
||||
|
||||
Some(Box::new(HandleWrapper(ctl2)))
|
||||
}
|
||||
}
|
||||
|
||||
impl IoStream for super::UnixStream {
|
||||
fn start(self, read: ReadContext, _: WriteContext) -> Option<Box<dyn Handle>> {
|
||||
let io = self.0;
|
||||
let context = read.context();
|
||||
let ctl = StreamOps::current().register(io, context.clone());
|
||||
spawn(async move { run(ctl, context).await });
|
||||
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
struct HandleWrapper(StreamCtl<Socket>);
|
||||
|
||||
impl Handle for HandleWrapper {
|
||||
fn query(&self, id: any::TypeId) -> Option<Box<dyn any::Any>> {
|
||||
if id == any::TypeId::of::<types::PeerAddr>() {
|
||||
let addr = self.0.with_io(|io| io.and_then(|io| io.peer_addr().ok()));
|
||||
if let Some(addr) = addr.and_then(|addr| addr.as_socket()) {
|
||||
return Some(Box::new(types::PeerAddr(addr)));
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
|
||||
enum Status {
|
||||
Shutdown,
|
||||
Terminate,
|
||||
}
|
||||
|
||||
async fn run<T>(ctl: StreamCtl<T>, context: IoContext) {
|
||||
// Handle io read readiness
|
||||
let st = poll_fn(|cx| {
|
||||
let mut modify = false;
|
||||
let mut readable = false;
|
||||
let mut writable = false;
|
||||
let read = match context.poll_read_ready(cx) {
|
||||
Poll::Ready(ReadStatus::Ready) => {
|
||||
modify = true;
|
||||
readable = true;
|
||||
Poll::Pending
|
||||
}
|
||||
Poll::Ready(ReadStatus::Terminate) => Poll::Ready(()),
|
||||
Poll::Pending => {
|
||||
modify = true;
|
||||
Poll::Pending
|
||||
}
|
||||
};
|
||||
|
||||
let write = match context.poll_write_ready(cx) {
|
||||
Poll::Ready(WriteStatus::Ready) => {
|
||||
modify = true;
|
||||
writable = true;
|
||||
Poll::Pending
|
||||
}
|
||||
Poll::Ready(WriteStatus::Shutdown) => Poll::Ready(Status::Shutdown),
|
||||
Poll::Ready(WriteStatus::Terminate) => Poll::Ready(Status::Terminate),
|
||||
Poll::Pending => Poll::Pending,
|
||||
};
|
||||
|
||||
if modify {
|
||||
ctl.modify(readable, writable);
|
||||
}
|
||||
|
||||
if read.is_pending() && write.is_pending() {
|
||||
Poll::Pending
|
||||
} else if write.is_ready() {
|
||||
write
|
||||
} else {
|
||||
Poll::Ready(Status::Terminate)
|
||||
}
|
||||
})
|
||||
.await;
|
||||
|
||||
ctl.modify(false, true);
|
||||
context.shutdown(st == Status::Shutdown).await;
|
||||
context.stopped(ctl.close().await.err());
|
||||
}
|
69
ntex-net/src/rt_polling/mod.rs
Normal file
69
ntex-net/src/rt_polling/mod.rs
Normal file
|
@ -0,0 +1,69 @@
|
|||
use std::{io::Result, net, net::SocketAddr};
|
||||
|
||||
use ntex_bytes::PoolRef;
|
||||
use ntex_io::Io;
|
||||
use socket2::Socket;
|
||||
|
||||
pub(crate) mod connect;
|
||||
mod driver;
|
||||
mod io;
|
||||
|
||||
#[cfg(not(target_pointer_width = "64"))]
|
||||
compile_error!("Only 64bit platforms are supported");
|
||||
|
||||
/// Tcp stream wrapper for neon TcpStream
|
||||
struct TcpStream(socket2::Socket);
|
||||
|
||||
/// Tcp stream wrapper for neon UnixStream
|
||||
struct UnixStream(socket2::Socket);
|
||||
|
||||
/// Opens a TCP connection to a remote host.
|
||||
pub async fn tcp_connect(addr: SocketAddr) -> Result<Io> {
|
||||
let sock = crate::helpers::connect(addr).await?;
|
||||
Ok(Io::new(TcpStream(crate::helpers::prep_socket(sock)?)))
|
||||
}
|
||||
|
||||
/// Opens a TCP connection to a remote host and use specified memory pool.
|
||||
pub async fn tcp_connect_in(addr: SocketAddr, pool: PoolRef) -> Result<Io> {
|
||||
let sock = crate::helpers::connect(addr).await?;
|
||||
Ok(Io::with_memory_pool(
|
||||
TcpStream(crate::helpers::prep_socket(sock)?),
|
||||
pool,
|
||||
))
|
||||
}
|
||||
|
||||
/// Opens a unix stream connection.
|
||||
pub async fn unix_connect<'a, P>(addr: P) -> Result<Io>
|
||||
where
|
||||
P: AsRef<std::path::Path> + 'a,
|
||||
{
|
||||
let sock = crate::helpers::connect_unix(addr).await?;
|
||||
Ok(Io::new(UnixStream(crate::helpers::prep_socket(sock)?)))
|
||||
}
|
||||
|
||||
/// Opens a unix stream connection and specified memory pool.
|
||||
pub async fn unix_connect_in<'a, P>(addr: P, pool: PoolRef) -> Result<Io>
|
||||
where
|
||||
P: AsRef<std::path::Path> + 'a,
|
||||
{
|
||||
let sock = crate::helpers::connect_unix(addr).await?;
|
||||
Ok(Io::with_memory_pool(
|
||||
UnixStream(crate::helpers::prep_socket(sock)?),
|
||||
pool,
|
||||
))
|
||||
}
|
||||
|
||||
/// Convert std TcpStream to TcpStream
|
||||
pub fn from_tcp_stream(stream: net::TcpStream) -> Result<Io> {
|
||||
stream.set_nodelay(true)?;
|
||||
Ok(Io::new(TcpStream(crate::helpers::prep_socket(
|
||||
Socket::from(stream),
|
||||
)?)))
|
||||
}
|
||||
|
||||
/// Convert std UnixStream to UnixStream
|
||||
pub fn from_unix_stream(stream: std::os::unix::net::UnixStream) -> Result<Io> {
|
||||
Ok(Io::new(UnixStream(crate::helpers::prep_socket(
|
||||
Socket::from(stream),
|
||||
)?)))
|
||||
}
|
91
ntex-net/src/rt_uring/connect.rs
Normal file
91
ntex-net/src/rt_uring/connect.rs
Normal file
|
@ -0,0 +1,91 @@
|
|||
use std::{cell::RefCell, io, os::fd::RawFd, rc::Rc};
|
||||
|
||||
use io_uring::{opcode, types::Fd};
|
||||
use ntex_neon::{driver::DriverApi, driver::Handler, Runtime};
|
||||
use ntex_util::channel::oneshot::Sender;
|
||||
use slab::Slab;
|
||||
use socket2::SockAddr;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub(crate) struct ConnectOps(Rc<ConnectOpsInner>);
|
||||
|
||||
#[derive(Debug)]
|
||||
enum Change {
|
||||
Readable,
|
||||
Writable,
|
||||
Error(io::Error),
|
||||
}
|
||||
|
||||
struct ConnectOpsHandler {
|
||||
inner: Rc<ConnectOpsInner>,
|
||||
}
|
||||
|
||||
type Operations = RefCell<Slab<(Box<SockAddr>, Sender<io::Result<()>>)>>;
|
||||
|
||||
struct ConnectOpsInner {
|
||||
api: DriverApi,
|
||||
ops: Operations,
|
||||
}
|
||||
|
||||
impl ConnectOps {
|
||||
pub(crate) fn current() -> Self {
|
||||
Runtime::value(|rt| {
|
||||
let mut inner = None;
|
||||
rt.driver().register(|api| {
|
||||
if !api.is_supported(opcode::Connect::CODE) {
|
||||
panic!("opcode::Connect is required for io-uring support");
|
||||
}
|
||||
|
||||
let ops = Rc::new(ConnectOpsInner {
|
||||
api,
|
||||
ops: RefCell::new(Slab::new()),
|
||||
});
|
||||
inner = Some(ops.clone());
|
||||
Box::new(ConnectOpsHandler { inner: ops })
|
||||
});
|
||||
ConnectOps(inner.unwrap())
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) fn connect(
|
||||
&self,
|
||||
fd: RawFd,
|
||||
addr: SockAddr,
|
||||
sender: Sender<io::Result<()>>,
|
||||
) -> io::Result<()> {
|
||||
let addr2 = addr.clone();
|
||||
let mut ops = self.0.ops.borrow_mut();
|
||||
|
||||
// addr must be stable, neon submits ops at the end of rt turn
|
||||
let addr = Box::new(addr);
|
||||
let (addr_ptr, addr_len) = (addr.as_ref().as_ptr(), addr.len());
|
||||
|
||||
let id = ops.insert((addr, sender));
|
||||
self.0.api.submit(
|
||||
id as u32,
|
||||
opcode::Connect::new(Fd(fd), addr_ptr, addr_len).build(),
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Handler for ConnectOpsHandler {
|
||||
fn canceled(&mut self, user_data: usize) {
|
||||
log::debug!("connect-op is canceled {:?}", user_data);
|
||||
|
||||
self.inner.ops.borrow_mut().remove(user_data);
|
||||
}
|
||||
|
||||
fn completed(&mut self, user_data: usize, flags: u32, result: io::Result<i32>) {
|
||||
let (addr, tx) = self.inner.ops.borrow_mut().remove(user_data);
|
||||
log::debug!(
|
||||
"connect-op is completed {:?} result: {:?}, addr: {:?}",
|
||||
user_data,
|
||||
result,
|
||||
addr.as_socket()
|
||||
);
|
||||
|
||||
let _ = tx.send(result.map(|_| ()));
|
||||
}
|
||||
}
|
444
ntex-net/src/rt_uring/driver.rs
Normal file
444
ntex-net/src/rt_uring/driver.rs
Normal file
|
@ -0,0 +1,444 @@
|
|||
use std::{cell::RefCell, io, mem, num::NonZeroU32, os, rc::Rc, task::Poll};
|
||||
|
||||
use io_uring::{opcode, squeue::Entry, types::Fd};
|
||||
use ntex_neon::{driver::DriverApi, driver::Handler, Runtime};
|
||||
use ntex_util::channel::oneshot;
|
||||
use slab::Slab;
|
||||
|
||||
use ntex_bytes::{Buf, BufMut, BytesVec};
|
||||
use ntex_io::IoContext;
|
||||
|
||||
pub(crate) struct StreamCtl<T> {
|
||||
id: usize,
|
||||
inner: Rc<StreamOpsInner<T>>,
|
||||
}
|
||||
|
||||
bitflags::bitflags! {
|
||||
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
|
||||
struct Flags: u8 {
|
||||
const RD_CANCELING = 0b0000_0001;
|
||||
const RD_REISSUE = 0b0000_0010;
|
||||
const WR_CANCELING = 0b0001_0000;
|
||||
const WR_REISSUE = 0b0010_0000;
|
||||
}
|
||||
}
|
||||
|
||||
struct StreamItem<T> {
|
||||
io: Option<T>,
|
||||
fd: Fd,
|
||||
context: IoContext,
|
||||
ref_count: usize,
|
||||
flags: Flags,
|
||||
rd_op: Option<NonZeroU32>,
|
||||
wr_op: Option<NonZeroU32>,
|
||||
}
|
||||
|
||||
impl<T> StreamItem<T> {
|
||||
fn tag(&self) -> &'static str {
|
||||
self.context.tag()
|
||||
}
|
||||
}
|
||||
|
||||
enum Operation {
|
||||
Recv {
|
||||
id: usize,
|
||||
buf: BytesVec,
|
||||
context: IoContext,
|
||||
},
|
||||
Send {
|
||||
id: usize,
|
||||
buf: BytesVec,
|
||||
context: IoContext,
|
||||
},
|
||||
Close {
|
||||
tx: Option<oneshot::Sender<io::Result<i32>>>,
|
||||
},
|
||||
Nop,
|
||||
}
|
||||
|
||||
pub(crate) struct StreamOps<T>(Rc<StreamOpsInner<T>>);
|
||||
|
||||
struct StreamOpsHandler<T> {
|
||||
inner: Rc<StreamOpsInner<T>>,
|
||||
}
|
||||
|
||||
struct StreamOpsInner<T> {
|
||||
api: DriverApi,
|
||||
feed: RefCell<Vec<usize>>,
|
||||
storage: RefCell<StreamOpsStorage<T>>,
|
||||
}
|
||||
|
||||
struct StreamOpsStorage<T> {
|
||||
ops: Slab<Operation>,
|
||||
streams: Slab<StreamItem<T>>,
|
||||
}
|
||||
|
||||
impl<T: os::fd::AsRawFd + 'static> StreamOps<T> {
|
||||
pub(crate) fn current() -> Self {
|
||||
Runtime::value(|rt| {
|
||||
let mut inner = None;
|
||||
rt.driver().register(|api| {
|
||||
if !api.is_supported(opcode::Recv::CODE) {
|
||||
panic!("opcode::Recv is required for io-uring support");
|
||||
}
|
||||
if !api.is_supported(opcode::Send::CODE) {
|
||||
panic!("opcode::Send is required for io-uring support");
|
||||
}
|
||||
if !api.is_supported(opcode::Close::CODE) {
|
||||
panic!("opcode::Close is required for io-uring support");
|
||||
}
|
||||
|
||||
let mut ops = Slab::new();
|
||||
ops.insert(Operation::Nop);
|
||||
|
||||
let ops = Rc::new(StreamOpsInner {
|
||||
api,
|
||||
feed: RefCell::new(Vec::new()),
|
||||
storage: RefCell::new(StreamOpsStorage {
|
||||
ops,
|
||||
streams: Slab::new(),
|
||||
}),
|
||||
});
|
||||
inner = Some(ops.clone());
|
||||
Box::new(StreamOpsHandler { inner: ops })
|
||||
});
|
||||
|
||||
StreamOps(inner.unwrap())
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) fn register(&self, io: T, context: IoContext) -> StreamCtl<T> {
|
||||
let item = StreamItem {
|
||||
context,
|
||||
fd: Fd(io.as_raw_fd()),
|
||||
io: Some(io),
|
||||
ref_count: 1,
|
||||
rd_op: None,
|
||||
wr_op: None,
|
||||
flags: Flags::empty(),
|
||||
};
|
||||
let id = self.0.storage.borrow_mut().streams.insert(item);
|
||||
StreamCtl {
|
||||
id,
|
||||
inner: self.0.clone(),
|
||||
}
|
||||
}
|
||||
|
||||
fn with<F, R>(&self, f: F) -> R
|
||||
where
|
||||
F: FnOnce(&mut StreamOpsStorage<T>) -> R,
|
||||
{
|
||||
f(&mut *self.0.storage.borrow_mut())
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Clone for StreamOps<T> {
|
||||
fn clone(&self) -> Self {
|
||||
Self(self.0.clone())
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Handler for StreamOpsHandler<T> {
|
||||
fn canceled(&mut self, user_data: usize) {
|
||||
let mut storage = self.inner.storage.borrow_mut();
|
||||
|
||||
match storage.ops.remove(user_data) {
|
||||
Operation::Recv { id, buf, context } => {
|
||||
log::debug!("{}: Recv canceled {:?}", context.tag(), id);
|
||||
context.release_read_buf(buf);
|
||||
if let Some(item) = storage.streams.get_mut(id) {
|
||||
item.rd_op.take();
|
||||
item.flags.remove(Flags::RD_CANCELING);
|
||||
if item.flags.contains(Flags::RD_REISSUE) {
|
||||
item.flags.remove(Flags::RD_REISSUE);
|
||||
|
||||
let result = storage.recv(id, Some(context));
|
||||
if let Some((id, op)) = result {
|
||||
self.inner.api.submit(id, op);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Operation::Send { id, buf, context } => {
|
||||
log::debug!("{}: Send canceled: {:?}", context.tag(), id);
|
||||
context.release_write_buf(buf);
|
||||
if let Some(item) = storage.streams.get_mut(id) {
|
||||
item.wr_op.take();
|
||||
item.flags.remove(Flags::WR_CANCELING);
|
||||
if item.flags.contains(Flags::WR_REISSUE) {
|
||||
item.flags.remove(Flags::WR_REISSUE);
|
||||
|
||||
let result = storage.send(id, Some(context));
|
||||
if let Some((id, op)) = result {
|
||||
self.inner.api.submit(id, op);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Operation::Nop | Operation::Close { .. } => {}
|
||||
}
|
||||
}
|
||||
|
||||
fn completed(&mut self, user_data: usize, flags: u32, result: io::Result<i32>) {
|
||||
let mut storage = self.inner.storage.borrow_mut();
|
||||
|
||||
let op = storage.ops.remove(user_data);
|
||||
match op {
|
||||
Operation::Recv {
|
||||
id,
|
||||
mut buf,
|
||||
context,
|
||||
} => {
|
||||
let result = result.map(|size| {
|
||||
unsafe { buf.advance_mut(size as usize) };
|
||||
size as usize
|
||||
});
|
||||
|
||||
// reset op reference
|
||||
if let Some(item) = storage.streams.get_mut(id) {
|
||||
log::debug!(
|
||||
"{}: Recv completed {:?}, res: {:?}, buf({})",
|
||||
context.tag(),
|
||||
item.fd,
|
||||
result,
|
||||
buf.remaining_mut()
|
||||
);
|
||||
item.rd_op.take();
|
||||
}
|
||||
|
||||
// set read buf
|
||||
let tag = context.tag();
|
||||
if context.set_read_buf(result, buf).is_pending() {
|
||||
if let Some((id, op)) = storage.recv(id, Some(context)) {
|
||||
self.inner.api.submit(id, op);
|
||||
}
|
||||
} else {
|
||||
log::debug!("{}: Recv to pause", tag);
|
||||
}
|
||||
}
|
||||
Operation::Send { id, buf, context } => {
|
||||
// reset op reference
|
||||
let fd = if let Some(item) = storage.streams.get_mut(id) {
|
||||
log::debug!(
|
||||
"{}: Send completed: {:?}, res: {:?}, buf({})",
|
||||
context.tag(),
|
||||
item.fd,
|
||||
result,
|
||||
buf.len()
|
||||
);
|
||||
item.wr_op.take();
|
||||
Some(item.fd)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
// set read buf
|
||||
let result = context.set_write_buf(result.map(|size| size as usize), buf);
|
||||
if result.is_pending() {
|
||||
log::debug!("{}: Need to send more: {:?}", context.tag(), fd);
|
||||
if let Some((id, op)) = storage.send(id, Some(context)) {
|
||||
self.inner.api.submit(id, op);
|
||||
}
|
||||
}
|
||||
}
|
||||
Operation::Close { tx } => {
|
||||
if let Some(tx) = tx {
|
||||
let _ = tx.send(result);
|
||||
}
|
||||
}
|
||||
Operation::Nop => {}
|
||||
}
|
||||
|
||||
// extra
|
||||
for id in self.inner.feed.borrow_mut().drain(..) {
|
||||
storage.streams[id].ref_count -= 1;
|
||||
if storage.streams[id].ref_count == 0 {
|
||||
let mut item = storage.streams.remove(id);
|
||||
|
||||
log::debug!("{}: Drop io ({}), {:?}", item.tag(), id, item.fd);
|
||||
|
||||
if let Some(io) = item.io.take() {
|
||||
mem::forget(io);
|
||||
|
||||
let id = storage.ops.insert(Operation::Close { tx: None });
|
||||
assert!(id < u32::MAX as usize);
|
||||
self.inner
|
||||
.api
|
||||
.submit(id as u32, opcode::Close::new(item.fd).build());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> StreamOpsStorage<T> {
|
||||
fn recv(&mut self, id: usize, context: Option<IoContext>) -> Option<(u32, Entry)> {
|
||||
let item = &mut self.streams[id];
|
||||
|
||||
if item.rd_op.is_none() {
|
||||
if let Poll::Ready(mut buf) = item.context.get_read_buf() {
|
||||
log::debug!(
|
||||
"{}: Recv resume ({}), {:?} rem: {:?}",
|
||||
item.tag(),
|
||||
id,
|
||||
item.fd,
|
||||
buf.remaining_mut()
|
||||
);
|
||||
|
||||
let slice = buf.chunk_mut();
|
||||
let op = opcode::Recv::new(item.fd, slice.as_mut_ptr(), slice.len() as u32)
|
||||
.build();
|
||||
|
||||
let op_id = self.ops.insert(Operation::Recv {
|
||||
id,
|
||||
buf,
|
||||
context: context.unwrap_or_else(|| item.context.clone()),
|
||||
});
|
||||
assert!(op_id < u32::MAX as usize);
|
||||
|
||||
item.rd_op = NonZeroU32::new(op_id as u32);
|
||||
return Some((op_id as u32, op));
|
||||
}
|
||||
} else if item.flags.contains(Flags::RD_CANCELING) {
|
||||
item.flags.insert(Flags::RD_REISSUE);
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
fn send(&mut self, id: usize, context: Option<IoContext>) -> Option<(u32, Entry)> {
|
||||
let item = &mut self.streams[id];
|
||||
|
||||
if item.wr_op.is_none() {
|
||||
if let Poll::Ready(buf) = item.context.get_write_buf() {
|
||||
log::debug!(
|
||||
"{}: Send resume ({}), {:?} len: {:?}",
|
||||
item.tag(),
|
||||
id,
|
||||
item.fd,
|
||||
buf.len()
|
||||
);
|
||||
|
||||
let slice = buf.chunk();
|
||||
let op =
|
||||
opcode::Send::new(item.fd, slice.as_ptr(), slice.len() as u32).build();
|
||||
|
||||
let op_id = self.ops.insert(Operation::Send {
|
||||
id,
|
||||
buf,
|
||||
context: context.unwrap_or_else(|| item.context.clone()),
|
||||
});
|
||||
assert!(op_id < u32::MAX as usize);
|
||||
|
||||
item.wr_op = NonZeroU32::new(op_id as u32);
|
||||
return Some((op_id as u32, op));
|
||||
}
|
||||
} else if item.flags.contains(Flags::WR_CANCELING) {
|
||||
item.flags.insert(Flags::WR_REISSUE);
|
||||
}
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> StreamCtl<T> {
|
||||
pub(crate) async fn close(self) -> io::Result<()> {
|
||||
let result = {
|
||||
let mut storage = self.inner.storage.borrow_mut();
|
||||
|
||||
let (io, fd) = {
|
||||
let item = &mut storage.streams[self.id];
|
||||
(item.io.take(), item.fd)
|
||||
};
|
||||
if let Some(io) = io {
|
||||
mem::forget(io);
|
||||
|
||||
let (tx, rx) = oneshot::channel();
|
||||
let id = storage.ops.insert(Operation::Close { tx: Some(tx) });
|
||||
assert!(id < u32::MAX as usize);
|
||||
|
||||
drop(storage);
|
||||
self.inner
|
||||
.api
|
||||
.submit(id as u32, opcode::Close::new(fd).build());
|
||||
Some(rx)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
};
|
||||
|
||||
if let Some(rx) = result {
|
||||
rx.await
|
||||
.map_err(|_| io::Error::new(io::ErrorKind::Other, "gone"))
|
||||
.and_then(|item| item)
|
||||
.map(|_| ())
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn with_io<F, R>(&self, f: F) -> R
|
||||
where
|
||||
F: FnOnce(Option<&T>) -> R,
|
||||
{
|
||||
f(self.inner.storage.borrow().streams[self.id].io.as_ref())
|
||||
}
|
||||
|
||||
pub(crate) fn resume_read(&self) {
|
||||
let result = self.inner.storage.borrow_mut().recv(self.id, None);
|
||||
if let Some((id, op)) = result {
|
||||
self.inner.api.submit(id, op);
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn resume_write(&self) {
|
||||
let result = self.inner.storage.borrow_mut().send(self.id, None);
|
||||
if let Some((id, op)) = result {
|
||||
self.inner.api.submit(id, op);
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn pause_read(&self) {
|
||||
let mut storage = self.inner.storage.borrow_mut();
|
||||
let item = &mut storage.streams[self.id];
|
||||
|
||||
if let Some(rd_op) = item.rd_op {
|
||||
if !item.flags.contains(Flags::RD_CANCELING) {
|
||||
log::debug!("{}: Recv to pause ({}), {:?}", item.tag(), self.id, item.fd);
|
||||
item.flags.insert(Flags::RD_CANCELING);
|
||||
self.inner.api.cancel(rd_op.get());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Clone for StreamCtl<T> {
|
||||
fn clone(&self) -> Self {
|
||||
self.inner.storage.borrow_mut().streams[self.id].ref_count += 1;
|
||||
Self {
|
||||
id: self.id,
|
||||
inner: self.inner.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Drop for StreamCtl<T> {
|
||||
fn drop(&mut self) {
|
||||
if let Ok(mut storage) = self.inner.storage.try_borrow_mut() {
|
||||
storage.streams[self.id].ref_count -= 1;
|
||||
if storage.streams[self.id].ref_count == 0 {
|
||||
let mut item = storage.streams.remove(self.id);
|
||||
if let Some(io) = item.io.take() {
|
||||
log::debug!("{}: Close io ({}), {:?}", item.tag(), self.id, item.fd);
|
||||
mem::forget(io);
|
||||
|
||||
let id = storage.ops.insert(Operation::Close { tx: None });
|
||||
assert!(id < u32::MAX as usize);
|
||||
self.inner
|
||||
.api
|
||||
.submit(id as u32, opcode::Close::new(item.fd).build());
|
||||
}
|
||||
}
|
||||
} else {
|
||||
self.inner.feed.borrow_mut().push(self.id);
|
||||
}
|
||||
}
|
||||
}
|
95
ntex-net/src/rt_uring/io.rs
Normal file
95
ntex-net/src/rt_uring/io.rs
Normal file
|
@ -0,0 +1,95 @@
|
|||
use std::{any, future::poll_fn, task::Poll};
|
||||
|
||||
use ntex_io::{
|
||||
types, Handle, IoContext, IoStream, ReadContext, ReadStatus, WriteContext, WriteStatus,
|
||||
};
|
||||
use ntex_rt::spawn;
|
||||
use socket2::Socket;
|
||||
|
||||
use super::driver::{StreamCtl, StreamOps};
|
||||
|
||||
impl IoStream for super::TcpStream {
|
||||
fn start(self, read: ReadContext, _: WriteContext) -> Option<Box<dyn Handle>> {
|
||||
let io = self.0;
|
||||
let context = read.context();
|
||||
let ctl = StreamOps::current().register(io, context.clone());
|
||||
let ctl2 = ctl.clone();
|
||||
spawn(async move { run(ctl, context).await });
|
||||
|
||||
Some(Box::new(HandleWrapper(ctl2)))
|
||||
}
|
||||
}
|
||||
|
||||
impl IoStream for super::UnixStream {
|
||||
fn start(self, read: ReadContext, _: WriteContext) -> Option<Box<dyn Handle>> {
|
||||
let io = self.0;
|
||||
let context = read.context();
|
||||
let ctl = StreamOps::current().register(io, context.clone());
|
||||
spawn(async move { run(ctl, context).await });
|
||||
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
struct HandleWrapper(StreamCtl<Socket>);
|
||||
|
||||
impl Handle for HandleWrapper {
|
||||
fn query(&self, id: any::TypeId) -> Option<Box<dyn any::Any>> {
|
||||
if id == any::TypeId::of::<types::PeerAddr>() {
|
||||
let addr = self.0.with_io(|io| io.and_then(|io| io.peer_addr().ok()));
|
||||
if let Some(addr) = addr.and_then(|addr| addr.as_socket()) {
|
||||
return Some(Box::new(types::PeerAddr(addr)));
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
|
||||
enum Status {
|
||||
Shutdown,
|
||||
Terminate,
|
||||
}
|
||||
|
||||
async fn run<T>(ctl: StreamCtl<T>, context: IoContext) {
|
||||
// Handle io readiness
|
||||
let st = poll_fn(|cx| {
|
||||
let read = match context.poll_read_ready(cx) {
|
||||
Poll::Ready(ReadStatus::Ready) => {
|
||||
ctl.resume_read();
|
||||
Poll::Pending
|
||||
}
|
||||
Poll::Ready(ReadStatus::Terminate) => Poll::Ready(()),
|
||||
Poll::Pending => {
|
||||
ctl.pause_read();
|
||||
Poll::Pending
|
||||
}
|
||||
};
|
||||
|
||||
let write = match context.poll_write_ready(cx) {
|
||||
Poll::Ready(WriteStatus::Ready) => {
|
||||
ctl.resume_write();
|
||||
Poll::Pending
|
||||
}
|
||||
Poll::Ready(WriteStatus::Shutdown) => Poll::Ready(Status::Shutdown),
|
||||
Poll::Ready(WriteStatus::Terminate) => Poll::Ready(Status::Terminate),
|
||||
Poll::Pending => Poll::Pending,
|
||||
};
|
||||
|
||||
if read.is_pending() && write.is_pending() {
|
||||
Poll::Pending
|
||||
} else if write.is_ready() {
|
||||
write
|
||||
} else {
|
||||
Poll::Ready(Status::Terminate)
|
||||
}
|
||||
})
|
||||
.await;
|
||||
|
||||
ctl.pause_read();
|
||||
ctl.resume_write();
|
||||
context.shutdown(st == Status::Shutdown).await;
|
||||
|
||||
let result = ctl.close().await;
|
||||
context.stopped(result.err());
|
||||
}
|
66
ntex-net/src/rt_uring/mod.rs
Normal file
66
ntex-net/src/rt_uring/mod.rs
Normal file
|
@ -0,0 +1,66 @@
|
|||
use std::{io::Result, net, net::SocketAddr};
|
||||
|
||||
use ntex_bytes::PoolRef;
|
||||
use ntex_io::Io;
|
||||
use socket2::Socket;
|
||||
|
||||
pub(crate) mod connect;
|
||||
mod driver;
|
||||
mod io;
|
||||
|
||||
/// Tcp stream wrapper for neon TcpStream
|
||||
struct TcpStream(Socket);
|
||||
|
||||
/// Tcp stream wrapper for neon UnixStream
|
||||
struct UnixStream(Socket);
|
||||
|
||||
/// Opens a TCP connection to a remote host.
|
||||
pub async fn tcp_connect(addr: SocketAddr) -> Result<Io> {
|
||||
let sock = crate::helpers::connect(addr).await?;
|
||||
Ok(Io::new(TcpStream(crate::helpers::prep_socket(sock)?)))
|
||||
}
|
||||
|
||||
/// Opens a TCP connection to a remote host and use specified memory pool.
|
||||
pub async fn tcp_connect_in(addr: SocketAddr, pool: PoolRef) -> Result<Io> {
|
||||
let sock = crate::helpers::connect(addr).await?;
|
||||
Ok(Io::with_memory_pool(
|
||||
TcpStream(crate::helpers::prep_socket(sock)?),
|
||||
pool,
|
||||
))
|
||||
}
|
||||
|
||||
/// Opens a unix stream connection.
|
||||
pub async fn unix_connect<'a, P>(addr: P) -> Result<Io>
|
||||
where
|
||||
P: AsRef<std::path::Path> + 'a,
|
||||
{
|
||||
let sock = crate::helpers::connect_unix(addr).await?;
|
||||
Ok(Io::new(UnixStream(crate::helpers::prep_socket(sock)?)))
|
||||
}
|
||||
|
||||
/// Opens a unix stream connection and specified memory pool.
|
||||
pub async fn unix_connect_in<'a, P>(addr: P, pool: PoolRef) -> Result<Io>
|
||||
where
|
||||
P: AsRef<std::path::Path> + 'a,
|
||||
{
|
||||
let sock = crate::helpers::connect_unix(addr).await?;
|
||||
Ok(Io::with_memory_pool(
|
||||
UnixStream(crate::helpers::prep_socket(sock)?),
|
||||
pool,
|
||||
))
|
||||
}
|
||||
|
||||
/// Convert std TcpStream to tokio's TcpStream
|
||||
pub fn from_tcp_stream(stream: net::TcpStream) -> Result<Io> {
|
||||
stream.set_nodelay(true)?;
|
||||
Ok(Io::new(TcpStream(crate::helpers::prep_socket(
|
||||
Socket::from(stream),
|
||||
)?)))
|
||||
}
|
||||
|
||||
/// Convert std UnixStream to tokio's UnixStream
|
||||
pub fn from_unix_stream(stream: std::os::unix::net::UnixStream) -> Result<Io> {
|
||||
Ok(Io::new(UnixStream(crate::helpers::prep_socket(
|
||||
Socket::from(stream),
|
||||
)?)))
|
||||
}
|
|
@@ -1,5 +1,29 @@
# Changes

## [0.4.29] - 2025-03-26

* Add Arbiter::get_value() helper method

## [0.4.27] - 2025-03-14

* Add arbiters pings ttl

* Retrieves a list of all arbiters in the system

* Add "neon" runtime support

* Drop glommio support

* Drop async-std support

## [0.4.26] - 2025-03-12

* Add Arbiter::spawn_with()

## [0.4.25] - 2025-03-11

* Adds Send bound to arbiter exec (#514)

## [0.4.24] - 2025-01-03

* Relax runtime requirements
@@ -1,6 +1,6 @@
[package]
name = "ntex-rt"
version = "0.4.24"
version = "0.4.29"
authors = ["ntex contributors <team@ntex.rs>"]
description = "ntex runtime"
keywords = ["network", "framework", "async", "futures"]

@@ -20,25 +20,21 @@ path = "src/lib.rs"
[features]
default = []

# glommio support
glommio = ["glomm-io", "futures-channel"]

# tokio support
tokio = ["tok-io"]

# compio support
compio = ["compio-driver", "compio-runtime"]

# async-std support
async-std = ["async_std/unstable"]
# neon runtime
neon = ["ntex-neon"]

[dependencies]
async-channel = "2"
futures-core = "0.3"
log = "0.4"
futures-timer = "3.0"
oneshot = "0.1"
log = "0.4"

async_std = { version = "1", package = "async-std", optional = true }
compio-driver = { version = "0.6", optional = true }
compio-runtime = { version = "0.6", optional = true }
tok-io = { version = "1", package = "tokio", default-features = false, features = [

@@ -46,6 +42,4 @@ tok-io = { version = "1", package = "tokio", default-features = false, features
    "net",
], optional = true }

[target.'cfg(target_os = "linux")'.dependencies]
glomm-io = { version = "0.9", package = "glommio", optional = true }
futures-channel = { version = "0.3", optional = true }
ntex-neon = { version = "0.1.14", optional = true }
@@ -7,8 +7,7 @@ fn main() {
        let _ = match key.as_ref() {
            "CARGO_FEATURE_COMPIO" => features.insert("compio"),
            "CARGO_FEATURE_TOKIO" => features.insert("tokio"),
            "CARGO_FEATURE_GLOMMIO" => features.insert("glommio"),
            "CARGO_FEATURE_ASYNC_STD" => features.insert("async-std"),
            "CARGO_FEATURE_NEON" => features.insert("neon"),
            _ => false,
        };
    }
@ -1,22 +1,17 @@
|
|||
#![allow(clippy::let_underscore_future)]
|
||||
use std::any::{Any, TypeId};
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::task::{ready, Context, Poll};
|
||||
use std::sync::{atomic::AtomicUsize, atomic::Ordering, Arc};
|
||||
use std::{cell::RefCell, collections::HashMap, fmt, future::Future, pin::Pin, thread};
|
||||
|
||||
use async_channel::{unbounded, Receiver, Sender};
|
||||
use futures_core::stream::Stream;
|
||||
|
||||
use crate::system::System;
|
||||
use crate::system::{FnExec, Id, System, SystemCommand};
|
||||
|
||||
thread_local!(
|
||||
static ADDR: RefCell<Option<Arbiter>> = const { RefCell::new(None) };
|
||||
static STORAGE: RefCell<HashMap<TypeId, Box<dyn Any>>> = RefCell::new(HashMap::new());
|
||||
);
|
||||
|
||||
type ServerCommandRx = Pin<Box<dyn Stream<Item = SystemCommand>>>;
|
||||
type ArbiterCommandRx = Pin<Box<dyn Stream<Item = ArbiterCommand>>>;
|
||||
|
||||
pub(super) static COUNT: AtomicUsize = AtomicUsize::new(0);
|
||||
|
||||
pub(super) enum ArbiterCommand {
|
||||
|
@ -31,13 +26,16 @@ pub(super) enum ArbiterCommand {
|
|||
/// When an Arbiter is created, it spawns a new OS thread, and
|
||||
/// hosts an event loop. Some Arbiter functions execute on the current thread.
|
||||
pub struct Arbiter {
|
||||
id: usize,
|
||||
pub(crate) sys_id: usize,
|
||||
name: Arc<String>,
|
||||
sender: Sender<ArbiterCommand>,
|
||||
thread_handle: Option<thread::JoinHandle<()>>,
|
||||
}
|
||||
|
||||
impl fmt::Debug for Arbiter {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "Arbiter")
|
||||
write!(f, "Arbiter({:?})", self.name.as_ref())
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -49,26 +47,20 @@ impl Default for Arbiter {
|
|||
|
||||
impl Clone for Arbiter {
|
||||
fn clone(&self) -> Self {
|
||||
Self::with_sender(self.sender.clone())
|
||||
Self::with_sender(self.sys_id, self.id, self.name.clone(), self.sender.clone())
|
||||
}
|
||||
}
|
||||
|
||||
impl Arbiter {
|
||||
#[allow(clippy::borrowed_box)]
|
||||
pub(super) fn new_system() -> (Self, ArbiterController) {
|
||||
pub(super) fn new_system(name: String) -> (Self, ArbiterController) {
|
||||
let (tx, rx) = unbounded();
|
||||
|
||||
let arb = Arbiter::with_sender(tx);
|
||||
let arb = Arbiter::with_sender(0, 0, Arc::new(name), tx);
|
||||
ADDR.with(|cell| *cell.borrow_mut() = Some(arb.clone()));
|
||||
STORAGE.with(|cell| cell.borrow_mut().clear());
|
||||
|
||||
(
|
||||
arb,
|
||||
ArbiterController {
|
||||
stop: None,
|
||||
rx: Box::pin(rx),
|
||||
},
|
||||
)
|
||||
(arb, ArbiterController { rx, stop: None })
|
||||
}
|
||||
|
||||
/// Returns the current thread's arbiter's address. If no Arbiter is present, then this
|
||||
|
@ -85,27 +77,37 @@ impl Arbiter {
|
|||
let _ = self.sender.try_send(ArbiterCommand::Stop);
|
||||
}
|
||||
|
||||
/// Spawn new thread and run event loop in spawned thread.
|
||||
/// Spawn new thread and run runtime in spawned thread.
|
||||
/// Returns address of newly created arbiter.
|
||||
pub fn new() -> Arbiter {
|
||||
let name = format!("ntex-rt:worker:{}", COUNT.load(Ordering::Relaxed) + 1);
|
||||
Arbiter::with_name(name)
|
||||
}
|
||||
|
||||
/// Spawn new thread and run runtime in spawned thread.
|
||||
/// Returns address of newly created arbiter.
|
||||
pub fn with_name(name: String) -> Arbiter {
|
||||
let id = COUNT.fetch_add(1, Ordering::Relaxed);
|
||||
let name = format!("ntex-rt:worker:{}", id);
|
||||
let sys = System::current();
|
||||
let name2 = Arc::new(name.clone());
|
||||
let config = sys.config();
|
||||
let (arb_tx, arb_rx) = unbounded();
|
||||
let arb_tx2 = arb_tx.clone();
|
||||
|
||||
let builder = if sys.config().stack_size > 0 {
|
||||
thread::Builder::new()
|
||||
.name(name.clone())
|
||||
.name(name)
|
||||
.stack_size(sys.config().stack_size)
|
||||
} else {
|
||||
thread::Builder::new().name(name.clone())
|
||||
thread::Builder::new().name(name)
|
||||
};
|
||||
|
||||
let name = name2.clone();
|
||||
let sys_id = sys.id();
|
||||
|
||||
let handle = builder
|
||||
.spawn(move || {
|
||||
let arb = Arbiter::with_sender(arb_tx);
|
||||
let arb = Arbiter::with_sender(sys_id.0, id, name2, arb_tx);
|
||||
|
||||
let (stop, stop_rx) = oneshot::channel();
|
||||
STORAGE.with(|cell| cell.borrow_mut().clear());
|
||||
|
@ -114,16 +116,19 @@ impl Arbiter {
|
|||
|
||||
config.block_on(async move {
|
||||
// start arbiter controller
|
||||
let _ = crate::spawn(ArbiterController {
|
||||
stop: Some(stop),
|
||||
rx: Box::pin(arb_rx),
|
||||
});
|
||||
let _ = crate::spawn(
|
||||
ArbiterController {
|
||||
stop: Some(stop),
|
||||
rx: arb_rx,
|
||||
}
|
||||
.run(),
|
||||
);
|
||||
ADDR.with(|cell| *cell.borrow_mut() = Some(arb.clone()));
|
||||
|
||||
// register arbiter
|
||||
let _ = System::current()
|
||||
.sys()
|
||||
.try_send(SystemCommand::RegisterArbiter(id, arb));
|
||||
.try_send(SystemCommand::RegisterArbiter(Id(id), arb));
|
||||
|
||||
// run loop
|
||||
let _ = stop_rx.await;
|
||||
|
@ -132,18 +137,46 @@ impl Arbiter {
|
|||
// unregister arbiter
|
||||
let _ = System::current()
|
||||
.sys()
|
||||
.try_send(SystemCommand::UnregisterArbiter(id));
|
||||
.try_send(SystemCommand::UnregisterArbiter(Id(id)));
|
||||
})
|
||||
.unwrap_or_else(|err| {
|
||||
panic!("Cannot spawn an arbiter's thread {:?}: {:?}", &name, err)
|
||||
});
|
||||
|
||||
Arbiter {
|
||||
id,
|
||||
name,
|
||||
sys_id: sys_id.0,
|
||||
sender: arb_tx2,
|
||||
thread_handle: Some(handle),
|
||||
}
|
||||
}
|
||||
|
||||
fn with_sender(
|
||||
sys_id: usize,
|
||||
id: usize,
|
||||
name: Arc<String>,
|
||||
sender: Sender<ArbiterCommand>,
|
||||
) -> Self {
|
||||
Self {
|
||||
id,
|
||||
sys_id,
|
||||
name,
|
||||
sender,
|
||||
thread_handle: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Id of the arbiter
|
||||
pub fn id(&self) -> Id {
|
||||
Id(self.id)
|
||||
}
|
||||
|
||||
/// Name of the arbiter
|
||||
pub fn name(&self) -> &str {
|
||||
self.name.as_ref()
|
||||
}
|
||||
|
||||
/// Send a future to the Arbiter's thread, and spawn it.
|
||||
pub fn spawn<F>(&self, future: F)
|
||||
where
|
||||
|
@ -154,10 +187,34 @@ impl Arbiter {
|
|||
.try_send(ArbiterCommand::Execute(Box::pin(future)));
|
||||
}
|
||||
|
||||
#[rustfmt::skip]
|
||||
/// Send a function to the Arbiter's thread and spawn its resulting future.
|
||||
/// This can be used to spawn non-send futures on the arbiter thread.
|
||||
pub fn spawn_with<F, R, O>(
|
||||
&self,
|
||||
f: F
|
||||
) -> impl Future<Output = Result<O, oneshot::RecvError>> + Send + 'static
|
||||
where
|
||||
F: FnOnce() -> R + Send + 'static,
|
||||
R: Future<Output = O> + 'static,
|
||||
O: Send + 'static,
|
||||
{
|
||||
let (tx, rx) = oneshot::channel();
|
||||
let _ = self
|
||||
.sender
|
||||
.try_send(ArbiterCommand::ExecuteFn(Box::new(move || {
|
||||
crate::spawn(async move {
|
||||
let _ = tx.send(f().await);
|
||||
});
|
||||
})));
|
||||
rx
|
||||
}
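As a quick illustration of the API above, a hedged sketch of driving a !Send future on another arbiter and awaiting its output from the caller. It assumes a running ntex system; the values are arbitrary.

```rust
// Hedged sketch, not part of the diff.
async fn answer_on_worker() -> u32 {
    let arb = ntex_rt::Arbiter::new();
    let res = arb
        .spawn_with(|| async {
            // runs on the arbiter thread; the future itself may be !Send
            40 + 2
        })
        .await;
    res.expect("arbiter dropped the response channel")
}
```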
|
||||
|
||||
#[rustfmt::skip]
|
||||
/// Send a function to the Arbiter's thread. This function will be executed asynchronously.
|
||||
/// A future is created, and when resolved will contain the result of the function sent
|
||||
/// to the Arbiters thread.
|
||||
pub fn exec<F, R>(&self, f: F) -> impl Future<Output = Result<R, oneshot::RecvError>>
|
||||
pub fn exec<F, R>(&self, f: F) -> impl Future<Output = Result<R, oneshot::RecvError>> + Send + 'static
|
||||
where
|
||||
F: FnOnce() -> R + Send + 'static,
|
||||
R: Send + 'static,
|
||||
|
@ -229,11 +286,23 @@ impl Arbiter {
|
|||
})
|
||||
}
|
||||
|
||||
fn with_sender(sender: Sender<ArbiterCommand>) -> Self {
|
||||
Self {
|
||||
sender,
|
||||
thread_handle: None,
|
||||
}
|
||||
/// Get a type previously inserted to this runtime or create new one.
|
||||
pub fn get_value<T, F>(f: F) -> T
|
||||
where
|
||||
T: Clone + 'static,
|
||||
F: FnOnce() -> T,
|
||||
{
|
||||
STORAGE.with(move |cell| {
|
||||
let mut st = cell.borrow_mut();
|
||||
if let Some(boxed) = st.get(&TypeId::of::<T>()) {
|
||||
if let Some(val) = (&**boxed as &(dyn Any + 'static)).downcast_ref::<T>() {
|
||||
return val.clone();
|
||||
}
|
||||
}
|
||||
let val = f();
|
||||
st.insert(TypeId::of::<T>(), Box::new(val.clone()));
|
||||
val
|
||||
})
|
||||
}
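`get_value` effectively gives each arbiter thread a small type-keyed, lazily initialized cache. A hedged usage sketch follows; the `AppConfig` type is invented for illustration.

```rust
// Hedged sketch, not part of the diff: one AppConfig per arbiter thread,
// created on first access and cloned out on every later call.
#[derive(Clone)]
struct AppConfig {
    workers: usize,
}

fn current_config() -> AppConfig {
    ntex_rt::Arbiter::get_value(|| AppConfig { workers: 4 })
}
```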
|
||||
|
||||
/// Wait for the event loop to stop by joining the underlying thread (if present).
|
||||
|
@ -246,9 +315,17 @@ impl Arbiter {
|
|||
}
|
||||
}
|
||||
|
||||
impl Eq for Arbiter {}
|
||||
|
||||
impl PartialEq for Arbiter {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.id == other.id && self.sys_id == other.sys_id
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) struct ArbiterController {
|
||||
stop: Option<oneshot::Sender<i32>>,
|
||||
rx: ArbiterCommandRx,
|
||||
rx: Receiver<ArbiterCommand>,
|
||||
}
|
||||
|
||||
impl Drop for ArbiterController {
|
||||
|
@ -264,118 +341,28 @@ impl Drop for ArbiterController {
|
|||
}
|
||||
}
|
||||
|
||||
impl Future for ArbiterController {
|
||||
type Output = ();
|
||||
|
||||
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
impl ArbiterController {
|
||||
pub(super) async fn run(mut self) {
|
||||
loop {
|
||||
match Pin::new(&mut self.rx).poll_next(cx) {
|
||||
Poll::Ready(None) => return Poll::Ready(()),
|
||||
Poll::Ready(Some(item)) => match item {
|
||||
ArbiterCommand::Stop => {
|
||||
if let Some(stop) = self.stop.take() {
|
||||
let _ = stop.send(0);
|
||||
};
|
||||
return Poll::Ready(());
|
||||
}
|
||||
ArbiterCommand::Execute(fut) => {
|
||||
let _ = crate::spawn(fut);
|
||||
}
|
||||
ArbiterCommand::ExecuteFn(f) => {
|
||||
f.call_box();
|
||||
}
|
||||
},
|
||||
Poll::Pending => return Poll::Pending,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(super) enum SystemCommand {
|
||||
Exit(i32),
|
||||
RegisterArbiter(usize, Arbiter),
|
||||
UnregisterArbiter(usize),
|
||||
}
|
||||
|
||||
pub(super) struct SystemArbiter {
|
||||
stop: Option<oneshot::Sender<i32>>,
|
||||
commands: ServerCommandRx,
|
||||
arbiters: HashMap<usize, Arbiter>,
|
||||
}
|
||||
|
||||
impl SystemArbiter {
|
||||
pub(super) fn new(
|
||||
stop: oneshot::Sender<i32>,
|
||||
commands: Receiver<SystemCommand>,
|
||||
) -> Self {
|
||||
SystemArbiter {
|
||||
commands: Box::pin(commands),
|
||||
stop: Some(stop),
|
||||
arbiters: HashMap::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for SystemArbiter {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("SystemArbiter")
|
||||
.field("arbiters", &self.arbiters)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl Future for SystemArbiter {
|
||||
type Output = ();
|
||||
|
||||
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
loop {
|
||||
let cmd = ready!(Pin::new(&mut self.commands).poll_next(cx));
|
||||
log::debug!("Received system command: {:?}", cmd);
|
||||
match cmd {
|
||||
None => {
|
||||
log::debug!("System stopped");
|
||||
return Poll::Ready(());
|
||||
match self.rx.recv().await {
|
||||
Ok(ArbiterCommand::Stop) => {
|
||||
if let Some(stop) = self.stop.take() {
|
||||
let _ = stop.send(0);
|
||||
};
|
||||
break;
|
||||
}
|
||||
Some(cmd) => match cmd {
|
||||
SystemCommand::Exit(code) => {
|
||||
log::debug!("Stopping system with {} code", code);
|
||||
|
||||
// stop arbiters
|
||||
for arb in self.arbiters.values() {
|
||||
arb.stop();
|
||||
}
|
||||
// stop event loop
|
||||
if let Some(stop) = self.stop.take() {
|
||||
let _ = stop.send(code);
|
||||
}
|
||||
}
|
||||
SystemCommand::RegisterArbiter(name, hnd) => {
|
||||
self.arbiters.insert(name, hnd);
|
||||
}
|
||||
SystemCommand::UnregisterArbiter(name) => {
|
||||
self.arbiters.remove(&name);
|
||||
}
|
||||
},
|
||||
Ok(ArbiterCommand::Execute(fut)) => {
|
||||
let _ = crate::spawn(fut);
|
||||
}
|
||||
Ok(ArbiterCommand::ExecuteFn(f)) => {
|
||||
f.call_box();
|
||||
}
|
||||
Err(_) => break,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) trait FnExec: Send + 'static {
|
||||
fn call_box(self: Box<Self>);
|
||||
}
|
||||
|
||||
impl<F> FnExec for F
|
||||
where
|
||||
F: FnOnce() + Send + 'static,
|
||||
{
|
||||
#[allow(clippy::boxed_local)]
|
||||
fn call_box(self: Box<Self>) {
|
||||
(*self)()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
@ -387,6 +374,7 @@ mod tests {
|
|||
assert!(Arbiter::get_item::<&'static str, _, _>(|s| *s == "test"));
|
||||
assert!(Arbiter::get_mut_item::<&'static str, _, _>(|s| *s == "test"));
|
||||
assert!(Arbiter::contains_item::<&'static str>());
|
||||
assert!(Arbiter::get_value(|| 64u64) == 64);
|
||||
assert!(format!("{:?}", Arbiter::current()).contains("Arbiter"));
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
use std::{future::Future, io, pin::Pin, sync::Arc};
|
||||
use std::{future::Future, io, marker::PhantomData, pin::Pin, rc::Rc, sync::Arc};
|
||||
|
||||
use async_channel::unbounded;
|
||||
|
||||
use crate::arbiter::{Arbiter, ArbiterController, SystemArbiter};
|
||||
use crate::{system::SystemConfig, System};
|
||||
use crate::arbiter::{Arbiter, ArbiterController};
|
||||
use crate::system::{System, SystemCommand, SystemConfig, SystemSupport};
|
||||
|
||||
/// Builder struct for a ntex runtime.
|
||||
///
|
||||
|
@ -17,6 +17,8 @@ pub struct Builder {
|
|||
stop_on_panic: bool,
|
||||
/// New thread stack size
|
||||
stack_size: usize,
|
||||
/// Arbiters ping interval
|
||||
ping_interval: usize,
|
||||
/// Block on fn
|
||||
block_on: Option<Arc<dyn Fn(Pin<Box<dyn Future<Output = ()>>>) + Sync + Send>>,
|
||||
}
|
||||
|
@ -28,6 +30,7 @@ impl Builder {
|
|||
stop_on_panic: false,
|
||||
stack_size: 0,
|
||||
block_on: None,
|
||||
ping_interval: 1000,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -52,6 +55,15 @@ impl Builder {
|
|||
self
|
||||
}
|
||||
|
||||
/// Sets ping interval for spawned arbiters.
|
||||
///
|
||||
/// Interval is in milliseconds. By default 5000 milliseconds is set.
|
||||
/// To disable pings set value to zero.
|
||||
pub fn ping_interval(mut self, interval: usize) -> Self {
|
||||
self.ping_interval = interval;
|
||||
self
|
||||
}
|
||||
|
||||
/// Use custom block_on function
|
||||
pub fn block_on<F>(mut self, block_on: F) -> Self
|
||||
where
|
||||
|
@ -74,18 +86,20 @@ impl Builder {
|
|||
stop_on_panic: self.stop_on_panic,
|
||||
};
|
||||
|
||||
let (arb, arb_controller) = Arbiter::new_system();
|
||||
let system = System::construct(sys_sender, arb, config);
|
||||
let (arb, controller) = Arbiter::new_system(self.name.clone());
|
||||
let _ = sys_sender.try_send(SystemCommand::RegisterArbiter(arb.id(), arb.clone()));
|
||||
let system = System::construct(sys_sender, arb.clone(), config);
|
||||
|
||||
// system arbiter
|
||||
let arb = SystemArbiter::new(stop_tx, sys_receiver);
|
||||
let support = SystemSupport::new(stop_tx, sys_receiver, self.ping_interval);
|
||||
|
||||
// init system arbiter and run configuration method
|
||||
SystemRunner {
|
||||
stop,
|
||||
arb,
|
||||
arb_controller,
|
||||
support,
|
||||
controller,
|
||||
system,
|
||||
_t: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -94,9 +108,10 @@ impl Builder {
|
|||
#[must_use = "SystemRunner must be run"]
|
||||
pub struct SystemRunner {
|
||||
stop: oneshot::Receiver<i32>,
|
||||
arb: SystemArbiter,
|
||||
arb_controller: ArbiterController,
|
||||
support: SystemSupport,
|
||||
controller: ArbiterController,
|
||||
system: System,
|
||||
_t: PhantomData<Rc<()>>,
|
||||
}
|
||||
|
||||
impl SystemRunner {
|
||||
|
@ -113,15 +128,14 @@ impl SystemRunner {
|
|||
|
||||
/// This function will start event loop and will finish once the
|
||||
/// `System::stop()` function is called.
|
||||
#[inline]
|
||||
pub fn run<F>(self, f: F) -> io::Result<()>
|
||||
where
|
||||
F: FnOnce() -> io::Result<()> + 'static,
|
||||
{
|
||||
let SystemRunner {
|
||||
controller,
|
||||
stop,
|
||||
arb,
|
||||
arb_controller,
|
||||
support,
|
||||
system,
|
||||
..
|
||||
} = self;
|
||||
|
@ -130,8 +144,8 @@ impl SystemRunner {
|
|||
system.config().block_on(async move {
|
||||
f()?;
|
||||
|
||||
let _ = crate::spawn(arb);
|
||||
let _ = crate::spawn(arb_controller);
|
||||
let _ = crate::spawn(support.run());
|
||||
let _ = crate::spawn(controller.run());
|
||||
match stop.await {
|
||||
Ok(code) => {
|
||||
if code != 0 {
|
||||
|
@ -149,22 +163,21 @@ impl SystemRunner {
|
|||
}
|
||||
|
||||
/// Execute a future and wait for result.
|
||||
#[inline]
|
||||
pub fn block_on<F, R>(self, fut: F) -> R
|
||||
where
|
||||
F: Future<Output = R> + 'static,
|
||||
R: 'static,
|
||||
{
|
||||
let SystemRunner {
|
||||
arb,
|
||||
arb_controller,
|
||||
controller,
|
||||
support,
|
||||
system,
|
||||
..
|
||||
} = self;
|
||||
|
||||
system.config().block_on(async move {
|
||||
let _ = crate::spawn(arb);
|
||||
let _ = crate::spawn(arb_controller);
|
||||
let _ = crate::spawn(support.run());
|
||||
let _ = crate::spawn(controller.run());
|
||||
fut.await
|
||||
})
|
||||
}
|
||||
|
@ -177,16 +190,16 @@ impl SystemRunner {
|
|||
R: 'static,
|
||||
{
|
||||
let SystemRunner {
|
||||
arb,
|
||||
arb_controller,
|
||||
controller,
|
||||
support,
|
||||
..
|
||||
} = self;
|
||||
|
||||
// run loop
|
||||
tok_io::task::LocalSet::new()
|
||||
.run_until(async move {
|
||||
let _ = crate::spawn(arb);
|
||||
let _ = crate::spawn(arb_controller);
|
||||
let _ = crate::spawn(support.run());
|
||||
let _ = crate::spawn(controller.run());
|
||||
fut.await
|
||||
})
|
||||
.await
|
||||
|
@ -242,6 +255,7 @@ mod tests {
|
|||
thread::spawn(move || {
|
||||
let runner = crate::System::build()
|
||||
.stop_on_panic(true)
|
||||
.ping_interval(25)
|
||||
.block_on(|fut| {
|
||||
let rt = tok_io::runtime::Builder::new_current_thread()
|
||||
.enable_all()
|
||||
|
@ -270,6 +284,18 @@ mod tests {
|
|||
.unwrap();
|
||||
assert_eq!(id, id2);
|
||||
|
||||
let (tx, rx) = mpsc::channel();
|
||||
sys.arbiter().spawn(async move {
|
||||
futures_timer::Delay::new(std::time::Duration::from_millis(100)).await;
|
||||
|
||||
let recs = System::list_arbiter_pings(Arbiter::current().id(), |recs| {
|
||||
recs.unwrap().clone()
|
||||
});
|
||||
let _ = tx.send(recs);
|
||||
});
|
||||
let recs = rx.recv().unwrap();
|
||||
|
||||
assert!(!recs.is_empty());
|
||||
sys.stop();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -8,7 +8,7 @@ mod system;
|
|||
|
||||
pub use self::arbiter::Arbiter;
|
||||
pub use self::builder::{Builder, SystemRunner};
|
||||
pub use self::system::System;
|
||||
pub use self::system::{Id, PingRecord, System};
|
||||
|
||||
thread_local! {
|
||||
static CB: RefCell<(TBefore, TEnter, TExit, TAfter)> = RefCell::new((
|
||||
|
@ -112,6 +112,8 @@ mod tokio {
|
|||
///
|
||||
/// This function panics if ntex system is not running.
|
||||
#[inline]
|
||||
#[doc(hidden)]
|
||||
#[deprecated]
|
||||
pub fn spawn_fn<F, R>(f: F) -> tok_io::task::JoinHandle<R::Output>
|
||||
where
|
||||
F: FnOnce() -> R + 'static,
|
||||
|
@ -196,6 +198,8 @@ mod compio {
|
|||
///
|
||||
/// This function panics if ntex system is not running.
|
||||
#[inline]
|
||||
#[doc(hidden)]
|
||||
#[deprecated]
|
||||
pub fn spawn_fn<F, R>(f: F) -> JoinHandle<R::Output>
|
||||
where
|
||||
F: FnOnce() -> R + 'static,
|
||||
|
@ -248,15 +252,38 @@ mod compio {
|
|||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
#[cfg(feature = "async-std")]
|
||||
mod asyncstd {
|
||||
use std::future::{poll_fn, Future};
|
||||
use std::{fmt, pin::Pin, task::ready, task::Context, task::Poll};
|
||||
#[cfg(feature = "neon")]
|
||||
mod neon {
|
||||
use std::task::{ready, Context, Poll};
|
||||
use std::{fmt, future::poll_fn, future::Future, pin::Pin};
|
||||
|
||||
use ntex_neon::Runtime;
|
||||
|
||||
/// Runs the provided future, blocking the current thread until the future
|
||||
/// completes.
|
||||
pub fn block_on<F: Future<Output = ()>>(fut: F) {
|
||||
async_std::task::block_on(fut);
|
||||
let rt = Runtime::new().unwrap();
|
||||
log::info!(
|
||||
"Starting neon runtime, driver {:?}",
|
||||
rt.driver().tp().name()
|
||||
);
|
||||
|
||||
rt.block_on(fut);
|
||||
}
|
||||
|
||||
/// Spawns a blocking task.
|
||||
///
|
||||
/// The task will be spawned onto a thread pool specifically dedicated
|
||||
/// to blocking tasks. This is useful to prevent long-running synchronous
|
||||
/// operations from blocking the main futures executor.
|
||||
pub fn spawn_blocking<F, T>(f: F) -> JoinHandle<T>
|
||||
where
|
||||
F: FnOnce() -> T + Send + Sync + 'static,
|
||||
T: Send + 'static,
|
||||
{
|
||||
JoinHandle {
|
||||
fut: Some(ntex_neon::spawn_blocking(f)),
|
||||
}
|
||||
}
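Usage mirrors the other runtime backends; below is a hedged sketch of offloading synchronous work. The file path is an arbitrary example, and the `neon` feature plus a running ntex system are assumed.

```rust
// Hedged sketch, not part of the diff.
async fn file_len(path: &'static str) -> std::io::Result<u64> {
    ntex_rt::spawn_blocking(move || std::fs::metadata(path).map(|m| m.len()))
        .await
        .expect("blocking task was cancelled")
}
```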
|
||||
|
||||
/// Spawn a future on the current thread. This does not create a new Arbiter
|
||||
|
@ -267,29 +294,29 @@ mod asyncstd {
|
|||
///
|
||||
/// This function panics if ntex system is not running.
|
||||
#[inline]
|
||||
pub fn spawn<F>(mut f: F) -> JoinHandle<F::Output>
|
||||
pub fn spawn<F>(f: F) -> Task<F::Output>
|
||||
where
|
||||
F: Future + 'static,
|
||||
{
|
||||
let ptr = crate::CB.with(|cb| (cb.borrow().0)());
|
||||
JoinHandle {
|
||||
fut: async_std::task::spawn_local(async move {
|
||||
if let Some(ptr) = ptr {
|
||||
let mut f = unsafe { Pin::new_unchecked(&mut f) };
|
||||
let result = poll_fn(|ctx| {
|
||||
let new_ptr = crate::CB.with(|cb| (cb.borrow().1)(ptr));
|
||||
let result = f.as_mut().poll(ctx);
|
||||
crate::CB.with(|cb| (cb.borrow().2)(new_ptr));
|
||||
result
|
||||
})
|
||||
.await;
|
||||
crate::CB.with(|cb| (cb.borrow().3)(ptr));
|
||||
let task = ntex_neon::spawn(async move {
|
||||
if let Some(ptr) = ptr {
|
||||
let mut f = std::pin::pin!(f);
|
||||
let result = poll_fn(|ctx| {
|
||||
let new_ptr = crate::CB.with(|cb| (cb.borrow().1)(ptr));
|
||||
let result = f.as_mut().poll(ctx);
|
||||
crate::CB.with(|cb| (cb.borrow().2)(new_ptr));
|
||||
result
|
||||
} else {
|
||||
f.await
|
||||
}
|
||||
}),
|
||||
}
|
||||
})
|
||||
.await;
|
||||
crate::CB.with(|cb| (cb.borrow().3)(ptr));
|
||||
result
|
||||
} else {
|
||||
f.await
|
||||
}
|
||||
});
|
||||
|
||||
Task { task: Some(task) }
|
||||
}
|
||||
|
||||
/// Executes a future on the current thread. This does not create a new Arbiter
|
||||
|
@ -300,7 +327,9 @@ mod asyncstd {
|
|||
///
|
||||
/// This function panics if ntex system is not running.
|
||||
#[inline]
|
||||
pub fn spawn_fn<F, R>(f: F) -> JoinHandle<R::Output>
|
||||
#[doc(hidden)]
|
||||
#[deprecated]
|
||||
pub fn spawn_fn<F, R>(f: F) -> Task<R::Output>
|
||||
where
|
||||
F: FnOnce() -> R + 'static,
|
||||
R: Future + 'static,
|
||||
|
@ -308,18 +337,32 @@ mod asyncstd {
|
|||
spawn(async move { f().await })
|
||||
}
|
||||
|
||||
/// Spawns a blocking task.
|
||||
///
|
||||
/// The task will be spawned onto a thread pool specifically dedicated
|
||||
/// to blocking tasks. This is useful to prevent long-running synchronous
|
||||
/// operations from blocking the main futures executor.
|
||||
pub fn spawn_blocking<F, T>(f: F) -> JoinHandle<T>
|
||||
where
|
||||
F: FnOnce() -> T + Send + 'static,
|
||||
T: Send + 'static,
|
||||
{
|
||||
JoinHandle {
|
||||
fut: async_std::task::spawn_blocking(f),
|
||||
/// A spawned task.
|
||||
pub struct Task<T> {
|
||||
task: Option<ntex_neon::Task<T>>,
|
||||
}
|
||||
|
||||
impl<T> Task<T> {
|
||||
pub fn is_finished(&self) -> bool {
|
||||
if let Some(hnd) = &self.task {
|
||||
hnd.is_finished()
|
||||
} else {
|
||||
true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Drop for Task<T> {
|
||||
fn drop(&mut self) {
|
||||
self.task.take().unwrap().detach();
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Future for Task<T> {
|
||||
type Output = Result<T, JoinError>;
|
||||
|
||||
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
Poll::Ready(Ok(ready!(Pin::new(self.task.as_mut().unwrap()).poll(cx))))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -335,128 +378,24 @@ mod asyncstd {
|
|||
impl std::error::Error for JoinError {}
|
||||
|
||||
pub struct JoinHandle<T> {
|
||||
fut: async_std::task::JoinHandle<T>,
|
||||
fut: Option<ntex_neon::JoinHandle<T>>,
|
||||
}
|
||||
|
||||
impl<T> JoinHandle<T> {
|
||||
pub fn is_finished(&self) -> bool {
|
||||
self.fut.is_none()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Future for JoinHandle<T> {
|
||||
type Output = Result<T, JoinError>;
|
||||
|
||||
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
Poll::Ready(Ok(ready!(Pin::new(&mut self.fut).poll(cx))))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
#[cfg(all(feature = "glommio", target_os = "linux"))]
|
||||
mod glommio {
|
||||
use std::future::{poll_fn, Future};
|
||||
use std::{pin::Pin, task::Context, task::Poll};
|
||||
|
||||
use futures_channel::oneshot::Canceled;
|
||||
use glomm_io::task;
|
||||
|
||||
pub type JoinError = Canceled;
|
||||
|
||||
/// Runs the provided future, blocking the current thread until the future
|
||||
/// completes.
|
||||
pub fn block_on<F: Future<Output = ()>>(fut: F) {
|
||||
let ex = glomm_io::LocalExecutor::default();
|
||||
ex.run(async move {
|
||||
let _ = fut.await;
|
||||
})
|
||||
}
|
||||
|
||||
/// Spawn a future on the current thread. This does not create a new Arbiter
|
||||
/// or Arbiter address, it is simply a helper for spawning futures on the current
|
||||
/// thread.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if ntex system is not running.
|
||||
#[inline]
|
||||
pub fn spawn<F>(mut f: F) -> JoinHandle<F::Output>
|
||||
where
|
||||
F: Future + 'static,
|
||||
F::Output: 'static,
|
||||
{
|
||||
let ptr = crate::CB.with(|cb| (cb.borrow().0)());
|
||||
JoinHandle {
|
||||
fut: Either::Left(
|
||||
glomm_io::spawn_local(async move {
|
||||
if let Some(ptr) = ptr {
|
||||
glomm_io::executor().yield_now().await;
|
||||
let mut f = unsafe { Pin::new_unchecked(&mut f) };
|
||||
let result = poll_fn(|ctx| {
|
||||
let new_ptr = crate::CB.with(|cb| (cb.borrow().1)(ptr));
|
||||
let result = f.as_mut().poll(ctx);
|
||||
crate::CB.with(|cb| (cb.borrow().2)(new_ptr));
|
||||
result
|
||||
})
|
||||
.await;
|
||||
crate::CB.with(|cb| (cb.borrow().3)(ptr));
|
||||
result
|
||||
} else {
|
||||
glomm_io::executor().yield_now().await;
|
||||
f.await
|
||||
}
|
||||
})
|
||||
.detach(),
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
/// Executes a future on the current thread. This does not create a new Arbiter
|
||||
/// or Arbiter address, it is simply a helper for executing futures on the current
|
||||
/// thread.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if ntex system is not running.
|
||||
#[inline]
|
||||
pub fn spawn_fn<F, R>(f: F) -> JoinHandle<R::Output>
|
||||
where
|
||||
F: FnOnce() -> R + 'static,
|
||||
R: Future + 'static,
|
||||
{
|
||||
spawn(async move { f().await })
|
||||
}
|
||||
|
||||
pub fn spawn_blocking<F, R>(f: F) -> JoinHandle<R>
|
||||
where
|
||||
F: FnOnce() -> R + Send + 'static,
|
||||
R: Send + 'static,
|
||||
{
|
||||
let fut = glomm_io::executor().spawn_blocking(f);
|
||||
JoinHandle {
|
||||
fut: Either::Right(Box::pin(async move { Ok(fut.await) })),
|
||||
}
|
||||
}
|
||||
|
||||
enum Either<T1, T2> {
|
||||
Left(T1),
|
||||
Right(T2),
|
||||
}
|
||||
|
||||
/// Blocking operation completion future. It resolves with results
|
||||
/// of blocking function execution.
|
||||
#[allow(clippy::type_complexity)]
|
||||
pub struct JoinHandle<T> {
|
||||
fut:
|
||||
Either<task::JoinHandle<T>, Pin<Box<dyn Future<Output = Result<T, Canceled>>>>>,
|
||||
}
|
||||
|
||||
impl<T> Future for JoinHandle<T> {
|
||||
type Output = Result<T, Canceled>;
|
||||
|
||||
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
match self.fut {
|
||||
Either::Left(ref mut f) => match Pin::new(f).poll(cx) {
|
||||
Poll::Pending => Poll::Pending,
|
||||
Poll::Ready(res) => Poll::Ready(res.ok_or(Canceled)),
|
||||
},
|
||||
Either::Right(ref mut f) => Pin::new(f).poll(cx),
|
||||
}
|
||||
Poll::Ready(
|
||||
ready!(Pin::new(self.fut.as_mut().unwrap()).poll(cx))
|
||||
.map_err(|_| JoinError)
|
||||
.and_then(|result| result.map_err(|_| JoinError)),
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -464,22 +403,14 @@ mod glommio {
|
|||
#[cfg(feature = "tokio")]
|
||||
pub use self::tokio::*;
|
||||
|
||||
#[cfg(feature = "async-std")]
|
||||
pub use self::asyncstd::*;
|
||||
|
||||
#[cfg(feature = "glommio")]
|
||||
pub use self::glommio::*;
|
||||
|
||||
#[cfg(feature = "compio")]
|
||||
pub use self::compio::*;
|
||||
|
||||
#[cfg(feature = "neon")]
|
||||
pub use self::neon::*;
|
||||
|
||||
#[allow(dead_code)]
|
||||
#[cfg(all(
|
||||
not(feature = "tokio"),
|
||||
not(feature = "async-std"),
|
||||
not(feature = "compio"),
|
||||
not(feature = "glommio")
|
||||
))]
|
||||
#[cfg(all(not(feature = "tokio"), not(feature = "compio"), not(feature = "neon")))]
|
||||
mod no_rt {
|
||||
use std::task::{Context, Poll};
|
||||
use std::{fmt, future::Future, marker::PhantomData, pin::Pin};
|
||||
|
@ -538,10 +469,5 @@ mod no_rt {
|
|||
impl std::error::Error for JoinError {}
|
||||
}
|
||||
|
||||
#[cfg(all(
|
||||
not(feature = "tokio"),
|
||||
not(feature = "async-std"),
|
||||
not(feature = "compio"),
|
||||
not(feature = "glommio")
|
||||
))]
|
||||
#[cfg(all(not(feature = "tokio"), not(feature = "compio"), not(feature = "neon")))]
|
||||
pub use self::no_rt::*;
|
||||
|
|
|
@ -1,13 +1,31 @@
|
|||
use std::collections::{HashMap, VecDeque};
|
||||
use std::sync::{atomic::AtomicUsize, atomic::Ordering, Arc};
|
||||
use std::time::{Duration, Instant};
|
||||
use std::{cell::RefCell, fmt, future::Future, pin::Pin, rc::Rc};
|
||||
|
||||
use async_channel::Sender;
|
||||
use async_channel::{Receiver, Sender};
|
||||
use futures_timer::Delay;
|
||||
|
||||
use super::arbiter::{Arbiter, SystemCommand};
|
||||
use super::arbiter::Arbiter;
|
||||
use super::builder::{Builder, SystemRunner};
|
||||
|
||||
static SYSTEM_COUNT: AtomicUsize = AtomicUsize::new(0);
|
||||
|
||||
thread_local!(
|
||||
static ARBITERS: RefCell<Arbiters> = RefCell::new(Arbiters::default());
|
||||
static PINGS: RefCell<HashMap<Id, VecDeque<PingRecord>>> =
|
||||
RefCell::new(HashMap::default());
|
||||
);
|
||||
|
||||
#[derive(Default)]
|
||||
struct Arbiters {
|
||||
all: HashMap<Id, Arbiter>,
|
||||
list: Vec<Arbiter>,
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
|
||||
pub struct Id(pub(crate) usize);
|
||||
|
||||
/// System is a runtime manager.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct System {
|
||||
|
@ -33,14 +51,17 @@ impl System {
|
|||
/// Constructs new system and sets it as current
|
||||
pub(super) fn construct(
|
||||
sys: Sender<SystemCommand>,
|
||||
arbiter: Arbiter,
|
||||
mut arbiter: Arbiter,
|
||||
config: SystemConfig,
|
||||
) -> Self {
|
||||
let id = SYSTEM_COUNT.fetch_add(1, Ordering::SeqCst);
|
||||
arbiter.sys_id = id;
|
||||
|
||||
let sys = System {
|
||||
id,
|
||||
sys,
|
||||
config,
|
||||
arbiter,
|
||||
id: SYSTEM_COUNT.fetch_add(1, Ordering::SeqCst),
|
||||
};
|
||||
System::set_current(sys.clone());
|
||||
sys
|
||||
|
@ -79,8 +100,8 @@ impl System {
|
|||
}
|
||||
|
||||
/// System id
|
||||
pub fn id(&self) -> usize {
|
||||
self.id
|
||||
pub fn id(&self) -> Id {
|
||||
Id(self.id)
|
||||
}
|
||||
|
||||
/// Stop the system
|
||||
|
@ -104,6 +125,34 @@ impl System {
|
|||
&self.arbiter
|
||||
}
|
||||
|
||||
/// Retrieves a list of all arbiters in the system.
|
||||
///
|
||||
/// This method should be called from the thread where the system has been initialized,
|
||||
/// typically the "main" thread.
|
||||
pub fn list_arbiters<F, R>(f: F) -> R
|
||||
where
|
||||
F: FnOnce(&[Arbiter]) -> R,
|
||||
{
|
||||
ARBITERS.with(|arbs| f(arbs.borrow().list.as_ref()))
|
||||
}
|
||||
|
||||
/// Retrieves a list of the most recent ping records for the specified arbiter.
|
||||
///
|
||||
/// This method should be called from the thread where the system has been initialized,
|
||||
/// typically the "main" thread.
|
||||
pub fn list_arbiter_pings<F, R>(id: Id, f: F) -> R
|
||||
where
|
||||
F: FnOnce(Option<&VecDeque<PingRecord>>) -> R,
|
||||
{
|
||||
PINGS.with(|pings| {
|
||||
if let Some(recs) = pings.borrow().get(&id) {
|
||||
f(Some(recs))
|
||||
} else {
|
||||
f(None)
|
||||
}
|
||||
})
|
||||
}
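Taken together, the two helpers above allow simple runtime introspection from the main thread; a hedged sketch, with output formatting that is illustrative only:

```rust
// Hedged sketch, not part of the diff: print each registered arbiter and the
// round-trip time of its most recent completed ping, if any.
fn dump_arbiters() {
    use ntex_rt::{Arbiter, System};

    System::list_arbiters(|arbs: &[Arbiter]| {
        for arb in arbs {
            let last_rtt = System::list_arbiter_pings(arb.id(), |recs| {
                recs.and_then(|r| r.front().and_then(|rec| rec.rtt))
            });
            println!("{:?}: last ping rtt {:?}", arb, last_rtt);
        }
    });
}
```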
|
||||
|
||||
pub(super) fn sys(&self) -> &Sender<SystemCommand> {
|
||||
&self.sys
|
||||
}
|
||||
|
@ -150,3 +199,173 @@ impl fmt::Debug for SystemConfig {
|
|||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(super) enum SystemCommand {
|
||||
Exit(i32),
|
||||
RegisterArbiter(Id, Arbiter),
|
||||
UnregisterArbiter(Id),
|
||||
}
|
||||
|
||||
pub(super) struct SystemSupport {
|
||||
stop: Option<oneshot::Sender<i32>>,
|
||||
commands: Receiver<SystemCommand>,
|
||||
ping_interval: Duration,
|
||||
}
|
||||
|
||||
impl SystemSupport {
|
||||
pub(super) fn new(
|
||||
stop: oneshot::Sender<i32>,
|
||||
commands: Receiver<SystemCommand>,
|
||||
ping_interval: usize,
|
||||
) -> Self {
|
||||
Self {
|
||||
commands,
|
||||
stop: Some(stop),
|
||||
ping_interval: Duration::from_millis(ping_interval as u64),
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) async fn run(mut self) {
|
||||
ARBITERS.with(move |arbs| {
|
||||
let mut arbiters = arbs.borrow_mut();
|
||||
arbiters.all.clear();
|
||||
arbiters.list.clear();
|
||||
});
|
||||
|
||||
loop {
|
||||
match self.commands.recv().await {
|
||||
Ok(SystemCommand::Exit(code)) => {
|
||||
log::debug!("Stopping system with {} code", code);
|
||||
|
||||
// stop arbiters
|
||||
ARBITERS.with(move |arbs| {
|
||||
let mut arbiters = arbs.borrow_mut();
|
||||
for arb in arbiters.list.drain(..) {
|
||||
arb.stop();
|
||||
}
|
||||
arbiters.all.clear();
|
||||
});
|
||||
|
||||
// stop event loop
|
||||
if let Some(stop) = self.stop.take() {
|
||||
let _ = stop.send(code);
|
||||
}
|
||||
}
|
||||
Ok(SystemCommand::RegisterArbiter(id, hnd)) => {
|
||||
crate::spawn(ping_arbiter(hnd.clone(), self.ping_interval));
|
||||
ARBITERS.with(move |arbs| {
|
||||
let mut arbiters = arbs.borrow_mut();
|
||||
arbiters.all.insert(id, hnd.clone());
|
||||
arbiters.list.push(hnd);
|
||||
});
|
||||
}
|
||||
Ok(SystemCommand::UnregisterArbiter(id)) => {
|
||||
ARBITERS.with(move |arbs| {
|
||||
let mut arbiters = arbs.borrow_mut();
|
||||
if let Some(hnd) = arbiters.all.remove(&id) {
|
||||
for (idx, arb) in arbiters.list.iter().enumerate() {
|
||||
if &hnd == arb {
|
||||
arbiters.list.remove(idx);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
Err(_) => {
|
||||
log::debug!("System stopped");
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
pub struct PingRecord {
|
||||
/// Ping start time
|
||||
pub start: Instant,
|
||||
/// Round-trip time; if the value is not set, the ping is still in progress
|
||||
pub rtt: Option<Duration>,
|
||||
}
|
||||
|
||||
async fn ping_arbiter(arb: Arbiter, interval: Duration) {
|
||||
loop {
|
||||
Delay::new(interval).await;
|
||||
|
||||
// check if arbiter is still active
|
||||
let is_alive = ARBITERS.with(|arbs| arbs.borrow().all.contains_key(&arb.id()));
|
||||
|
||||
if !is_alive {
|
||||
PINGS.with(|pings| pings.borrow_mut().remove(&arb.id()));
|
||||
break;
|
||||
}
|
||||
|
||||
// calc rtt
|
||||
let start = Instant::now();
|
||||
PINGS.with(|pings| {
|
||||
let mut p = pings.borrow_mut();
|
||||
let recs = p.entry(arb.id()).or_default();
|
||||
recs.push_front(PingRecord { start, rtt: None });
|
||||
recs.truncate(10);
|
||||
});
|
||||
|
||||
let result = arb
|
||||
.spawn_with(|| async {
|
||||
yield_to().await;
|
||||
})
|
||||
.await;
|
||||
|
||||
if result.is_err() {
|
||||
break;
|
||||
}
|
||||
|
||||
PINGS.with(|pings| {
|
||||
pings
|
||||
.borrow_mut()
|
||||
.get_mut(&arb.id())
|
||||
.unwrap()
|
||||
.front_mut()
|
||||
.unwrap()
|
||||
.rtt = Some(Instant::now() - start);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
async fn yield_to() {
|
||||
use std::task::{Context, Poll};
|
||||
|
||||
struct Yield {
|
||||
completed: bool,
|
||||
}
|
||||
|
||||
impl Future for Yield {
|
||||
type Output = ();
|
||||
|
||||
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
|
||||
if self.completed {
|
||||
return Poll::Ready(());
|
||||
}
|
||||
self.completed = true;
|
||||
cx.waker().wake_by_ref();
|
||||
Poll::Pending
|
||||
}
|
||||
}
|
||||
|
||||
Yield { completed: false }.await;
|
||||
}
|
||||
|
||||
pub(super) trait FnExec: Send + 'static {
|
||||
fn call_box(self: Box<Self>);
|
||||
}
|
||||
|
||||
impl<F> FnExec for F
|
||||
where
|
||||
F: FnOnce() + Send + 'static,
|
||||
{
|
||||
#[allow(clippy::boxed_local)]
|
||||
fn call_box(self: Box<Self>) {
|
||||
(*self)()
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -1,5 +1,17 @@
# Changes

## [2.7.3] - 2025-03-28

* Better worker availability handling

## [2.7.2] - 2025-03-27

* Handle paused state

## [2.7.1] - 2025-02-28

* Fix set core affinity out of worker start #508

## [2.7.0] - 2025-01-31

* Cpu affinity support for workers
@@ -1,6 +1,6 @@
[package]
name = "ntex-server"
version = "2.7.0"
version = "2.7.4"
authors = ["ntex contributors <team@ntex.rs>"]
description = "Server for ntex framework"
keywords = ["network", "framework", "async", "futures"]

@@ -22,13 +22,13 @@ ntex-service = "3.4"
ntex-rt = "0.4"
ntex-util = "2.8"

async-channel = "2"
async-broadcast = "0.7"
core_affinity = "0.8"
polling = "3.3"
log = "0.4"
socket2 = "0.5"
oneshot = { version = "0.1", default-features = false, features = ["async"] }
async-channel = { workspace = true }
atomic-waker = { workspace = true }
core_affinity = { workspace = true }
oneshot = { workspace = true }
polling = { workspace = true }
log = { workspace = true }
socket2 = { workspace = true }

[dev-dependencies]
ntex = "2"
@ -139,14 +139,7 @@ impl<F: ServerConfiguration> ServerManager<F> {
|
|||
fn start_worker<F: ServerConfiguration>(mgr: ServerManager<F>, cid: Option<CoreId>) {
|
||||
let _ = ntex_rt::spawn(async move {
|
||||
let id = mgr.next_id();
|
||||
|
||||
if let Some(cid) = cid {
|
||||
if core_affinity::set_for_current(cid) {
|
||||
log::info!("Set affinity to {:?} for worker {:?}", cid, id);
|
||||
}
|
||||
}
|
||||
|
||||
let mut wrk = Worker::start(id, mgr.factory());
|
||||
let mut wrk = Worker::start(id, mgr.factory(), cid);
|
||||
|
||||
loop {
|
||||
match wrk.status() {
|
||||
|
@ -156,7 +149,7 @@ fn start_worker<F: ServerConfiguration>(mgr: ServerManager<F>, cid: Option<CoreI
|
|||
mgr.unavailable(wrk);
|
||||
sleep(RESTART_DELAY).await;
|
||||
if !mgr.stopping() {
|
||||
wrk = Worker::start(id, mgr.factory());
|
||||
wrk = Worker::start(id, mgr.factory(), cid);
|
||||
} else {
|
||||
return;
|
||||
}
|
||||
|
@ -187,7 +180,7 @@ impl<F: ServerConfiguration> HandleCmdState<F> {
|
|||
fn process(&mut self, mut item: F::Item) {
|
||||
loop {
|
||||
if !self.workers.is_empty() {
|
||||
if self.next > self.workers.len() {
|
||||
if self.next >= self.workers.len() {
|
||||
self.next = self.workers.len() - 1;
|
||||
}
|
||||
match self.workers[self.next].send(item) {
|
||||
|
@ -218,10 +211,9 @@ impl<F: ServerConfiguration> HandleCmdState<F> {
|
|||
match upd {
|
||||
Update::Available(worker) => {
|
||||
self.workers.push(worker);
|
||||
self.workers.sort();
|
||||
if self.workers.len() == 1 {
|
||||
self.mgr.resume();
|
||||
} else {
|
||||
self.workers.sort();
|
||||
}
|
||||
}
|
||||
Update::Unavailable(worker) => {
|
||||
|
@ -240,6 +232,9 @@ impl<F: ServerConfiguration> HandleCmdState<F> {
|
|||
if let Err(item) = self.workers[0].send(item) {
|
||||
self.backlog.push_back(item);
|
||||
self.workers.remove(0);
|
||||
if self.workers.is_empty() {
|
||||
self.mgr.pause();
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -92,12 +92,14 @@ impl AcceptLoop {
|
|||
|
||||
/// Start accept loop
|
||||
pub fn start(mut self, socks: Vec<(Token, Listener)>, srv: Server) {
|
||||
let (tx, rx_start) = oneshot::channel();
|
||||
let (rx, poll) = self
|
||||
.inner
|
||||
.take()
|
||||
.expect("AcceptLoop cannot be used multiple times");
|
||||
|
||||
Accept::start(
|
||||
tx,
|
||||
rx,
|
||||
poll,
|
||||
socks,
|
||||
|
@ -105,6 +107,8 @@ impl AcceptLoop {
|
|||
self.notify.clone(),
|
||||
self.status_handler.take(),
|
||||
);
|
||||
|
||||
let _ = rx_start.recv();
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -121,6 +125,7 @@ impl fmt::Debug for AcceptLoop {
|
|||
struct Accept {
|
||||
poller: Arc<Poller>,
|
||||
rx: mpsc::Receiver<AcceptorCommand>,
|
||||
tx: Option<oneshot::Sender<()>>,
|
||||
sockets: Vec<ServerSocketInfo>,
|
||||
srv: Server,
|
||||
notify: AcceptNotify,
|
||||
|
@ -131,6 +136,7 @@ struct Accept {
|
|||
|
||||
impl Accept {
|
||||
fn start(
|
||||
tx: oneshot::Sender<()>,
|
||||
rx: mpsc::Receiver<AcceptorCommand>,
|
||||
poller: Arc<Poller>,
|
||||
socks: Vec<(Token, Listener)>,
|
||||
|
@ -145,11 +151,12 @@ impl Accept {
|
|||
.name("ntex-server accept loop".to_owned())
|
||||
.spawn(move || {
|
||||
System::set_current(sys);
|
||||
Accept::new(rx, poller, socks, srv, notify, status_handler).poll()
|
||||
Accept::new(tx, rx, poller, socks, srv, notify, status_handler).poll()
|
||||
});
|
||||
}
|
||||
|
||||
fn new(
|
||||
tx: oneshot::Sender<()>,
|
||||
rx: mpsc::Receiver<AcceptorCommand>,
|
||||
poller: Arc<Poller>,
|
||||
socks: Vec<(Token, Listener)>,
|
||||
|
@ -175,6 +182,7 @@ impl Accept {
|
|||
notify,
|
||||
srv,
|
||||
status_handler,
|
||||
tx: Some(tx),
|
||||
backpressure: true,
|
||||
backlog: VecDeque::new(),
|
||||
}
|
||||
|
@ -192,19 +200,23 @@ impl Accept {
|
|||
// Create storage for events
|
||||
let mut events = Events::with_capacity(NonZeroUsize::new(512).unwrap());
|
||||
|
||||
let mut timeout = Some(Duration::ZERO);
|
||||
loop {
|
||||
if let Err(e) = self.poller.wait(&mut events, None) {
|
||||
if e.kind() == io::ErrorKind::Interrupted {
|
||||
continue;
|
||||
} else {
|
||||
if let Err(e) = self.poller.wait(&mut events, timeout) {
|
||||
if e.kind() != io::ErrorKind::Interrupted {
|
||||
panic!("Cannot wait for events in poller: {}", e)
|
||||
}
|
||||
} else if timeout.is_some() {
|
||||
timeout = None;
|
||||
let _ = self.tx.take().unwrap().send(());
|
||||
}
|
||||
|
||||
for event in events.iter() {
|
||||
let readd = self.accept(event.key);
|
||||
if readd {
|
||||
self.add_source(event.key);
|
||||
for idx in 0..self.sockets.len() {
|
||||
if self.sockets[idx].registered.get() {
|
||||
let readd = self.accept(idx);
|
||||
if readd {
|
||||
self.add_source(idx);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -59,8 +59,13 @@ where
|
|||
.workers(1)
|
||||
.disable_signals()
|
||||
.run();
|
||||
tx.send((system, local_addr, server))
|
||||
.expect("Failed to send Server to TestServer");
|
||||
|
||||
ntex_rt::spawn(async move {
|
||||
ntex_util::time::sleep(ntex_util::time::Millis(75)).await;
|
||||
tx.send((system, local_addr, server))
|
||||
.expect("Failed to send Server to TestServer");
|
||||
});
|
||||
|
||||
Ok(())
|
||||
})
|
||||
});
|
||||
|
|
|
@ -23,9 +23,14 @@ impl Default for WorkerPool {
|
|||
impl WorkerPool {
|
||||
/// Create new Server builder instance
|
||||
pub fn new() -> Self {
|
||||
let num = core_affinity::get_core_ids()
|
||||
.map(|v| v.len())
|
||||
.unwrap_or_else(|| {
|
||||
std::thread::available_parallelism().map_or(2, std::num::NonZeroUsize::get)
|
||||
});
|
||||
|
||||
WorkerPool {
|
||||
num: std::thread::available_parallelism()
|
||||
.map_or(2, std::num::NonZeroUsize::get),
|
||||
num,
|
||||
no_signals: false,
|
||||
stop_runtime: false,
|
||||
shutdown_timeout: DEFAULT_SHUTDOWN_TIMEOUT,
|
||||
|
|
|
@ -2,8 +2,9 @@ use std::sync::atomic::{AtomicBool, Ordering};
|
|||
use std::task::{ready, Context, Poll};
|
||||
use std::{cmp, future::poll_fn, future::Future, hash, pin::Pin, sync::Arc};
|
||||
|
||||
use async_broadcast::{self as bus, broadcast};
|
||||
use async_channel::{unbounded, Receiver, Sender};
|
||||
use atomic_waker::AtomicWaker;
|
||||
use core_affinity::CoreId;
|
||||
|
||||
use ntex_rt::{spawn, Arbiter};
|
||||
use ntex_service::{Pipeline, PipelineBinding, Service, ServiceFactory};
|
||||
|
@ -77,7 +78,7 @@ pub struct WorkerStop(oneshot::Receiver<bool>);
|
|||
|
||||
impl<T> Worker<T> {
|
||||
/// Start worker.
|
||||
pub fn start<F>(id: WorkerId, cfg: F) -> Worker<T>
|
||||
pub fn start<F>(id: WorkerId, cfg: F, cid: Option<CoreId>) -> Worker<T>
|
||||
where
|
||||
T: Send + 'static,
|
||||
F: ServerConfiguration<Item = T>,
|
||||
|
@ -87,15 +88,21 @@ impl<T> Worker<T> {
|
|||
let (avail, avail_tx) = WorkerAvailability::create();
|
||||
|
||||
Arbiter::default().exec_fn(move || {
|
||||
if let Some(cid) = cid {
|
||||
if core_affinity::set_for_current(cid) {
|
||||
log::info!("Set affinity to {:?} for worker {:?}", cid, id);
|
||||
}
|
||||
}
|
||||
|
||||
let _ = spawn(async move {
|
||||
log::info!("Starting worker {:?}", id);
|
||||
|
||||
log::debug!("Creating server instance in {:?}", id);
|
||||
let factory = cfg.create().await;
|
||||
log::debug!("Server instance has been created in {:?}", id);
|
||||
|
||||
match create(id, rx1, rx2, factory, avail_tx).await {
|
||||
Ok((svc, wrk)) => {
|
||||
log::debug!("Server instance has been created in {:?}", id);
|
||||
run_worker(svc, wrk).await;
|
||||
}
|
||||
Err(e) => {
|
||||
|
@ -144,10 +151,8 @@ impl<T> Worker<T> {
|
|||
if self.failed.load(Ordering::Acquire) {
|
||||
WorkerStatus::Failed
|
||||
} else {
|
||||
// cleanup updates
|
||||
while self.avail.notify.try_recv().is_ok() {}
|
||||
|
||||
if self.avail.notify.recv_direct().await.is_err() {
|
||||
self.avail.wait_for_update().await;
|
||||
if self.avail.failed() {
|
||||
self.failed.store(true, Ordering::Release);
|
||||
}
|
||||
self.status()
|
||||
|
@ -189,52 +194,85 @@ impl Future for WorkerStop {
|
|||
|
||||
#[derive(Debug, Clone)]
|
||||
struct WorkerAvailability {
|
||||
notify: bus::Receiver<()>,
|
||||
available: Arc<AtomicBool>,
|
||||
inner: Arc<Inner>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
struct WorkerAvailabilityTx {
|
||||
notify: bus::Sender<()>,
|
||||
available: Arc<AtomicBool>,
|
||||
inner: Arc<Inner>,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct Inner {
|
||||
waker: AtomicWaker,
|
||||
updated: AtomicBool,
|
||||
available: AtomicBool,
|
||||
failed: AtomicBool,
|
||||
}
|
||||
|
||||
impl WorkerAvailability {
|
||||
fn create() -> (Self, WorkerAvailabilityTx) {
|
||||
let (mut tx, rx) = broadcast(16);
|
||||
tx.set_overflow(true);
|
||||
let inner = Arc::new(Inner {
|
||||
waker: AtomicWaker::new(),
|
||||
updated: AtomicBool::new(false),
|
||||
available: AtomicBool::new(false),
|
||||
failed: AtomicBool::new(false),
|
||||
});
|
||||
|
||||
let avail = WorkerAvailability {
|
||||
notify: rx,
|
||||
available: Arc::new(AtomicBool::new(false)),
|
||||
};
|
||||
let avail_tx = WorkerAvailabilityTx {
|
||||
notify: tx,
|
||||
available: avail.available.clone(),
|
||||
inner: inner.clone(),
|
||||
};
|
||||
let avail_tx = WorkerAvailabilityTx { inner };
|
||||
(avail, avail_tx)
|
||||
}
|
||||
|
||||
fn failed(&self) -> bool {
|
||||
self.inner.failed.load(Ordering::Acquire)
|
||||
}
|
||||
|
||||
fn available(&self) -> bool {
|
||||
self.available.load(Ordering::Acquire)
|
||||
self.inner.available.load(Ordering::Acquire)
|
||||
}
|
||||
|
||||
async fn wait_for_update(&self) {
|
||||
poll_fn(|cx| {
|
||||
if self.inner.updated.load(Ordering::Acquire) {
|
||||
self.inner.updated.store(false, Ordering::Release);
|
||||
Poll::Ready(())
|
||||
} else {
|
||||
self.inner.waker.register(cx.waker());
|
||||
Poll::Pending
|
||||
}
|
||||
})
|
||||
.await;
|
||||
}
|
||||
}
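The updated-flag plus waker handshake introduced here is a small reusable pattern built on the `atomic-waker` crate; below is a hedged standalone sketch of the same idea. The `Flag` type is illustrative, not the actual ntex-server type.

```rust
use std::sync::atomic::{AtomicBool, Ordering};
use std::{future::poll_fn, task::Poll};

use atomic_waker::AtomicWaker;

// Hedged sketch, not part of the diff: one side flips a flag and wakes the
// waker, the other side awaits until it observes the flag. In practice the
// Flag would be shared between the two sides through an Arc.
struct Flag {
    waker: AtomicWaker,
    updated: AtomicBool,
}

impl Flag {
    fn new() -> Flag {
        Flag {
            waker: AtomicWaker::new(),
            updated: AtomicBool::new(false),
        }
    }

    fn notify(&self) {
        self.updated.store(true, Ordering::Release);
        self.waker.wake();
    }

    async fn wait(&self) {
        poll_fn(|cx| {
            // register first, then re-check, so a notify() racing with this
            // poll cannot be lost
            self.waker.register(cx.waker());
            if self.updated.swap(false, Ordering::AcqRel) {
                Poll::Ready(())
            } else {
                Poll::Pending
            }
        })
        .await;
    }
}
```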
|
||||
|
||||
impl WorkerAvailabilityTx {
|
||||
fn set(&self, val: bool) {
|
||||
let old = self.available.swap(val, Ordering::Release);
|
||||
if !old && val {
|
||||
let _ = self.notify.try_broadcast(());
|
||||
let old = self.inner.available.swap(val, Ordering::Release);
|
||||
if old != val {
|
||||
self.inner.updated.store(true, Ordering::Release);
|
||||
self.inner.waker.wake();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for WorkerAvailabilityTx {
|
||||
fn drop(&mut self) {
|
||||
self.inner.failed.store(true, Ordering::Release);
|
||||
self.inner.updated.store(true, Ordering::Release);
|
||||
self.inner.available.store(false, Ordering::Release);
|
||||
self.inner.waker.wake();
|
||||
}
|
||||
}
|
||||
|
||||
/// Service worker
|
||||
///
|
||||
/// Worker accepts messages via an unbounded channel and starts processing.
|
||||
struct WorkerSt<T, F: ServiceFactory<T>> {
|
||||
id: WorkerId,
|
||||
rx: Pin<Box<dyn Stream<Item = T>>>,
|
||||
rx: Receiver<T>,
|
||||
stop: Pin<Box<dyn Stream<Item = Shutdown>>>,
|
||||
factory: F,
|
||||
availability: WorkerAvailabilityTx,
|
||||
|
@ -246,25 +284,43 @@ where
|
|||
F: ServiceFactory<T> + 'static,
|
||||
{
|
||||
loop {
|
||||
let mut recv = std::pin::pin!(wrk.rx.recv());
|
||||
let fut = poll_fn(|cx| {
|
||||
ready!(svc.poll_ready(cx)?);
|
||||
|
||||
if let Some(item) = ready!(Pin::new(&mut wrk.rx).poll_next(cx)) {
|
||||
let fut = svc.call(item);
|
||||
let _ = spawn(async move {
|
||||
let _ = fut.await;
|
||||
});
|
||||
match svc.poll_ready(cx) {
|
||||
Poll::Ready(Ok(())) => {
|
||||
wrk.availability.set(true);
|
||||
}
|
||||
Poll::Ready(Err(err)) => {
|
||||
wrk.availability.set(false);
|
||||
return Poll::Ready(Err(err));
|
||||
}
|
||||
Poll::Pending => {
|
||||
wrk.availability.set(false);
|
||||
return Poll::Pending;
|
||||
}
|
||||
}
|
||||
|
||||
match ready!(recv.as_mut().poll(cx)) {
|
||||
Ok(item) => {
|
||||
let fut = svc.call(item);
|
||||
let _ = spawn(async move {
|
||||
let _ = fut.await;
|
||||
});
|
||||
Poll::Ready(Ok::<_, F::Error>(true))
|
||||
}
|
||||
Err(_) => {
|
||||
log::error!("Server is gone");
|
||||
Poll::Ready(Ok(false))
|
||||
}
|
||||
}
|
||||
Poll::Ready(Ok::<(), F::Error>(()))
|
||||
});
|
||||
|
||||
match select(fut, stream_recv(&mut wrk.stop)).await {
|
||||
Either::Left(Ok(())) => continue,
|
||||
Either::Left(Ok(true)) => continue,
|
||||
Either::Left(Err(_)) => {
|
||||
let _ = ntex_rt::spawn(async move {
|
||||
svc.shutdown().await;
|
||||
});
|
||||
wrk.availability.set(false);
|
||||
}
|
||||
Either::Right(Some(Shutdown { timeout, result })) => {
|
||||
wrk.availability.set(false);
|
||||
|
@ -278,7 +334,8 @@ where
|
|||
stop_svc(wrk.id, svc, timeout, Some(result)).await;
|
||||
return;
|
||||
}
|
||||
Either::Right(None) => {
|
||||
Either::Left(Ok(false)) | Either::Right(None) => {
|
||||
wrk.availability.set(false);
|
||||
stop_svc(wrk.id, svc, STOP_TIMEOUT, None).await;
|
||||
return;
|
||||
}
|
||||
|
@ -288,7 +345,6 @@ where
|
|||
loop {
|
||||
match select(wrk.factory.create(()), stream_recv(&mut wrk.stop)).await {
|
||||
Either::Left(Ok(service)) => {
|
||||
wrk.availability.set(true);
|
||||
svc = Pipeline::new(service).bind();
|
||||
break;
|
||||
}
|
||||
|
@ -329,8 +385,6 @@ where
|
|||
{
|
||||
availability.set(false);
|
||||
let factory = factory?;
|
||||
|
||||
let rx = Box::pin(rx);
|
||||
let mut stop = Box::pin(stop);
|
||||
|
||||
let svc = match select(factory.create(()), stream_recv(&mut stop)).await {
|
||||
|
@ -349,9 +403,9 @@ where
|
|||
svc,
|
||||
WorkerSt {
|
||||
id,
|
||||
rx,
|
||||
factory,
|
||||
availability,
|
||||
rx: Box::pin(rx),
|
||||
stop: Box::pin(stop),
|
||||
},
|
||||
))
|
||||
|
|
|
@ -13,9 +13,8 @@ async fn main() -> io::Result<()> {
|
|||
println!("Started openssl echp server: 127.0.0.1:8443");
|
||||
|
||||
// load ssl keys
|
||||
let cert_file =
|
||||
&mut BufReader::new(File::open("../ntex-tls/examples/cert.pem").unwrap());
|
||||
let key_file = &mut BufReader::new(File::open("../ntex-tls/examples/key.pem").unwrap());
|
||||
let cert_file = &mut BufReader::new(File::open("../examples/cert.pem").unwrap());
|
||||
let key_file = &mut BufReader::new(File::open("../examples/key.pem").unwrap());
|
||||
let keys = rustls_pemfile::private_key(key_file).unwrap().unwrap();
|
||||
let cert_chain = rustls_pemfile::certs(cert_file)
|
||||
.collect::<Result<Vec<_>, _>>()
|
||||
|
|
|
@ -8,18 +8,18 @@ use tls_openssl::ssl::{self, SslFiletype, SslMethod};
|
|||
|
||||
#[ntex::main]
|
||||
async fn main() -> io::Result<()> {
|
||||
//std::env::set_var("RUST_LOG", "trace");
|
||||
//env_logger::init();
|
||||
std::env::set_var("RUST_LOG", "trace");
|
||||
let _ = env_logger::try_init();
|
||||
|
||||
println!("Started openssl web server: 127.0.0.1:8443");
|
||||
|
||||
// load ssl keys
|
||||
let mut builder = ssl::SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
|
||||
builder
|
||||
.set_private_key_file("../tests/key.pem", SslFiletype::PEM)
|
||||
.set_private_key_file("./examples/key.pem", SslFiletype::PEM)
|
||||
.unwrap();
|
||||
builder
|
||||
.set_certificate_chain_file("../tests/cert.pem")
|
||||
.set_certificate_chain_file("./examples/cert.pem")
|
||||
.unwrap();
|
||||
|
||||
// h2 alpn config
|
||||
|
|
|
@@ -1,5 +1,11 @@
# Changes

## [2.10.0] - 2025-03-12

* Add "Inplace" channel

* Expose "yield_to" helper

## [2.9.0] - 2025-01-15

* Add EitherService/EitherServiceFactory
@@ -1,6 +1,6 @@
[package]
name = "ntex-util"
version = "2.9.0"
version = "2.10.0"
authors = ["ntex contributors <team@ntex.rs>"]
description = "Utilities for ntex framework"
keywords = ["network", "framework", "async", "futures"]
ntex-util/src/channel/inplace.rs (new file, 81 lines)

@@ -0,0 +1,81 @@
//! A futures-aware bounded(1) channel.
use std::{cell::Cell, fmt, future::poll_fn, task::Context, task::Poll};

use crate::task::LocalWaker;

/// Creates a new futures-aware channel.
pub fn channel<T>() -> Inplace<T> {
    Inplace {
        value: Cell::new(None),
        rx_task: LocalWaker::new(),
    }
}

/// A futures-aware bounded(1) channel.
pub struct Inplace<T> {
    value: Cell<Option<T>>,
    rx_task: LocalWaker,
}

// The channels do not ever project Pin to the inner T
impl<T> Unpin for Inplace<T> {}

impl<T> fmt::Debug for Inplace<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Inplace<T>")
    }
}

impl<T> Inplace<T> {
    /// Set a successful result.
    ///
    /// If the value is successfully enqueued for the remote end to receive,
    /// then `Ok(())` is returned. If the previous value has not been consumed
    /// yet, then `Err` is returned with the value provided.
    pub fn send(&self, val: T) -> Result<(), T> {
        if let Some(v) = self.value.take() {
            self.value.set(Some(v));
            Err(val)
        } else {
            self.value.set(Some(val));
            self.rx_task.wake();
            Ok(())
        }
    }

    /// Wait until a value is ready and return it
    pub async fn recv(&self) -> T {
        poll_fn(|cx| self.poll_recv(cx)).await
    }

    /// Polls the channel to determine if a value is ready
    pub fn poll_recv(&self, cx: &mut Context<'_>) -> Poll<T> {
        // If we've got a value, then skip the logic below as we're done.
        if let Some(val) = self.value.take() {
            return Poll::Ready(val);
        }

        // Otherwise register the current task to be woken on the next send.
        self.rx_task.register(cx.waker());
        Poll::Pending
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::future::lazy;

    #[ntex_macros::rt_test2]
    async fn test_inplace() {
        let ch = channel();
        assert_eq!(lazy(|cx| ch.poll_recv(cx)).await, Poll::Pending);

        assert!(ch.send(1).is_ok());
        assert!(ch.send(2) == Err(2));
        assert_eq!(lazy(|cx| ch.poll_recv(cx)).await, Poll::Ready(1));

        assert!(ch.send(1).is_ok());
        assert_eq!(ch.recv().await, 1);
    }
}
|
|
@@ -2,6 +2,7 @@

mod cell;
pub mod condition;
pub mod inplace;
pub mod mpsc;
pub mod oneshot;
pub mod pool;
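For orientation, here is a minimal sketch of how the new `Inplace` channel can be used; the `ntex_util::channel::inplace` path follows from the `pub mod inplace;` registration above, while the `#[ntex::main]` wrapper is purely illustrative.

```rust
use ntex_util::channel::inplace;

#[ntex::main]
async fn main() {
    // Bounded(1): the slot holds at most one value at a time.
    let ch = inplace::channel::<u32>();

    assert!(ch.send(1).is_ok());
    // A second send before the value is consumed is rejected,
    // handing the value back to the caller.
    assert_eq!(ch.send(2), Err(2));

    // recv() takes the stored value and frees the slot again.
    assert_eq!(ch.recv().await, 1);
    assert!(ch.send(3).is_ok());
    assert_eq!(ch.recv().await, 3);
}
```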
@@ -91,7 +91,6 @@ impl fmt::Debug for LocalWaker {
    }
}

#[doc(hidden)]
/// Yields execution back to the current runtime.
pub async fn yield_to() {
    use std::{future::Future, pin::Pin, task::Context, task::Poll};
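The `yield_to` helper named in the ntex-util changelog cooperatively hands one scheduling step back to the runtime. A small usage sketch; the `ntex_util::task::yield_to` import path is an assumption based on the helper being added next to `LocalWaker` in the task module.

```rust
use ntex_util::task::yield_to;

async fn process(items: Vec<u64>) -> u64 {
    let mut sum = 0u64;
    for (i, item) in items.into_iter().enumerate() {
        sum = sum.wrapping_add(item);
        // Periodically let other tasks on this thread make progress
        // instead of hogging the executor inside a long loop.
        if i % 1024 == 0 {
            yield_to().await;
        }
    }
    sum
}

#[ntex::main]
async fn main() {
    let total = process((0u64..100_000).collect()).await;
    println!("sum = {total}");
}
```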
@@ -1,5 +1,37 @@
# Changes

## [2.12.4] - 2025-03-28

* http: Return PayloadError::Incomplete on server disconnect

* web: Expose WebStack for external wrapper support in downstream crates #542

## [2.12.3] - 2025-03-22

* web: Export web::app_service::AppService #534

* http: Add delay for test server availability, which could otherwise cause a connect race

## [2.12.2] - 2025-03-15

* http: Allow running the publish future to completion in case of error

* http: Remove brotli support

## [2.12.1] - 2025-03-14

* Allow disabling test logging (no-test-logging feature)

## [2.12.0] - 2025-03-12

* Add neon runtime support

* Check test server availability before using it

* Drop glommio support

* Drop async-std support

## [2.11.0] - 2025-01-31

* CPU affinity support for server
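The 2.12.4 entry changes how a mid-body server disconnect surfaces on the client side. A hedged sketch of what handling that looks like with the ntex HTTP client; the error-handling shape and the `main` wrapper are illustrative.

```rust
use ntex::http::client::Client;
use ntex::http::error::PayloadError;

async fn fetch(url: &str) {
    let client = Client::build().finish();
    let mut response = client.get(url).send().await.unwrap();

    match response.body().await {
        Ok(bytes) => println!("received {} bytes", bytes.len()),
        // Since 2.12.4 a peer that goes away before the body is complete
        // is reported as PayloadError::Incomplete instead of a silent EOF.
        Err(PayloadError::Incomplete(reason)) => {
            eprintln!("server disconnected mid-body: {reason:?}")
        }
        Err(err) => eprintln!("payload error: {err}"),
    }
}

#[ntex::main]
async fn main() {
    fetch("http://127.0.0.1:8080/").await;
}
```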
@@ -1,6 +1,6 @@
[package]
name = "ntex"
version = "2.11.0"
version = "2.12.4"
authors = ["ntex contributors <team@ntex.rs>"]
description = "Framework for composable network services"
readme = "README.md"
@@ -18,7 +18,7 @@ edition = "2021"
rust-version = "1.75"

[package.metadata.docs.rs]
features = ["tokio", "openssl", "rustls", "compress", "cookie", "ws", "brotli", "ntex-tls/rustls-ring"]
features = ["tokio", "openssl", "rustls", "compress", "cookie", "ws", "ntex-tls/rustls-ring"]

[lib]
name = "ntex"
@@ -45,20 +45,20 @@ url = ["url-pkg"]
# tokio runtime
tokio = ["ntex-net/tokio"]

# glommio runtime
glommio = ["ntex-net/glommio"]

# async-std runtime
async-std = ["ntex-net/async-std"]

# compio runtime
compio = ["ntex-net/compio"]

# neon runtime
neon = ["ntex-net/neon"]

# neon runtime
neon-uring = ["ntex-net/neon", "ntex-net/io-uring"]

# websocket support
ws = ["dep:sha-1"]

# brotli2 support
brotli = ["dep:brotli2"]
# disable [ntex::test] logging configuration
no-test-logging = []

[dependencies]
ntex-codec = "0.6"
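The runtime is purely a Cargo feature choice (tokio, compio, neon, neon-uring); the async application code itself does not change. A small illustrative sketch, assuming whichever runtime feature is enabled:

```rust
use ntex::time::{sleep, Millis};

#[ntex::main]
async fn main() {
    // Spawn a background task on the current runtime, whichever one
    // was selected through the ntex feature flags above.
    ntex::rt::spawn(async {
        sleep(Millis(10)).await;
        println!("background task done");
    });

    sleep(Millis(50)).await;
    println!("main task done");
}
```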
@@ -68,11 +68,11 @@ ntex-service = "3.4"
ntex-macros = "0.1"
ntex-util = "2.8"
ntex-bytes = "0.1.27"
ntex-server = "2.7"
ntex-h2 = "1.8.1"
ntex-rt = "0.4.22"
ntex-io = "2.9"
ntex-net = "2.4"
ntex-server = "2.7.4"
ntex-h2 = "1.8.6"
ntex-rt = "0.4.27"
ntex-io = "2.11"
ntex-net = "2.5.10"
ntex-tls = "2.3"

base64 = "0.22"
@@ -83,6 +83,7 @@ pin-project-lite = "0.2"
regex = { version = "1.11", default-features = false, features = ["std"] }
serde = { version = "1", features = ["derive"] }
sha-1 = { version = "0.10", optional = true }
env_logger = { version = "0.11", default-features = false }
thiserror = "1"
nanorand = { version = "0.7", default-features = false, features = [
    "std",
@@ -108,13 +109,12 @@ tls-rustls = { version = "0.23", package = "rustls", optional = true, default-fe
webpki-roots = { version = "0.26", optional = true }

# compression
brotli2 = { version = "0.3.2", optional = true }
flate2 = { version = "1.0", optional = true }

[dev-dependencies]
env_logger = "0.11"
rand = "0.8"
time = "0.3"
oneshot = "0.1"
futures-util = "0.3"
tls-openssl = { version = "0.10", package = "openssl" }
tls-rustls = { version = "0.23", package = "rustls", features = ["ring", "std"], default-features = false }
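With the brotli2 dependency dropped, response compression behind the `compress` feature relies on flate2 (gzip/deflate); the middleware usage itself is unchanged. A small sketch, where the `ContentEncoding` import path is an assumption:

```rust
use ntex::http::header::ContentEncoding;
use ntex::web::{self, middleware::Compress, App, HttpResponse, HttpServer};

#[ntex::main]
async fn main() -> std::io::Result<()> {
    HttpServer::new(|| {
        App::new()
            // gzip/deflate still work; brotli is no longer available.
            .wrap(Compress::new(ContentEncoding::Gzip))
            .service(web::resource("/").route(web::to(|| async {
                HttpResponse::Ok().body("hello world")
            })))
    })
    .bind("127.0.0.1:8080")?
    .run()
    .await
}
```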
@ -1,13 +1,11 @@
|
|||
use std::{
|
||||
future::poll_fn, io, io::Write, pin::Pin, task::Context, task::Poll, time::Instant,
|
||||
};
|
||||
use std::{future::poll_fn, io, io::Write, pin::Pin, task, task::Poll, time::Instant};
|
||||
|
||||
use crate::http::body::{BodySize, MessageBody};
|
||||
use crate::http::error::PayloadError;
|
||||
use crate::http::h1;
|
||||
use crate::http::header::{HeaderMap, HeaderValue, HOST};
|
||||
use crate::http::message::{RequestHeadType, ResponseHead};
|
||||
use crate::http::payload::{Payload, PayloadStream};
|
||||
use crate::http::{h1, Version};
|
||||
use crate::io::{IoBoxed, RecvError};
|
||||
use crate::time::{timeout_checked, Millis};
|
||||
use crate::util::{ready, BufMut, Bytes, BytesMut, Stream};
|
||||
|
@ -101,7 +99,13 @@ where
|
|||
Ok((head, Payload::None))
|
||||
}
|
||||
_ => {
|
||||
let pl: PayloadStream = Box::pin(PlStream::new(io, codec, created, pool));
|
||||
let pl: PayloadStream = Box::pin(PlStream::new(
|
||||
io,
|
||||
codec,
|
||||
created,
|
||||
pool,
|
||||
head.version == Version::HTTP_10,
|
||||
));
|
||||
Ok((head, pl.into()))
|
||||
}
|
||||
}
|
||||
|
@ -137,6 +141,7 @@ pub(super) struct PlStream {
|
|||
io: Option<IoBoxed>,
|
||||
codec: h1::ClientPayloadCodec,
|
||||
created: Instant,
|
||||
http_10: bool,
|
||||
pool: Option<Acquired>,
|
||||
}
|
||||
|
||||
|
@ -146,12 +151,14 @@ impl PlStream {
|
|||
codec: h1::ClientCodec,
|
||||
created: Instant,
|
||||
pool: Option<Acquired>,
|
||||
http_10: bool,
|
||||
) -> Self {
|
||||
PlStream {
|
||||
io: Some(io),
|
||||
codec: codec.into_payload_codec(),
|
||||
created,
|
||||
pool,
|
||||
http_10,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -161,41 +168,46 @@ impl Stream for PlStream {
|
|||
|
||||
fn poll_next(
|
||||
mut self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
cx: &mut task::Context<'_>,
|
||||
) -> Poll<Option<Self::Item>> {
|
||||
let mut this = self.as_mut();
|
||||
loop {
|
||||
return Poll::Ready(Some(
|
||||
match ready!(this.io.as_ref().unwrap().poll_recv(&this.codec, cx)) {
|
||||
Ok(chunk) => {
|
||||
if let Some(chunk) = chunk {
|
||||
Ok(chunk)
|
||||
} else {
|
||||
release_connection(
|
||||
this.io.take().unwrap(),
|
||||
!this.codec.keepalive(),
|
||||
this.created,
|
||||
this.pool.take(),
|
||||
);
|
||||
return Poll::Ready(None);
|
||||
}
|
||||
let item = ready!(this.io.as_ref().unwrap().poll_recv(&this.codec, cx));
|
||||
return Poll::Ready(Some(match item {
|
||||
Ok(chunk) => {
|
||||
if let Some(chunk) = chunk {
|
||||
Ok(chunk)
|
||||
} else {
|
||||
release_connection(
|
||||
this.io.take().unwrap(),
|
||||
!this.codec.keepalive(),
|
||||
this.created,
|
||||
this.pool.take(),
|
||||
);
|
||||
return Poll::Ready(None);
|
||||
}
|
||||
Err(RecvError::KeepAlive) => {
|
||||
Err(io::Error::new(io::ErrorKind::TimedOut, "Keep-alive").into())
|
||||
}
|
||||
Err(RecvError::KeepAlive) => {
|
||||
Err(io::Error::new(io::ErrorKind::TimedOut, "Keep-alive").into())
|
||||
}
|
||||
Err(RecvError::Stop) => {
|
||||
Err(io::Error::new(io::ErrorKind::Other, "Dispatcher stopped").into())
|
||||
}
|
||||
Err(RecvError::WriteBackpressure) => {
|
||||
ready!(this.io.as_ref().unwrap().poll_flush(cx, false))?;
|
||||
continue;
|
||||
}
|
||||
Err(RecvError::Decoder(err)) => Err(err),
|
||||
Err(RecvError::PeerGone(Some(err))) => {
|
||||
Err(PayloadError::Incomplete(Some(err)))
|
||||
}
|
||||
Err(RecvError::PeerGone(None)) => {
|
||||
if this.http_10 {
|
||||
return Poll::Ready(None);
|
||||
}
|
||||
Err(RecvError::Stop) => {
|
||||
Err(io::Error::new(io::ErrorKind::Other, "Dispatcher stopped")
|
||||
.into())
|
||||
}
|
||||
Err(RecvError::WriteBackpressure) => {
|
||||
ready!(this.io.as_ref().unwrap().poll_flush(cx, false))?;
|
||||
continue;
|
||||
}
|
||||
Err(RecvError::Decoder(err)) => Err(err),
|
||||
Err(RecvError::PeerGone(Some(err))) => Err(err.into()),
|
||||
Err(RecvError::PeerGone(None)) => return Poll::Ready(None),
|
||||
},
|
||||
));
|
||||
Err(PayloadError::Incomplete(None))
|
||||
}
|
||||
}));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -387,8 +387,8 @@ impl Future for ReadBody {
|
|||
let this = self.get_mut();
|
||||
|
||||
loop {
|
||||
return match Pin::new(&mut this.stream).poll_next(cx)? {
|
||||
Poll::Ready(Some(chunk)) => {
|
||||
return match Pin::new(&mut this.stream).poll_next(cx) {
|
||||
Poll::Ready(Some(Ok(chunk))) => {
|
||||
if this.limit > 0 && (this.buf.len() + chunk.len()) > this.limit {
|
||||
Poll::Ready(Err(PayloadError::Overflow))
|
||||
} else {
|
||||
|
@ -397,6 +397,7 @@ impl Future for ReadBody {
|
|||
}
|
||||
}
|
||||
Poll::Ready(None) => Poll::Ready(Ok(this.buf.split().freeze())),
|
||||
Poll::Ready(Some(Err(err))) => Poll::Ready(Err(err)),
|
||||
Poll::Pending => {
|
||||
if this.timeout.poll_elapsed(cx).is_ready() {
|
||||
Poll::Ready(Err(PayloadError::Incomplete(Some(
|
||||
|
|
|
@ -1,7 +1,5 @@
|
|||
use std::{future::Future, io, io::Write, pin::Pin, task::Context, task::Poll};
|
||||
|
||||
#[cfg(feature = "brotli")]
|
||||
use brotli2::write::BrotliDecoder;
|
||||
use flate2::write::{GzDecoder, ZlibDecoder};
|
||||
|
||||
use super::Writer;
|
||||
|
@ -27,10 +25,6 @@ where
|
|||
#[inline]
|
||||
pub fn new(stream: S, encoding: ContentEncoding) -> Decoder<S> {
|
||||
let decoder = match encoding {
|
||||
#[cfg(feature = "brotli")]
|
||||
ContentEncoding::Br => Some(ContentDecoder::Br(Box::new(BrotliDecoder::new(
|
||||
Writer::new(),
|
||||
)))),
|
||||
ContentEncoding::Deflate => Some(ContentDecoder::Deflate(Box::new(
|
||||
ZlibDecoder::new(Writer::new()),
|
||||
))),
|
||||
|
@ -137,25 +131,11 @@ where
|
|||
enum ContentDecoder {
|
||||
Deflate(Box<ZlibDecoder<Writer>>),
|
||||
Gzip(Box<GzDecoder<Writer>>),
|
||||
#[cfg(feature = "brotli")]
|
||||
Br(Box<BrotliDecoder<Writer>>),
|
||||
}
|
||||
|
||||
impl ContentDecoder {
|
||||
fn feed_eof(&mut self) -> io::Result<Option<Bytes>> {
|
||||
match self {
|
||||
#[cfg(feature = "brotli")]
|
||||
ContentDecoder::Br(ref mut decoder) => match decoder.flush() {
|
||||
Ok(()) => {
|
||||
let b = decoder.get_mut().take();
|
||||
if !b.is_empty() {
|
||||
Ok(Some(b))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
Err(e) => Err(e),
|
||||
},
|
||||
ContentDecoder::Gzip(ref mut decoder) => match decoder.try_finish() {
|
||||
Ok(_) => {
|
||||
let b = decoder.get_mut().take();
|
||||
|
@ -183,19 +163,6 @@ impl ContentDecoder {
|
|||
|
||||
fn feed_data(&mut self, data: Bytes) -> io::Result<Option<Bytes>> {
|
||||
match self {
|
||||
#[cfg(feature = "brotli")]
|
||||
ContentDecoder::Br(ref mut decoder) => match decoder.write_all(&data) {
|
||||
Ok(_) => {
|
||||
decoder.flush()?;
|
||||
let b = decoder.get_mut().take();
|
||||
if !b.is_empty() {
|
||||
Ok(Some(b))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
Err(e) => Err(e),
|
||||
},
|
||||
ContentDecoder::Gzip(ref mut decoder) => match decoder.write_all(&data) {
|
||||
Ok(_) => {
|
||||
decoder.flush()?;
|
||||
|
|
|
@ -1,8 +1,6 @@
|
|||
//! Stream encoder
|
||||
use std::{fmt, future::Future, io, io::Write, pin::Pin, task::Context, task::Poll};
|
||||
|
||||
#[cfg(feature = "brotli")]
|
||||
use brotli2::write::BrotliEncoder;
|
||||
use flate2::write::{GzEncoder, ZlibEncoder};
|
||||
|
||||
use crate::http::body::{Body, BodySize, MessageBody, ResponseBody};
|
||||
|
@ -191,23 +189,11 @@ fn update_head(encoding: ContentEncoding, head: &mut ResponseHead) {
|
|||
enum ContentEncoder {
|
||||
Deflate(ZlibEncoder<Writer>),
|
||||
Gzip(GzEncoder<Writer>),
|
||||
#[cfg(feature = "brotli")]
|
||||
Br(BrotliEncoder<Writer>),
|
||||
}
|
||||
|
||||
impl ContentEncoder {
|
||||
fn can_encode(encoding: ContentEncoding) -> bool {
|
||||
#[cfg(feature = "brotli")]
|
||||
{
|
||||
matches!(
|
||||
encoding,
|
||||
ContentEncoding::Deflate | ContentEncoding::Gzip | ContentEncoding::Br
|
||||
)
|
||||
}
|
||||
#[cfg(not(feature = "brotli"))]
|
||||
{
|
||||
matches!(encoding, ContentEncoding::Deflate | ContentEncoding::Gzip)
|
||||
}
|
||||
matches!(encoding, ContentEncoding::Deflate | ContentEncoding::Gzip)
|
||||
}
|
||||
|
||||
fn encoder(encoding: ContentEncoding) -> Option<Self> {
|
||||
|
@ -220,18 +206,12 @@ impl ContentEncoder {
|
|||
Writer::new(),
|
||||
flate2::Compression::fast(),
|
||||
))),
|
||||
#[cfg(feature = "brotli")]
|
||||
ContentEncoding::Br => {
|
||||
Some(ContentEncoder::Br(BrotliEncoder::new(Writer::new(), 3)))
|
||||
}
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
fn take(&mut self) -> Bytes {
|
||||
match *self {
|
||||
#[cfg(feature = "brotli")]
|
||||
ContentEncoder::Br(ref mut encoder) => encoder.get_mut().take(),
|
||||
ContentEncoder::Deflate(ref mut encoder) => encoder.get_mut().take(),
|
||||
ContentEncoder::Gzip(ref mut encoder) => encoder.get_mut().take(),
|
||||
}
|
||||
|
@ -239,11 +219,6 @@ impl ContentEncoder {
|
|||
|
||||
fn finish(self) -> Result<Bytes, io::Error> {
|
||||
match self {
|
||||
#[cfg(feature = "brotli")]
|
||||
ContentEncoder::Br(encoder) => match encoder.finish() {
|
||||
Ok(writer) => Ok(writer.buf.freeze()),
|
||||
Err(err) => Err(err),
|
||||
},
|
||||
ContentEncoder::Gzip(encoder) => match encoder.finish() {
|
||||
Ok(writer) => Ok(writer.buf.freeze()),
|
||||
Err(err) => Err(err),
|
||||
|
@ -257,14 +232,6 @@ impl ContentEncoder {
|
|||
|
||||
fn write(&mut self, data: &[u8]) -> Result<(), io::Error> {
|
||||
match *self {
|
||||
#[cfg(feature = "brotli")]
|
||||
ContentEncoder::Br(ref mut encoder) => match encoder.write_all(data) {
|
||||
Ok(_) => Ok(()),
|
||||
Err(err) => {
|
||||
log::trace!("Error decoding br encoding: {}", err);
|
||||
Err(err)
|
||||
}
|
||||
},
|
||||
ContentEncoder::Gzip(ref mut encoder) => match encoder.write_all(data) {
|
||||
Ok(_) => Ok(()),
|
||||
Err(err) => {
|
||||
|
@ -288,8 +255,6 @@ impl fmt::Debug for ContentEncoder {
|
|||
match self {
|
||||
ContentEncoder::Deflate(_) => write!(f, "ContentEncoder::Deflate"),
|
||||
ContentEncoder::Gzip(_) => write!(f, "ContentEncoder::Gzip"),
|
||||
#[cfg(feature = "brotli")]
|
||||
ContentEncoder::Br(_) => write!(f, "ContentEncoder::Br"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
//! HTTP/1 protocol dispatcher
|
||||
use std::{error, future, io, marker, pin::Pin, rc::Rc, task::Context, task::Poll};
|
||||
use std::{error, future, io, marker, mem, pin::Pin, rc::Rc, task::Context, task::Poll};
|
||||
|
||||
use crate::io::{Decoded, Filter, Io, IoStatusUpdate, RecvError};
|
||||
use crate::service::{PipelineCall, Service};
|
||||
|
@ -144,7 +144,20 @@ where
|
|||
inner.send_response(res, body)
|
||||
}
|
||||
Poll::Ready(Err(err)) => inner.control(Control::err(err)),
|
||||
Poll::Pending => ready!(inner.poll_request(cx)),
|
||||
Poll::Pending => {
|
||||
// state changed because of error.
|
||||
// spawn current publish future to runtime
|
||||
// so it could complete error handling
|
||||
let st = ready!(inner.poll_request(cx));
|
||||
if inner.payload.is_some() {
|
||||
if let State::CallPublish { fut } =
|
||||
mem::replace(&mut *this.st, State::ReadRequest)
|
||||
{
|
||||
crate::rt::spawn(fut);
|
||||
}
|
||||
}
|
||||
st
|
||||
}
|
||||
},
|
||||
// handle control service responses
|
||||
State::CallControl { fut } => match Pin::new(fut).poll(cx) {
|
||||
|
@ -339,7 +352,7 @@ where
|
|||
.io
|
||||
.encode(Message::Item((msg, body.size())), &self.codec)
|
||||
.map_err(|err| {
|
||||
if let Some(mut payload) = self.payload.take() {
|
||||
if let Some(ref mut payload) = self.payload {
|
||||
payload.1.set_error(PayloadError::Incomplete(None));
|
||||
}
|
||||
err
|
||||
|
@ -438,7 +451,7 @@ where
|
|||
}
|
||||
|
||||
fn set_payload_error(&mut self, err: PayloadError) {
|
||||
if let Some(mut payload) = self.payload.take() {
|
||||
if let Some(ref mut payload) = self.payload {
|
||||
payload.1.set_error(err);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -3,8 +3,7 @@ use std::rc::{Rc, Weak};
|
|||
use std::task::{Context, Poll};
|
||||
use std::{cell::RefCell, collections::VecDeque, pin::Pin};
|
||||
|
||||
use crate::http::error::PayloadError;
|
||||
use crate::{task::LocalWaker, util::Bytes, util::Stream};
|
||||
use crate::{http::error::PayloadError, task::LocalWaker, util::Bytes, util::Stream};
|
||||
|
||||
/// max buffer size 32k
|
||||
const MAX_BUFFER_SIZE: usize = 32_768;
|
||||
|
@ -119,7 +118,7 @@ impl PayloadSender {
|
|||
// we check only if Payload (other side) is alive,
|
||||
// otherwise always return true (consume payload)
|
||||
if let Some(shared) = self.inner.upgrade() {
|
||||
if shared.borrow().need_read {
|
||||
if shared.borrow().flags.contains(Flags::NEED_READ) {
|
||||
PayloadStatus::Read
|
||||
} else {
|
||||
shared.borrow_mut().io_task.register(cx.waker());
|
||||
|
@ -131,12 +130,20 @@ impl PayloadSender {
|
|||
}
|
||||
}
|
||||
|
||||
bitflags::bitflags! {
|
||||
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
|
||||
struct Flags: u8 {
|
||||
const EOF = 0b0000_0001;
|
||||
const ERROR = 0b0000_0010;
|
||||
const NEED_READ = 0b0000_0100;
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct Inner {
|
||||
len: usize,
|
||||
eof: bool,
|
||||
flags: Flags,
|
||||
err: Option<PayloadError>,
|
||||
need_read: bool,
|
||||
items: VecDeque<Bytes>,
|
||||
task: LocalWaker,
|
||||
io_task: LocalWaker,
|
||||
|
@ -144,12 +151,16 @@ struct Inner {
|
|||
|
||||
impl Inner {
|
||||
fn new(eof: bool) -> Self {
|
||||
let flags = if eof {
|
||||
Flags::EOF | Flags::NEED_READ
|
||||
} else {
|
||||
Flags::NEED_READ
|
||||
};
|
||||
Inner {
|
||||
eof,
|
||||
flags,
|
||||
len: 0,
|
||||
err: None,
|
||||
items: VecDeque::new(),
|
||||
need_read: true,
|
||||
task: LocalWaker::new(),
|
||||
io_task: LocalWaker::new(),
|
||||
}
|
||||
|
@ -157,18 +168,23 @@ impl Inner {
|
|||
|
||||
fn set_error(&mut self, err: PayloadError) {
|
||||
self.err = Some(err);
|
||||
self.flags.insert(Flags::ERROR);
|
||||
self.task.wake()
|
||||
}
|
||||
|
||||
fn feed_eof(&mut self) {
|
||||
self.eof = true;
|
||||
self.flags.insert(Flags::EOF);
|
||||
self.task.wake()
|
||||
}
|
||||
|
||||
fn feed_data(&mut self, data: Bytes) {
|
||||
self.len += data.len();
|
||||
self.items.push_back(data);
|
||||
self.need_read = self.len < MAX_BUFFER_SIZE;
|
||||
if self.len < MAX_BUFFER_SIZE {
|
||||
self.flags.insert(Flags::NEED_READ);
|
||||
} else {
|
||||
self.flags.remove(Flags::NEED_READ);
|
||||
}
|
||||
self.task.wake();
|
||||
}
|
||||
|
||||
|
@ -178,19 +194,25 @@ impl Inner {
|
|||
) -> Poll<Option<Result<Bytes, PayloadError>>> {
|
||||
if let Some(data) = self.items.pop_front() {
|
||||
self.len -= data.len();
|
||||
self.need_read = self.len < MAX_BUFFER_SIZE;
|
||||
if self.len < MAX_BUFFER_SIZE {
|
||||
self.flags.insert(Flags::NEED_READ);
|
||||
} else {
|
||||
self.flags.remove(Flags::NEED_READ);
|
||||
}
|
||||
|
||||
if self.need_read && !self.eof {
|
||||
if self.flags.contains(Flags::NEED_READ)
|
||||
&& !self.flags.intersects(Flags::EOF | Flags::ERROR)
|
||||
{
|
||||
self.task.register(cx.waker());
|
||||
}
|
||||
self.io_task.wake();
|
||||
Poll::Ready(Some(Ok(data)))
|
||||
} else if let Some(err) = self.err.take() {
|
||||
Poll::Ready(Some(Err(err)))
|
||||
} else if self.eof {
|
||||
} else if self.flags.intersects(Flags::EOF | Flags::ERROR) {
|
||||
Poll::Ready(None)
|
||||
} else {
|
||||
self.need_read = true;
|
||||
self.flags.insert(Flags::NEED_READ);
|
||||
self.task.register(cx.waker());
|
||||
self.io_task.wake();
|
||||
Poll::Pending
|
||||
|
|
|
@ -8,10 +8,10 @@ use coo_kie::{Cookie, CookieJar};
|
|||
use crate::io::Filter;
|
||||
use crate::io::Io;
|
||||
use crate::server::Server;
|
||||
use crate::service::ServiceFactory;
|
||||
#[cfg(feature = "ws")]
|
||||
use crate::ws::{error::WsClientError, WsClient, WsConnection};
|
||||
use crate::{rt::System, service::ServiceFactory};
|
||||
use crate::{time::Millis, time::Seconds, util::Bytes};
|
||||
use crate::{rt::System, time::sleep, time::Millis, time::Seconds, util::Bytes};
|
||||
|
||||
use super::client::{Client, ClientRequest, ClientResponse, Connector};
|
||||
use super::error::{HttpError, PayloadError};
|
||||
|
@ -244,10 +244,15 @@ where
|
|||
.workers(1)
|
||||
.disable_signals()
|
||||
.run();
|
||||
tx.send((system, srv, local_addr)).unwrap();
|
||||
|
||||
crate::rt::spawn(async move {
|
||||
sleep(Millis(125)).await;
|
||||
tx.send((system, srv, local_addr)).unwrap();
|
||||
});
|
||||
Ok(())
|
||||
})
|
||||
});
|
||||
thread::sleep(std::time::Duration::from_millis(150));
|
||||
|
||||
let (system, server, addr) = rx.recv().unwrap();
|
||||
|
||||
|
|
|
@ -123,4 +123,15 @@ pub mod util {
|
|||
#[doc(hidden)]
|
||||
#[deprecated]
|
||||
pub use std::task::ready;
|
||||
|
||||
#[doc(hidden)]
|
||||
pub fn enable_test_logging() {
|
||||
#[cfg(not(feature = "no-test-logging"))]
|
||||
if std::env::var("NTEX_NO_TEST_LOG").is_err() {
|
||||
if std::env::var("RUST_LOG").is_err() {
|
||||
std::env::set_var("RUST_LOG", "trace");
|
||||
}
|
||||
let _ = env_logger::builder().is_test(true).try_init();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -68,7 +68,7 @@ pub struct ServiceConfig<Err = DefaultError> {
|
|||
}
|
||||
|
||||
impl<Err: ErrorRenderer> ServiceConfig<Err> {
|
||||
pub(crate) fn new() -> Self {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
services: Vec::new(),
|
||||
state: Extensions::new(),
|
||||
|
@ -132,7 +132,7 @@ mod tests {
|
|||
use crate::http::{Method, StatusCode};
|
||||
use crate::util::Bytes;
|
||||
use crate::web::test::{call_service, init_service, read_body, TestRequest};
|
||||
use crate::web::{self, App, HttpRequest, HttpResponse};
|
||||
use crate::web::{self, App, DefaultError, HttpRequest, HttpResponse};
|
||||
|
||||
#[crate::rt_test]
|
||||
async fn test_configure_state() {
|
||||
|
@ -205,4 +205,11 @@ mod tests {
|
|||
let resp = call_service(&srv, req).await;
|
||||
assert_eq!(resp.status(), StatusCode::OK);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_new_service_config() {
|
||||
let cfg: ServiceConfig<DefaultError> = ServiceConfig::new();
|
||||
assert!(cfg.services.is_empty());
|
||||
assert!(cfg.external.is_empty());
|
||||
}
|
||||
}
|
||||
|
|
|
@ -82,7 +82,7 @@ mod route;
|
|||
mod scope;
|
||||
mod server;
|
||||
mod service;
|
||||
mod stack;
|
||||
pub mod stack;
|
||||
pub mod test;
|
||||
pub mod types;
|
||||
mod util;
|
||||
|
@ -128,6 +128,7 @@ pub mod dev {
|
|||
//! The purpose of this module is to alleviate imports of many common
|
||||
//! traits by adding a glob import to the top of ntex::web heavy modules:
|
||||
|
||||
pub use crate::web::app_service::AppService;
|
||||
pub use crate::web::config::AppConfig;
|
||||
pub use crate::web::info::ConnectionInfo;
|
||||
pub use crate::web::rmap::ResourceMap;
|
||||
|
|
|
@ -697,7 +697,10 @@ where
|
|||
.set_tag("test", "WEB-SRV")
|
||||
.run();
|
||||
|
||||
tx.send((System::current(), srv, local_addr)).unwrap();
|
||||
crate::rt::spawn(async move {
|
||||
sleep(Millis(125)).await;
|
||||
tx.send((System::current(), srv, local_addr)).unwrap();
|
||||
});
|
||||
Ok(())
|
||||
})
|
||||
});
|
||||
|
|
|
@ -3,16 +3,14 @@ use std::io::{Read, Write};
|
|||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::sync::Arc;
|
||||
|
||||
use brotli2::write::BrotliEncoder;
|
||||
use coo_kie::Cookie;
|
||||
use flate2::{read::GzDecoder, write::GzEncoder, write::ZlibEncoder, Compression};
|
||||
use futures_util::stream::once;
|
||||
use rand::Rng;
|
||||
|
||||
use ntex::http::client::error::{JsonPayloadError, SendRequestError};
|
||||
use ntex::http::client::error::SendRequestError;
|
||||
use ntex::http::client::{Client, Connector};
|
||||
use ntex::http::test::server as test_server;
|
||||
use ntex::http::{header, HttpMessage, HttpService, Method};
|
||||
use ntex::http::{header, HttpMessage, HttpService};
|
||||
use ntex::service::{chain_factory, map_config};
|
||||
use ntex::web::dev::AppConfig;
|
||||
use ntex::web::middleware::Compress;
|
||||
|
@ -220,7 +218,7 @@ async fn test_connection_reuse() {
|
|||
)))
|
||||
});
|
||||
|
||||
let client = Client::build().timeout(Seconds(10)).finish();
|
||||
let client = Client::build().timeout(Seconds(30)).finish();
|
||||
|
||||
// req 1
|
||||
let request = client.get(srv.url("/")).send();
|
||||
|
@ -255,7 +253,7 @@ async fn test_connection_force_close() {
|
|||
)))
|
||||
});
|
||||
|
||||
let client = Client::build().timeout(Seconds(10)).finish();
|
||||
let client = Client::build().timeout(Seconds(30)).finish();
|
||||
|
||||
// req 1
|
||||
let request = client.get(srv.url("/")).force_close().send();
|
||||
|
@ -263,7 +261,7 @@ async fn test_connection_force_close() {
|
|||
assert!(response.status().is_success());
|
||||
|
||||
// req 2
|
||||
let client = Client::build().timeout(Seconds(10)).finish();
|
||||
let client = Client::build().timeout(Seconds(30)).finish();
|
||||
let req = client.post(srv.url("/")).force_close();
|
||||
let response = req.send().await.unwrap();
|
||||
assert!(response.status().is_success());
|
||||
|
@ -291,7 +289,7 @@ async fn test_connection_server_close() {
|
|||
)))
|
||||
});
|
||||
|
||||
let client = Client::build().timeout(Seconds(10)).finish();
|
||||
let client = Client::build().timeout(Seconds(30)).finish();
|
||||
|
||||
// req 1
|
||||
let request = client.get(srv.url("/")).send();
|
||||
|
@ -510,19 +508,21 @@ async fn test_client_gzip_encoding_large() {
|
|||
async fn test_client_gzip_encoding_large_random() {
|
||||
let data = rand::thread_rng()
|
||||
.sample_iter(&rand::distributions::Alphanumeric)
|
||||
.take(100_000)
|
||||
.take(1_048_500)
|
||||
.map(char::from)
|
||||
.collect::<String>();
|
||||
|
||||
let srv = test::server(|| {
|
||||
App::new().service(web::resource("/").route(web::to(|data: Bytes| async move {
|
||||
let mut e = GzEncoder::new(Vec::new(), Compression::default());
|
||||
e.write_all(&data).unwrap();
|
||||
let data = e.finish().unwrap();
|
||||
HttpResponse::Ok()
|
||||
.header("content-encoding", "gzip")
|
||||
.body(data)
|
||||
})))
|
||||
App::new()
|
||||
.state(web::types::PayloadConfig::default().limit(1_048_576))
|
||||
.service(web::resource("/").route(web::to(|data: Bytes| async move {
|
||||
let mut e = GzEncoder::new(Vec::new(), Compression::default());
|
||||
e.write_all(&data).unwrap();
|
||||
let data = e.finish().unwrap();
|
||||
HttpResponse::Ok()
|
||||
.header("content-encoding", "gzip")
|
||||
.body(data)
|
||||
})))
|
||||
});
|
||||
|
||||
// client request
|
||||
|
@ -530,130 +530,10 @@ async fn test_client_gzip_encoding_large_random() {
|
|||
assert!(response.status().is_success());
|
||||
|
||||
// read response
|
||||
let bytes = response.body().await.unwrap();
|
||||
let bytes = response.body().limit(1_048_576).await.unwrap();
|
||||
assert_eq!(bytes, Bytes::from(data));
|
||||
}
|
||||
|
||||
#[ntex::test]
|
||||
async fn test_client_brotli_encoding() {
|
||||
let srv = test::server(|| {
|
||||
App::new().service(web::resource("/").route(web::to(|data: Bytes| async move {
|
||||
let mut e = BrotliEncoder::new(Vec::new(), 5);
|
||||
e.write_all(&data).unwrap();
|
||||
let data = e.finish().unwrap();
|
||||
HttpResponse::Ok()
|
||||
.header("content-encoding", "br")
|
||||
.body(data)
|
||||
})))
|
||||
});
|
||||
|
||||
// client request
|
||||
let mut response = srv.post("/").send_body(STR).await.unwrap();
|
||||
assert!(response.status().is_success());
|
||||
|
||||
// read response
|
||||
let bytes = response.body().await.unwrap();
|
||||
assert_eq!(bytes, Bytes::from_static(STR.as_ref()));
|
||||
}
|
||||
|
||||
#[ntex::test]
|
||||
async fn test_client_brotli_encoding_large_random() {
|
||||
let data = rand::thread_rng()
|
||||
.sample_iter(&rand::distributions::Alphanumeric)
|
||||
.take(70_000)
|
||||
.map(char::from)
|
||||
.collect::<String>();
|
||||
|
||||
let srv = test::server(|| {
|
||||
App::new().service(web::resource("/").route(web::to(|data: Bytes| async move {
|
||||
let mut e = BrotliEncoder::new(Vec::new(), 5);
|
||||
e.write_all(&data).unwrap();
|
||||
let data = e.finish().unwrap();
|
||||
HttpResponse::Ok()
|
||||
.header("content-encoding", "br")
|
||||
.body(data)
|
||||
})))
|
||||
});
|
||||
|
||||
// client request
|
||||
let mut response = srv.post("/").send_body(data.clone()).await.unwrap();
|
||||
assert!(response.status().is_success());
|
||||
|
||||
// read response
|
||||
let bytes = response.body().await.unwrap();
|
||||
assert_eq!(bytes.len(), data.len());
|
||||
assert_eq!(bytes, Bytes::from(data.clone()));
|
||||
|
||||
// frozen request
|
||||
let request = srv.post("/").timeout(Seconds(30)).freeze().unwrap();
|
||||
assert_eq!(request.get_method(), Method::POST);
|
||||
assert_eq!(request.get_uri(), srv.url("/").as_str());
|
||||
let mut response = request.send_body(data.clone()).await.unwrap();
|
||||
assert!(response.status().is_success());
|
||||
|
||||
// read response
|
||||
let bytes = response.body().await.unwrap();
|
||||
assert_eq!(bytes.len(), data.len());
|
||||
assert_eq!(bytes, Bytes::from(data.clone()));
|
||||
|
||||
// extra header
|
||||
let mut response = request
|
||||
.extra_header("x-test2", "222")
|
||||
.send_body(data.clone())
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(response.status().is_success());
|
||||
|
||||
// read response
|
||||
let bytes = response.body().await.unwrap();
|
||||
assert_eq!(bytes.len(), data.len());
|
||||
assert_eq!(bytes, Bytes::from(data.clone()));
|
||||
|
||||
// client stream request
|
||||
let mut response = srv
|
||||
.post("/")
|
||||
.send_stream(once(Ready::Ok::<_, JsonPayloadError>(Bytes::from(
|
||||
data.clone(),
|
||||
))))
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(response.status().is_success());
|
||||
|
||||
// read response
|
||||
let bytes = response.body().await.unwrap();
|
||||
assert_eq!(bytes.len(), data.len());
|
||||
assert_eq!(bytes, Bytes::from(data.clone()));
|
||||
|
||||
// frozen request
|
||||
let request = srv.post("/").timeout(Seconds(30)).freeze().unwrap();
|
||||
let mut response = request
|
||||
.send_stream(once(Ready::Ok::<_, JsonPayloadError>(Bytes::from(
|
||||
data.clone(),
|
||||
))))
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(response.status().is_success());
|
||||
|
||||
// read response
|
||||
let bytes = response.body().await.unwrap();
|
||||
assert_eq!(bytes.len(), data.len());
|
||||
assert_eq!(bytes, Bytes::from(data.clone()));
|
||||
|
||||
let mut response = request
|
||||
.extra_header("x-test2", "222")
|
||||
.send_stream(once(Ready::Ok::<_, JsonPayloadError>(Bytes::from(
|
||||
data.clone(),
|
||||
))))
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(response.status().is_success());
|
||||
|
||||
// read response
|
||||
let bytes = response.body().await.unwrap();
|
||||
assert_eq!(bytes.len(), data.len());
|
||||
assert_eq!(bytes, Bytes::from(data.clone()));
|
||||
}
|
||||
|
||||
#[ntex::test]
|
||||
async fn test_client_deflate_encoding() {
|
||||
let srv = test::server(|| {
|
||||
|
@ -814,7 +694,7 @@ async fn client_read_until_eof() {
|
|||
|
||||
// client request
|
||||
let req = Client::build()
|
||||
.timeout(Seconds(5))
|
||||
.timeout(Seconds(30))
|
||||
.finish()
|
||||
.get(format!("http://{}/", addr).as_str());
|
||||
let mut response = req.send().await.unwrap();
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
#![cfg(feature = "openssl")]
|
||||
use std::{io, sync::atomic::AtomicUsize, sync::atomic::Ordering, sync::Arc};
|
||||
use std::io;
|
||||
use std::sync::{atomic::AtomicUsize, atomic::Ordering, Arc, Mutex};
|
||||
|
||||
use futures_util::stream::{once, Stream, StreamExt};
|
||||
use tls_openssl::ssl::{AlpnError, SslAcceptor, SslFiletype, SslMethod};
|
||||
|
@ -424,11 +425,12 @@ async fn test_h2_service_error() {
|
|||
assert_eq!(bytes, Bytes::from_static(b"error"));
|
||||
}
|
||||
|
||||
struct SetOnDrop(Arc<AtomicUsize>);
|
||||
struct SetOnDrop(Arc<AtomicUsize>, Arc<Mutex<Option<::oneshot::Sender<()>>>>);
|
||||
|
||||
impl Drop for SetOnDrop {
|
||||
fn drop(&mut self) {
|
||||
self.0.fetch_add(1, Ordering::Relaxed);
|
||||
let _ = self.1.lock().unwrap().take().unwrap().send(());
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -436,17 +438,20 @@ impl Drop for SetOnDrop {
|
|||
async fn test_h2_client_drop() -> io::Result<()> {
|
||||
let count = Arc::new(AtomicUsize::new(0));
|
||||
let count2 = count.clone();
|
||||
let (tx, rx) = ::oneshot::channel();
|
||||
let tx = Arc::new(Mutex::new(Some(tx)));
|
||||
|
||||
let srv = test_server(move || {
|
||||
let tx = tx.clone();
|
||||
let count = count2.clone();
|
||||
HttpService::build()
|
||||
.h2(move |req: Request| {
|
||||
let count = count.clone();
|
||||
let st = SetOnDrop(count.clone(), tx.clone());
|
||||
async move {
|
||||
let _st = SetOnDrop(count);
|
||||
assert!(req.peer_addr().is_some());
|
||||
assert_eq!(req.version(), Version::HTTP_2);
|
||||
sleep(Seconds(100)).await;
|
||||
sleep(Seconds(30)).await;
|
||||
drop(st);
|
||||
Ok::<_, io::Error>(Response::Ok().finish())
|
||||
}
|
||||
})
|
||||
|
@ -454,9 +459,9 @@ async fn test_h2_client_drop() -> io::Result<()> {
|
|||
.map_err(|_| ())
|
||||
});
|
||||
|
||||
let result = timeout(Millis(250), srv.srequest(Method::GET, "/").send()).await;
|
||||
let result = timeout(Millis(1500), srv.srequest(Method::GET, "/").send()).await;
|
||||
assert!(result.is_err());
|
||||
sleep(Millis(150)).await;
|
||||
let _ = timeout(Millis(1500), rx).await;
|
||||
assert_eq!(count.load(Ordering::Relaxed), 1);
|
||||
Ok(())
|
||||
}
|
||||
|
@ -539,13 +544,19 @@ async fn test_ws_transport() {
|
|||
async fn test_h2_graceful_shutdown() -> io::Result<()> {
|
||||
let count = Arc::new(AtomicUsize::new(0));
|
||||
let count2 = count.clone();
|
||||
let (tx, rx) = ::oneshot::channel();
|
||||
let tx = Arc::new(Mutex::new(Some(tx)));
|
||||
|
||||
let srv = test_server(move || {
|
||||
let tx = tx.clone();
|
||||
let count = count2.clone();
|
||||
HttpService::build()
|
||||
.h2(move |_| {
|
||||
let count = count.clone();
|
||||
count.fetch_add(1, Ordering::Relaxed);
|
||||
if count.load(Ordering::Relaxed) == 2 {
|
||||
let _ = tx.lock().unwrap().take().unwrap().send(());
|
||||
}
|
||||
async move {
|
||||
sleep(Millis(1000)).await;
|
||||
count.fetch_sub(1, Ordering::Relaxed);
|
||||
|
@ -566,7 +577,7 @@ async fn test_h2_graceful_shutdown() -> io::Result<()> {
|
|||
let _ = req.send().await.unwrap();
|
||||
sleep(Millis(100000)).await;
|
||||
});
|
||||
sleep(Millis(150)).await;
|
||||
let _ = rx.await;
|
||||
assert_eq!(count.load(Ordering::Relaxed), 2);
|
||||
|
||||
let (tx, rx) = oneshot::channel();
|
||||
|
@ -574,8 +585,6 @@ async fn test_h2_graceful_shutdown() -> io::Result<()> {
|
|||
srv.stop().await;
|
||||
let _ = tx.send(());
|
||||
});
|
||||
sleep(Millis(150)).await;
|
||||
assert_eq!(count.load(Ordering::Relaxed), 2);
|
||||
|
||||
let _ = rx.await;
|
||||
assert_eq!(count.load(Ordering::Relaxed), 0);
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
use std::sync::{atomic::AtomicUsize, atomic::Ordering, Arc};
|
||||
use std::sync::{atomic::AtomicUsize, atomic::Ordering, Arc, Mutex};
|
||||
use std::{io, io::Read, io::Write, net};
|
||||
|
||||
use futures_util::future::{self, FutureExt};
|
||||
|
@@ -405,6 +405,36 @@ async fn test_http1_handle_not_consumed_payload() {
    assert_eq!(&data[..17], b"HTTP/1.1 200 OK\r\n");
}

/// Handle payload errors (keep-alive, disconnects)
#[ntex::test]
async fn test_http1_handle_payload_errors() {
    let count = Arc::new(AtomicUsize::new(0));
    let count2 = count.clone();

    let srv = test_server(move || {
        let count = count2.clone();
        HttpService::build().h1(move |mut req: Request| {
            let count = count.clone();
            async move {
                let mut pl = req.take_payload();
                let result = pl.recv().await;
                if result.unwrap().is_err() {
                    count.fetch_add(1, Ordering::Relaxed);
                }
                Ok::<_, io::Error>(Response::Ok().finish())
            }
        })
    });

    let mut stream = net::TcpStream::connect(srv.addr()).unwrap();
    let _ =
        stream.write_all(b"GET /test/tests/test HTTP/1.1\r\ncontent-length: 99999\r\n\r\n");
    sleep(Millis(250)).await;
    drop(stream);
    sleep(Millis(250)).await;
    assert_eq!(count.load(Ordering::Acquire), 1);
}
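This new test exercises the payload-error behaviour from the handler side. For comparison, here is a hedged sketch of what an h1 handler that inspects payload errors might look like; the handler shape is illustrative, while the calls mirror the test above.

```rust
use ntex::http::error::PayloadError;
use ntex::http::{Request, Response};

async fn handle(mut req: Request) -> Result<Response, std::io::Error> {
    let mut payload = req.take_payload();
    while let Some(chunk) = payload.recv().await {
        match chunk {
            Ok(bytes) => println!("received {} bytes", bytes.len()),
            Err(PayloadError::Incomplete(reason)) => {
                // Client went away before sending the whole body.
                eprintln!("incomplete payload: {reason:?}");
                break;
            }
            Err(err) => {
                eprintln!("payload error: {err}");
                break;
            }
        }
    }
    Ok(Response::Ok().finish())
}
```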
|
||||
|
||||
#[ntex::test]
|
||||
async fn test_content_length() {
|
||||
let srv = test_server(|| {
|
||||
|
@ -693,11 +723,12 @@ async fn test_h1_service_error() {
|
|||
assert_eq!(bytes, Bytes::from_static(b"error"));
|
||||
}
|
||||
|
||||
struct SetOnDrop(Arc<AtomicUsize>);
|
||||
struct SetOnDrop(Arc<AtomicUsize>, Option<::oneshot::Sender<()>>);
|
||||
|
||||
impl Drop for SetOnDrop {
|
||||
fn drop(&mut self) {
|
||||
self.0.fetch_add(1, Ordering::Relaxed);
|
||||
let _ = self.1.take().unwrap().send(());
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -705,24 +736,28 @@ impl Drop for SetOnDrop {
|
|||
async fn test_h1_client_drop() -> io::Result<()> {
|
||||
let count = Arc::new(AtomicUsize::new(0));
|
||||
let count2 = count.clone();
|
||||
let (tx, rx) = ::oneshot::channel();
|
||||
let tx = Arc::new(Mutex::new(Some(tx)));
|
||||
|
||||
let srv = test_server(move || {
|
||||
let tx = tx.clone();
|
||||
let count = count2.clone();
|
||||
HttpService::build().h1(move |req: Request| {
|
||||
let tx = tx.clone();
|
||||
let count = count.clone();
|
||||
async move {
|
||||
let _st = SetOnDrop(count);
|
||||
let _st = SetOnDrop(count, tx.lock().unwrap().take());
|
||||
assert!(req.peer_addr().is_some());
|
||||
assert_eq!(req.version(), Version::HTTP_11);
|
||||
sleep(Seconds(100)).await;
|
||||
sleep(Millis(50000)).await;
|
||||
Ok::<_, io::Error>(Response::Ok().finish())
|
||||
}
|
||||
})
|
||||
});
|
||||
|
||||
let result = timeout(Millis(100), srv.request(Method::GET, "/").send()).await;
|
||||
let result = timeout(Millis(1500), srv.request(Method::GET, "/").send()).await;
|
||||
assert!(result.is_err());
|
||||
sleep(Millis(250)).await;
|
||||
let _ = rx.await;
|
||||
assert_eq!(count.load(Ordering::Relaxed), 1);
|
||||
Ok(())
|
||||
}
|
||||
|
@ -731,12 +766,18 @@ async fn test_h1_client_drop() -> io::Result<()> {
|
|||
async fn test_h1_gracefull_shutdown() {
|
||||
let count = Arc::new(AtomicUsize::new(0));
|
||||
let count2 = count.clone();
|
||||
let (tx, rx) = ::oneshot::channel();
|
||||
let tx = Arc::new(Mutex::new(Some(tx)));
|
||||
|
||||
let srv = test_server(move || {
|
||||
let tx = tx.clone();
|
||||
let count = count2.clone();
|
||||
HttpService::build().h1(move |_: Request| {
|
||||
let count = count.clone();
|
||||
count.fetch_add(1, Ordering::Relaxed);
|
||||
if count.load(Ordering::Relaxed) == 2 {
|
||||
let _ = tx.lock().unwrap().take().unwrap().send(());
|
||||
}
|
||||
async move {
|
||||
sleep(Millis(1000)).await;
|
||||
count.fetch_sub(1, Ordering::Relaxed);
|
||||
|
@ -751,7 +792,7 @@ async fn test_h1_gracefull_shutdown() {
|
|||
let mut stream2 = net::TcpStream::connect(srv.addr()).unwrap();
|
||||
let _ = stream2.write_all(b"GET /index.html HTTP/1.1\r\n\r\n");
|
||||
|
||||
sleep(Millis(150)).await;
|
||||
let _ = rx.await;
|
||||
assert_eq!(count.load(Ordering::Relaxed), 2);
|
||||
|
||||
let (tx, rx) = oneshot::channel();
|
||||
|
@ -759,8 +800,6 @@ async fn test_h1_gracefull_shutdown() {
|
|||
srv.stop().await;
|
||||
let _ = tx.send(());
|
||||
});
|
||||
sleep(Millis(150)).await;
|
||||
assert_eq!(count.load(Ordering::Relaxed), 2);
|
||||
|
||||
let _ = rx.await;
|
||||
assert_eq!(count.load(Ordering::Relaxed), 0);
|
||||
|
@ -770,12 +809,18 @@ async fn test_h1_gracefull_shutdown() {
|
|||
async fn test_h1_gracefull_shutdown_2() {
|
||||
let count = Arc::new(AtomicUsize::new(0));
|
||||
let count2 = count.clone();
|
||||
let (tx, rx) = ::oneshot::channel();
|
||||
let tx = Arc::new(Mutex::new(Some(tx)));
|
||||
|
||||
let srv = test_server(move || {
|
||||
let tx = tx.clone();
|
||||
let count = count2.clone();
|
||||
HttpService::build().finish(move |_: Request| {
|
||||
let count = count.clone();
|
||||
count.fetch_add(1, Ordering::Relaxed);
|
||||
if count.load(Ordering::Relaxed) == 2 {
|
||||
let _ = tx.lock().unwrap().take().unwrap().send(());
|
||||
}
|
||||
async move {
|
||||
sleep(Millis(1000)).await;
|
||||
count.fetch_sub(1, Ordering::Relaxed);
|
||||
|
@ -790,17 +835,14 @@ async fn test_h1_gracefull_shutdown_2() {
|
|||
let mut stream2 = net::TcpStream::connect(srv.addr()).unwrap();
|
||||
let _ = stream2.write_all(b"GET /index.html HTTP/1.1\r\n\r\n");
|
||||
|
||||
sleep(Millis(150)).await;
|
||||
assert_eq!(count.load(Ordering::Relaxed), 2);
|
||||
let _ = rx.await;
|
||||
assert_eq!(count.load(Ordering::Acquire), 2);
|
||||
|
||||
let (tx, rx) = oneshot::channel();
|
||||
rt::spawn(async move {
|
||||
srv.stop().await;
|
||||
let _ = tx.send(());
|
||||
});
|
||||
sleep(Millis(150)).await;
|
||||
assert_eq!(count.load(Ordering::Relaxed), 2);
|
||||
|
||||
let _ = rx.await;
|
||||
assert_eq!(count.load(Ordering::Relaxed), 0);
|
||||
}
|
||||
|
|
|
@ -4,7 +4,7 @@
|
|||
use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};
|
||||
#[cfg(feature = "tokio")]
|
||||
use std::{io, sync::Arc};
|
||||
use std::{io::Read, net, sync::mpsc, thread, time};
|
||||
use std::{io::Read, io::Write, net, sync::mpsc, thread, time};
|
||||
|
||||
use ntex::codec::BytesCodec;
|
||||
use ntex::io::Io;
|
||||
|
@ -71,6 +71,7 @@ async fn test_listen() {
|
|||
|
||||
#[ntex::test]
|
||||
#[cfg(unix)]
|
||||
#[allow(clippy::unused_io_amount)]
|
||||
async fn test_run() {
|
||||
let addr = TestServer::unused_addr();
|
||||
let (tx, rx) = mpsc::channel();
|
||||
|
@ -80,6 +81,7 @@ async fn test_run() {
|
|||
sys.run(move || {
|
||||
let srv = build()
|
||||
.backlog(100)
|
||||
.workers(1)
|
||||
.disable_signals()
|
||||
.bind("test", addr, move |_| {
|
||||
fn_service(|io: Io| async move {
|
||||
|
@ -90,6 +92,7 @@ async fn test_run() {
|
|||
})
|
||||
})
|
||||
.unwrap()
|
||||
.set_tag("test", "SRV")
|
||||
.run();
|
||||
let _ = tx.send((srv, ntex::rt::System::current()));
|
||||
Ok(())
|
||||
|
@ -99,6 +102,7 @@ async fn test_run() {
|
|||
|
||||
let mut buf = [1u8; 4];
|
||||
let mut conn = net::TcpStream::connect(addr).unwrap();
|
||||
conn.write(&b"test"[..]).unwrap();
|
||||
let _ = conn.read_exact(&mut buf);
|
||||
assert_eq!(buf, b"test"[..]);
|
||||
|
||||
|
|
|
@ -1,6 +1,5 @@
|
|||
use std::{future::Future, io, io::Read, io::Write, pin::Pin, task::Context, task::Poll};
|
||||
|
||||
use brotli2::write::{BrotliDecoder, BrotliEncoder};
|
||||
use flate2::read::GzDecoder;
|
||||
use flate2::write::{GzEncoder, ZlibDecoder, ZlibEncoder};
|
||||
use flate2::Compression;
|
||||
|
@ -318,36 +317,6 @@ async fn test_body_chunked_implicit() {
|
|||
assert_eq!(Bytes::from(dec), Bytes::from_static(STR.as_ref()));
|
||||
}
|
||||
|
||||
#[ntex::test]
|
||||
async fn test_body_br_streaming() {
|
||||
let srv = test::server_with(test::config().h1(), || {
|
||||
App::new().wrap(Compress::new(ContentEncoding::Br)).service(
|
||||
web::resource("/").route(web::to(move || async {
|
||||
HttpResponse::Ok()
|
||||
.streaming(TestBody::new(Bytes::from_static(STR.as_ref()), 24))
|
||||
})),
|
||||
)
|
||||
});
|
||||
|
||||
let mut response = srv
|
||||
.get("/")
|
||||
.header(ACCEPT_ENCODING, "br")
|
||||
.no_decompress()
|
||||
.send()
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(response.status().is_success());
|
||||
|
||||
// read response
|
||||
let bytes = response.body().await.unwrap();
|
||||
|
||||
// decode br
|
||||
let mut e = BrotliDecoder::new(Vec::with_capacity(2048));
|
||||
e.write_all(bytes.as_ref()).unwrap();
|
||||
let dec = e.finish().unwrap();
|
||||
assert_eq!(Bytes::from(dec), Bytes::from_static(STR.as_ref()));
|
||||
}
|
||||
|
||||
#[ntex::test]
|
||||
async fn test_head_binary() {
|
||||
let srv = test::server_with(test::config().h1(), || {
|
||||
|
@ -422,35 +391,6 @@ async fn test_body_deflate() {
|
|||
assert_eq!(Bytes::from(dec), Bytes::from_static(STR.as_ref()));
|
||||
}
|
||||
|
||||
#[ntex::test]
|
||||
async fn test_body_brotli() {
|
||||
let srv = test::server_with(test::config().h1(), || {
|
||||
App::new().wrap(Compress::new(ContentEncoding::Br)).service(
|
||||
web::resource("/")
|
||||
.route(web::to(move || async { HttpResponse::Ok().body(STR) })),
|
||||
)
|
||||
});
|
||||
|
||||
// client request
|
||||
let mut response = srv
|
||||
.get("/")
|
||||
.header(ACCEPT_ENCODING, "br")
|
||||
.no_decompress()
|
||||
.send()
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(response.status().is_success());
|
||||
|
||||
// read response
|
||||
let bytes = response.body().await.unwrap();
|
||||
|
||||
// decode brotli
|
||||
let mut e = BrotliDecoder::new(Vec::with_capacity(2048));
|
||||
e.write_all(bytes.as_ref()).unwrap();
|
||||
let dec = e.finish().unwrap();
|
||||
assert_eq!(Bytes::from(dec), Bytes::from_static(STR.as_ref()));
|
||||
}
|
||||
|
||||
#[ntex::test]
|
||||
async fn test_encoding() {
|
||||
let srv = test::server_with(test::config().h1(), || {
|
||||
|
@ -644,204 +584,6 @@ async fn test_reading_deflate_encoding_large_random() {
|
|||
assert_eq!(bytes, Bytes::from(data));
|
||||
}
|
||||
|
||||
#[ntex::test]
|
||||
async fn test_brotli_encoding() {
|
||||
let srv = test::server_with(test::config().h1(), || {
|
||||
App::new().service(web::resource("/").route(web::to(move |body: Bytes| async {
|
||||
HttpResponse::Ok().body(body)
|
||||
})))
|
||||
});
|
||||
|
||||
let mut e = BrotliEncoder::new(Vec::new(), 5);
|
||||
e.write_all(STR.as_ref()).unwrap();
|
||||
let enc = e.finish().unwrap();
|
||||
|
||||
// client request
|
||||
let request = srv
|
||||
.post("/")
|
||||
.header(CONTENT_ENCODING, "br")
|
||||
.send_body(enc.clone());
|
||||
let mut response = request.await.unwrap();
|
||||
assert!(response.status().is_success());
|
||||
|
||||
// read response
|
||||
let bytes = response.body().await.unwrap();
|
||||
assert_eq!(bytes, Bytes::from_static(STR.as_ref()));
|
||||
}
|
||||
|
||||
#[ntex::test]
|
||||
async fn test_brotli_encoding_large() {
|
||||
let data = rand::thread_rng()
|
||||
.sample_iter(&Alphanumeric)
|
||||
.take(320_000)
|
||||
.map(char::from)
|
||||
.collect::<String>();
|
||||
|
||||
let srv = test::server_with(test::config().h1(), || {
|
||||
App::new().service(
|
||||
web::resource("/")
|
||||
.state(web::types::PayloadConfig::new(320_000))
|
||||
.route(web::to(move |body: Bytes| async {
|
||||
HttpResponse::Ok().streaming(TestBody::new(body, 10240))
|
||||
})),
|
||||
)
|
||||
});
|
||||
|
||||
let mut e = BrotliEncoder::new(Vec::new(), 5);
|
||||
e.write_all(data.as_ref()).unwrap();
|
||||
let enc = e.finish().unwrap();
|
||||
|
||||
// client request
|
||||
let request = srv
|
||||
.post("/")
|
||||
.header(CONTENT_ENCODING, "br")
|
||||
.send_body(enc.clone());
|
||||
let mut response = request.await.unwrap();
|
||||
assert!(response.status().is_success());
|
||||
|
||||
// read response
|
||||
let bytes = response.body().limit(320_000).await.unwrap();
|
||||
assert_eq!(bytes, Bytes::from(data));
|
||||
}
|
||||
|
||||
#[cfg(feature = "openssl")]
|
||||
#[ntex::test]
|
||||
async fn test_brotli_encoding_large_openssl() {
|
||||
// load ssl keys
|
||||
use tls_openssl::ssl::{SslAcceptor, SslFiletype, SslMethod};
|
||||
|
||||
let mut builder = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
|
||||
builder
|
||||
.set_private_key_file("./tests/key.pem", SslFiletype::PEM)
|
||||
.unwrap();
|
||||
builder
|
||||
.set_certificate_chain_file("./tests/cert.pem")
|
||||
.unwrap();
|
||||
|
||||
let data = STR.repeat(10);
|
||||
let srv = test::server_with(test::config().openssl(builder.build()), move || {
|
||||
App::new().service(web::resource("/").route(web::to(|bytes: Bytes| async {
|
||||
HttpResponse::Ok()
|
||||
.encoding(ContentEncoding::Identity)
|
||||
.body(bytes)
|
||||
})))
|
||||
});
|
||||
|
||||
// body
|
||||
let mut e = BrotliEncoder::new(Vec::new(), 3);
|
||||
e.write_all(data.as_ref()).unwrap();
|
||||
let enc = e.finish().unwrap();
|
||||
|
||||
// client request
|
||||
let mut response = srv
|
||||
.post("/")
|
||||
.header(CONTENT_ENCODING, "br")
|
||||
.send_body(enc)
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(response.status().is_success());
|
||||
|
||||
// read response
|
||||
let bytes = response.body().await.unwrap();
|
||||
assert_eq!(bytes, Bytes::from(data));
|
||||
}
|
||||
|
||||
#[cfg(feature = "openssl")]
|
||||
#[ntex::test]
|
||||
async fn test_brotli_encoding_large_openssl_h1() {
|
||||
// load ssl keys
|
||||
use tls_openssl::ssl::{SslAcceptor, SslFiletype, SslMethod};
|
||||
|
||||
let mut builder = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
|
||||
builder
|
||||
.set_private_key_file("./tests/key.pem", SslFiletype::PEM)
|
||||
.unwrap();
|
||||
builder
|
||||
.set_certificate_chain_file("./tests/cert.pem")
|
||||
.unwrap();
|
||||
|
||||
let data = STR.repeat(10);
|
||||
let srv = test::server_with(test::config().openssl(builder.build()).h1(), move || {
|
||||
App::new().service(web::resource("/").route(web::to(|bytes: Bytes| async {
|
||||
HttpResponse::Ok()
|
||||
.encoding(ContentEncoding::Identity)
|
||||
.body(bytes)
|
||||
})))
|
||||
});
|
||||
|
||||
// body
|
||||
let mut e = BrotliEncoder::new(Vec::new(), 3);
|
||||
e.write_all(data.as_ref()).unwrap();
|
||||
let enc = e.finish().unwrap();
|
||||
|
||||
// client request
|
||||
let mut response = srv
|
||||
.post("/")
|
||||
.header(CONTENT_ENCODING, "br")
|
||||
.send_body(enc)
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(response.status().is_success());
|
||||
|
||||
// read response
|
||||
let bytes = response.body().await.unwrap();
|
||||
assert_eq!(bytes, Bytes::from(data));
|
||||
}
|
||||
|
||||
#[cfg(feature = "openssl")]
|
||||
#[ntex::test]
|
||||
async fn test_brotli_encoding_large_openssl_h2() {
|
||||
// load ssl keys
|
||||
use tls_openssl::ssl::{AlpnError, SslAcceptor, SslFiletype, SslMethod};
|
||||
|
||||
let mut builder = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
|
||||
builder
|
||||
.set_private_key_file("./tests/key.pem", SslFiletype::PEM)
|
||||
.unwrap();
|
||||
builder
|
||||
.set_certificate_chain_file("./tests/cert.pem")
|
||||
.unwrap();
|
||||
builder.set_alpn_select_callback(|_, protos| {
|
||||
const H2: &[u8] = b"\x02h2";
|
||||
const H11: &[u8] = b"\x08http/1.1";
|
||||
if protos.windows(3).any(|window| window == H2) {
|
||||
Ok(b"h2")
|
||||
} else if protos.windows(9).any(|window| window == H11) {
|
||||
Ok(b"http/1.1")
|
||||
} else {
|
||||
Err(AlpnError::NOACK)
|
||||
}
|
||||
});
|
||||
builder.set_alpn_protos(b"\x08http/1.1\x02h2").unwrap();
|
||||
|
||||
let data = STR.repeat(10);
|
||||
let srv = test::server_with(test::config().openssl(builder.build()).h2(), move || {
|
||||
App::new().service(web::resource("/").route(web::to(|bytes: Bytes| async {
|
||||
HttpResponse::Ok()
|
||||
.encoding(ContentEncoding::Identity)
|
||||
.body(bytes)
|
||||
})))
|
||||
});
|
||||
|
||||
// body
|
||||
let mut e = BrotliEncoder::new(Vec::new(), 3);
|
||||
e.write_all(data.as_ref()).unwrap();
|
||||
let enc = e.finish().unwrap();
|
||||
|
||||
// client request
|
||||
let mut response = srv
|
||||
.post("/")
|
||||
.header(CONTENT_ENCODING, "br")
|
||||
.send_body(enc)
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(response.status().is_success());
|
||||
|
||||
// read response
|
||||
let bytes = response.body().await.unwrap();
|
||||
assert_eq!(bytes, Bytes::from(data));
|
||||
}
|
||||
|
||||
#[cfg(all(feature = "rustls", feature = "openssl"))]
|
||||
#[ntex::test]
|
||||
async fn test_reading_deflate_encoding_large_random_rustls() {
|
||||
|
@ -868,7 +610,7 @@ async fn test_reading_deflate_encoding_large_random_rustls() {
|
|||
// client request
|
||||
let req = srv
|
||||
.post("/")
|
||||
.timeout(Millis(10_000))
|
||||
.timeout(Millis(30_000))
|
||||
.header(CONTENT_ENCODING, "deflate")
|
||||
.send_stream(TestBody::new(Bytes::from(enc), 1024));
|
||||
|
||||
|
@ -909,7 +651,7 @@ async fn test_reading_deflate_encoding_large_random_rustls_h1() {
|
|||
// client request
|
||||
let req = srv
|
||||
.post("/")
|
||||
.timeout(Millis(10_000))
|
||||
.timeout(Millis(30_000))
|
||||
.header(CONTENT_ENCODING, "deflate")
|
||||
.send_stream(TestBody::new(Bytes::from(enc), 1024));
|
||||
|
||||
|
@ -950,7 +692,7 @@ async fn test_reading_deflate_encoding_large_random_rustls_h2() {
|
|||
// client request
|
||||
let req = srv
|
||||
.post("/")
|
||||
.timeout(Millis(10_000))
|
||||
.timeout(Millis(30_000))
|
||||
.header(CONTENT_ENCODING, "deflate")
|
||||
.send_stream(TestBody::new(Bytes::from(enc), 1024));
|
||||
|
||||
|
|
|
@ -21,6 +21,8 @@ async fn service(msg: ws::Frame) -> Result<Option<ws::Message>, io::Error> {
|
|||
|
||||
#[ntex::test]
|
||||
async fn web_ws() {
|
||||
let _ = env_logger::try_init();
|
||||
|
||||
let srv = test::server(|| {
|
||||
App::new().service(web::resource("/").route(web::to(
|
||||
|req: HttpRequest| async move {
|
||||
|
|