Compare commits

...

26 commits

Author SHA1 Message Date
Nikolay Kim
01d3a2440b
Prepare net release (#550) 2025-03-28 21:26:07 +01:00
Nikolay Kim
f5ee55d598
Handle socket close for poll driver (#549) 2025-03-28 21:06:11 +01:00
Nikolay Kim
e4f24ee41f Handle flaky tests 2025-03-28 11:39:24 +01:00
Nikolay Kim
f6fe9c3e10
Improve tests (#548) 2025-03-28 10:07:10 +01:00
Nikolay Kim
30928d019c
Improve tests (#547) 2025-03-28 09:11:59 +01:00
Nikolay Kim
e9a1284151
Better worker availability handling (#546) 2025-03-28 08:51:44 +01:00
Nikolay Kim
8f2d5056c9
Return PayloadError::Incomplete on server disconnect (#545) 2025-03-28 02:10:25 +01:00
Nikolay Kim
f647ad2eac
Update tests (#544) 2025-03-27 22:16:51 +01:00
Ruangyot Nanchiang
728ab919a3
Expose WebStack for external wrapper support in downstream crates (#542)
* add public ServiceConfig::register constructor to support external configuration (#250)

* fix: doctest ServiceConfig::register() error (#250)

* add unit testing for ServiceConfig::register()

* replace pub(crate) to pub in ServiceConfig::new() (#250)

* replace pub to pub(crate) for ServiceConfig::new() and add pub for mod ntex::web::stack instead

* remove unused DefaultError import in config.rs tests

---------

Co-authored-by: RuangyotN <ruangyotn@skyller.co>
2025-03-27 22:12:34 +01:00
Nikolay Kim
b2915f4868
Improve tests (#543) 2025-03-27 20:45:43 +01:00
Nikolay Kim
eb4ec4b3e1
Add Arbiter::get_value() helper method (#541) 2025-03-26 14:40:05 +01:00
Nikolay Kim
0d3f1293c9
Update neon runtime (#540) 2025-03-25 12:40:42 +01:00
Ruangyot Nanchiang
e903e65e27
add public ServiceConfig::register constructor to support external configuration (#250) (#539)
* add public ServiceConfig::register constructor to support external configuration (#250)

* fix: doctest ServiceConfig::register() error (#250)

* add unit testing for ServiceConfig::register()

* replace pub(crate) to pub in ServiceConfig::new() (#250)

---------

Co-authored-by: RuangyotN <ruangyotn@skyller.co>
2025-03-25 12:31:09 +01:00
Nikolay Kim
eaec50d8a2
Prepare release (#538) 2025-03-22 22:17:59 +01:00
Drew Pirrone-Brusse
b32df88500
Publicize web::app_service::AppService (#534) 2025-03-22 22:16:00 +01:00
Nikolay Kim
5484009c92
Simplify neon poll impl (#537) 2025-03-21 08:21:45 +01:00
Nikolay Kim
bf6b1d6c79
Maintain interest info for poll driver (#536) 2025-03-20 08:56:31 +01:00
Nikolay Kim
e3f58cce27
Redesign neon poll support (#535) 2025-03-19 21:13:39 +01:00
Nikolay Kim
e904cf85f1 Fix tls examples
Some checks failed
Checks / Check (push) Failing after 4s
Checks / Clippy (push) Failing after 3s
Checks / Rustfmt (push) Failing after 3s
Coverage / coverage (push) Failing after 2s
CI (Linux) / 1.75.0 - x86_64-unknown-linux-gnu (push) Failing after 2s
CI (Linux) / nightly - x86_64-unknown-linux-gnu (push) Failing after 2s
CI (Linux) / stable - x86_64-unknown-linux-gnu (push) Failing after 2s
CI (Windows) / nightly - x86_64-pc-windows-msvc (push) Has been cancelled
CI (Windows) / stable - x86_64-pc-windows-msvc (push) Has been cancelled
CI (OSX) / nightly - aarch64-apple-darwin (push) Has been cancelled
CI (OSX) / stable - aarch64-apple-darwin (push) Has been cancelled
2025-03-18 06:10:52 +01:00
Nikolay Kim
3b58f5a111
Add delay for test server availability, could cause connect race (#533) 2025-03-18 05:50:28 +01:00
Nikolay Kim
5621ca1898
Add check for required io-uring opcodes (#532)
Some checks failed
Coverage / coverage (push) Failing after 2s
CI (OSX) / nightly - aarch64-apple-darwin (push) Waiting to run
CI (OSX) / stable - aarch64-apple-darwin (push) Waiting to run
CI (Windows) / nightly - x86_64-pc-windows-msvc (push) Waiting to run
CI (Windows) / stable - x86_64-pc-windows-msvc (push) Waiting to run
Checks / Check (push) Failing after 2s
Checks / Clippy (push) Failing after 2s
Checks / Rustfmt (push) Failing after 2s
CI (Linux) / nightly - x86_64-unknown-linux-gnu (push) Failing after 2s
CI (Linux) / stable - x86_64-unknown-linux-gnu (push) Failing after 2s
CI (Linux) / 1.75.0 - x86_64-unknown-linux-gnu (push) Failing after 2s
2025-03-17 22:03:55 +01:00
Nikolay Kim
11734e8f1b Run neon tests first 2025-03-17 04:57:28 +01:00
Nikolay Kim
5426790eb0
Remove brotli support (#531) 2025-03-16 12:38:09 +01:00
Nikolay Kim
7417ee3a4b
Allow to run publish future to completion in case error (#529) 2025-03-16 12:11:01 +01:00
Nikolay Kim
1f71b200ad
Close FD in various case for poll driver (#530) 2025-03-16 12:09:09 +01:00
Nikolay Kim
f15c3203b1
Fix operation cancelation handling for poll driver (#528) 2025-03-15 01:19:35 +05:00
46 changed files with 893 additions and 1094 deletions

View file

@ -12,7 +12,7 @@ jobs:
with: with:
toolchain: stable toolchain: stable
- run: - run:
cargo check --tests --all --no-default-features --features="ntex/compio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws,ntex/brotli" cargo check --tests --all --no-default-features --features="ntex/compio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws"
clippy: clippy:
name: Clippy name: Clippy
@ -24,7 +24,7 @@ jobs:
toolchain: stable toolchain: stable
components: clippy components: clippy
- run: - run:
cargo clippy --tests --all --no-default-features --features="ntex/compio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws,ntex/brotli" cargo clippy --tests --all --no-default-features --features="ntex/compio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws"
fmt: fmt:
name: Rustfmt name: Rustfmt

View file

@ -8,11 +8,6 @@ jobs:
env: env:
CARGO_TERM_COLOR: always CARGO_TERM_COLOR: always
steps: steps:
- name: Free Disk Space
uses: jlumbroso/free-disk-space@main
with:
tool-cache: true
- uses: actions/checkout@v4 - uses: actions/checkout@v4
- name: Install Rust - name: Install Rust
run: rustup update nightly run: rustup update nightly
@ -26,17 +21,17 @@ jobs:
- name: Clean coverage results - name: Clean coverage results
run: cargo llvm-cov clean --workspace run: cargo llvm-cov clean --workspace
- name: Code coverage (tokio)
run: cargo llvm-cov --no-report --all --no-default-features --features="ntex/tokio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws,ntex/brotli"
- name: Code coverage (compio)
run: cargo llvm-cov --no-report --all --no-default-features --features="ntex/compio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws,ntex/brotli"
- name: Code coverage (neon) - name: Code coverage (neon)
run: cargo llvm-cov --no-report --all --no-default-features --features="ntex/neon,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws,ntex/brotli" run: cargo llvm-cov --no-report --all --no-default-features --features="ntex/neon,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws"
- name: Code coverage (neon-uring) - name: Code coverage (neon-uring)
run: cargo llvm-cov --no-report --all --no-default-features --features="ntex/neon-uring,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws,ntex/brotli" run: cargo llvm-cov --no-report --all --no-default-features --features="ntex/neon-uring,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws"
- name: Code coverage (tokio)
run: cargo llvm-cov --no-report --all --no-default-features --features="ntex/tokio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws"
- name: Code coverage (compio)
run: cargo llvm-cov --no-report --all --no-default-features --features="ntex/compio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws"
- name: Generate coverage report - name: Generate coverage report
run: cargo llvm-cov report --lcov --output-path lcov.info --ignore-filename-regex="ntex-compio|ntex-tokio" run: cargo llvm-cov report --lcov --output-path lcov.info --ignore-filename-regex="ntex-compio|ntex-tokio"

View file

@ -16,11 +16,6 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Free Disk Space
uses: jlumbroso/free-disk-space@main
with:
tool-cache: true
- uses: actions/checkout@v4 - uses: actions/checkout@v4
- name: Install ${{ matrix.version }} - name: Install ${{ matrix.version }}
@ -44,25 +39,25 @@ jobs:
path: ~/.cargo/git path: ~/.cargo/git
key: ${{ matrix.version }}-x86_64-unknown-linux-gnu-cargo-index-trimmed-${{ hashFiles('**/Cargo.lock') }} key: ${{ matrix.version }}-x86_64-unknown-linux-gnu-cargo-index-trimmed-${{ hashFiles('**/Cargo.lock') }}
- name: Run tests (tokio)
timeout-minutes: 40
run: |
cargo test --all --no-fail-fast --no-default-features --features="ntex/tokio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws,ntex/brotli"
- name: Run tests (compio)
timeout-minutes: 40
run: |
cargo test --all --no-default-features --features="ntex/compio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws,ntex/brotli"
- name: Run tests (neon) - name: Run tests (neon)
timeout-minutes: 40 timeout-minutes: 40
run: | run: |
cargo test --all --no-default-features --features="ntex/neon,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws,ntex/brotli" cargo test --all --no-default-features --features="ntex/neon,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws"
- name: Run tests (neon-uring) - name: Run tests (neon-uring)
timeout-minutes: 40 timeout-minutes: 40
run: | run: |
cargo test --all --no-default-features --features="ntex/neon-uring,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws,ntex/brotli" cargo test --all --no-default-features --features="ntex/neon-uring,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws"
- name: Run tests (tokio)
timeout-minutes: 40
run: |
cargo test --all --no-fail-fast --no-default-features --features="ntex/tokio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws"
- name: Run tests (compio)
timeout-minutes: 40
run: |
cargo test --all --no-default-features --features="ntex/compio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws"
- name: Install cargo-cache - name: Install cargo-cache
continue-on-error: true continue-on-error: true

View file

@ -37,16 +37,16 @@ jobs:
path: ~/.cargo/git path: ~/.cargo/git
key: ${{ matrix.version }}-aarch64-apple-darwin-cargo-index-trimmed-${{ hashFiles('**/Cargo.lock') }} key: ${{ matrix.version }}-aarch64-apple-darwin-cargo-index-trimmed-${{ hashFiles('**/Cargo.lock') }}
- name: Run tests (neon)
timeout-minutes: 40
run: cargo test --all --no-default-features --no-fail-fast --features="ntex/neon,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws"
- name: Run tests (tokio) - name: Run tests (tokio)
run: cargo test --all --no-default-features --no-fail-fast --features="ntex/tokio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws,ntex/brotli" run: cargo test --all --no-default-features --no-fail-fast --features="ntex/tokio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws"
- name: Run tests (compio) - name: Run tests (compio)
timeout-minutes: 40 timeout-minutes: 40
run: cargo test --all --no-default-features --no-fail-fast --features="ntex/compio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws,ntex/brotli" run: cargo test --all --no-default-features --no-fail-fast --features="ntex/compio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws"
- name: Run tests (neon)
timeout-minutes: 40
run: cargo test --all --no-default-features --no-fail-fast --features="ntex/neon,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws,ntex/brotli"
- name: Install cargo-cache - name: Install cargo-cache
continue-on-error: true continue-on-error: true

View file

@ -63,8 +63,8 @@ jobs:
- name: Run tests (tokio) - name: Run tests (tokio)
run: | run: |
cargo test --all --lib --no-default-features --no-fail-fast --features="ntex/tokio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws,ntex/brotli" -- --skip test_timer cargo test --all --lib --no-default-features --no-fail-fast --features="ntex/tokio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws" -- --skip test_timer
- name: Run tests (compio) - name: Run tests (compio)
run: | run: |
cargo test --all --lib --no-default-features --no-fail-fast --features="ntex/compio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws,ntex/brotli" -- --skip test_timer cargo test --all --lib --no-default-features --no-fail-fast --features="ntex/compio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws" -- --skip test_timer

View file

@ -46,7 +46,10 @@ ntex-compio = { path = "ntex-compio" }
ntex-tokio = { path = "ntex-tokio" } ntex-tokio = { path = "ntex-tokio" }
[workspace.dependencies] [workspace.dependencies]
async-channel = "2"
async-task = "4.5.0" async-task = "4.5.0"
atomic-waker = "1.1"
core_affinity = "0.8"
bitflags = "2" bitflags = "2"
cfg_aliases = "0.2.1" cfg_aliases = "0.2.1"
cfg-if = "1.0.0" cfg-if = "1.0.0"
@ -57,7 +60,8 @@ fxhash = "0.2"
libc = "0.2.164" libc = "0.2.164"
log = "0.4" log = "0.4"
io-uring = "0.7.4" io-uring = "0.7.4"
polling = "3.3.0" oneshot = "0.1"
polling = "3.7.4"
nohash-hasher = "0.2.0" nohash-hasher = "0.2.0"
scoped-tls = "1.0.1" scoped-tls = "1.0.1"
slab = "0.4.9" slab = "0.4.9"

View file

@ -1,5 +1,9 @@
# Changes # Changes
## [2.11.1] - 2025-03-20
* Add readiness check support
## [2.11.0] - 2025-03-10 ## [2.11.0] - 2025-03-10
* Add single io context * Add single io context

View file

@ -1,6 +1,6 @@
[package] [package]
name = "ntex-io" name = "ntex-io"
version = "2.11.0" version = "2.11.1"
authors = ["ntex contributors <team@ntex.rs>"] authors = ["ntex contributors <team@ntex.rs>"]
description = "Utilities for encoding and decoding frames" description = "Utilities for encoding and decoding frames"
keywords = ["network", "framework", "async", "futures"] keywords = ["network", "framework", "async", "futures"]
@ -28,4 +28,3 @@ pin-project-lite = "0.2"
[dev-dependencies] [dev-dependencies]
ntex = "2" ntex = "2"
rand = "0.8" rand = "0.8"
env_logger = "0.11"

View file

@ -537,7 +537,9 @@ impl IoContext {
self.0.tag(), self.0.tag(),
nbytes nbytes
); );
inner.dispatch_task.wake(); if !inner.dispatch_task.wake_checked() {
log::error!("Dispatcher waker is not registered");
}
} else { } else {
if nbytes >= hw { if nbytes >= hw {
// read task is paused because of read back-pressure // read task is paused because of read back-pressure
@ -722,28 +724,20 @@ impl IoContext {
} }
/// Get read buffer /// Get read buffer
pub fn with_read_buf<F>(&self, f: F) -> Poll<()> pub fn is_read_ready(&self) -> bool {
where
F: FnOnce(&mut BytesVec) -> Poll<io::Result<usize>>,
{
let result = self.with_read_buf_inner(f);
// check read readiness // check read readiness
if result.is_pending() {
if let Some(waker) = self.0 .0.read_task.take() { if let Some(waker) = self.0 .0.read_task.take() {
let mut cx = Context::from_waker(&waker); let mut cx = Context::from_waker(&waker);
if let Poll::Ready(ReadStatus::Ready) = if let Poll::Ready(ReadStatus::Ready) = self.0.filter().poll_read_ready(&mut cx)
self.0.filter().poll_read_ready(&mut cx)
{ {
return Poll::Pending; return true;
} }
} }
} false
result
} }
fn with_read_buf_inner<F>(&self, f: F) -> Poll<()> pub fn with_read_buf<F>(&self, f: F) -> Poll<()>
where where
F: FnOnce(&mut BytesVec) -> Poll<io::Result<usize>>, F: FnOnce(&mut BytesVec) -> Poll<io::Result<usize>>,
{ {
@ -795,7 +789,9 @@ impl IoContext {
self.0.tag(), self.0.tag(),
nbytes nbytes
); );
inner.dispatch_task.wake(); if !inner.dispatch_task.wake_checked() {
log::error!("Dispatcher waker is not registered");
}
} else { } else {
if nbytes >= hw { if nbytes >= hw {
// read task is paused because of read back-pressure // read task is paused because of read back-pressure
@ -838,33 +834,8 @@ impl IoContext {
} }
} }
pub fn with_write_buf<F>(&self, f: F) -> Poll<()>
where
F: FnOnce(&BytesVec) -> Poll<io::Result<usize>>,
{
let result = self.with_write_buf_inner(f);
// check write readiness
if result.is_pending() {
let inner = &self.0 .0;
if let Some(waker) = inner.write_task.take() {
let ready = self
.0
.filter()
.poll_write_ready(&mut Context::from_waker(&waker));
if !matches!(
ready,
Poll::Ready(WriteStatus::Ready | WriteStatus::Shutdown)
) {
return Poll::Ready(());
}
}
}
result
}
/// Get write buffer /// Get write buffer
fn with_write_buf_inner<F>(&self, f: F) -> Poll<()> pub fn with_write_buf<F>(&self, f: F) -> Poll<()>
where where
F: FnOnce(&BytesVec) -> Poll<io::Result<usize>>, F: FnOnce(&BytesVec) -> Poll<io::Result<usize>>,
{ {

View file

@ -18,4 +18,3 @@ proc-macro2 = "^1"
[dev-dependencies] [dev-dependencies]
ntex = "2" ntex = "2"
futures = "0.3" futures = "0.3"
env_logger = "0.11"

View file

@ -1,5 +1,39 @@
# Changes # Changes
## [2.5.10] - 2025-03-28
* Better closed sockets handling
## [2.5.9] - 2025-03-27
* Handle closed sockets
## [2.5.8] - 2025-03-25
* Update neon runtime
## [2.5.7] - 2025-03-21
* Simplify neon poll impl
## [2.5.6] - 2025-03-20
* Redesign neon poll support
## [2.5.5] - 2025-03-17
* Add check for required io-uring opcodes
* Handle io-uring cancelation
## [2.5.4] - 2025-03-15
* Close FD in various case for poll driver
## [2.5.3] - 2025-03-14
* Fix operation cancelation handling for poll driver
## [2.5.2] - 2025-03-14 ## [2.5.2] - 2025-03-14
* Fix operation cancelation handling for io-uring driver * Fix operation cancelation handling for io-uring driver

View file

@ -1,6 +1,6 @@
[package] [package]
name = "ntex-net" name = "ntex-net"
version = "2.5.2" version = "2.5.10"
authors = ["ntex contributors <team@ntex.rs>"] authors = ["ntex contributors <team@ntex.rs>"]
description = "ntexwork utils for ntex framework" description = "ntexwork utils for ntex framework"
keywords = ["network", "framework", "async", "futures"] keywords = ["network", "framework", "async", "futures"]
@ -27,27 +27,27 @@ compio = ["ntex-rt/compio", "ntex-compio"]
# neon runtime # neon runtime
neon = ["ntex-rt/neon", "ntex-neon", "slab", "socket2"] neon = ["ntex-rt/neon", "ntex-neon", "slab", "socket2"]
polling = ["ntex-neon/polling", "dep:polling"] polling = ["ntex-neon/polling", "dep:polling", "socket2"]
io-uring = ["ntex-neon/io-uring", "dep:io-uring"] io-uring = ["ntex-neon/io-uring", "dep:io-uring", "socket2"]
[dependencies] [dependencies]
ntex-service = "3.3" ntex-service = "3.3"
ntex-bytes = "0.1" ntex-bytes = "0.1"
ntex-http = "0.1" ntex-http = "0.1"
ntex-io = "2.11" ntex-io = "2.11.1"
ntex-rt = "0.4.25" ntex-rt = "0.4.25"
ntex-util = "2.5" ntex-util = "2.5"
ntex-tokio = { version = "0.5.3", optional = true } ntex-tokio = { version = "0.5.3", optional = true }
ntex-compio = { version = "0.2.4", optional = true } ntex-compio = { version = "0.2.4", optional = true }
ntex-neon = { version = "0.1.3", optional = true } ntex-neon = { version = "0.1.15", optional = true }
bitflags = { workspace = true } bitflags = { workspace = true }
cfg-if = { workspace = true } cfg-if = { workspace = true }
log = { workspace = true } log = { workspace = true }
libc = { workspace = true } libc = { workspace = true }
slab = { workspace = true, optional = true } slab = { workspace = true, optional = true }
socket2 = { workspace = true, optional = true } socket2 = { workspace = true, optional = true, features = ["all"] }
thiserror = { workspace = true } thiserror = { workspace = true }
# Linux specific dependencies # Linux specific dependencies
@ -57,4 +57,3 @@ polling = { workspace = true, optional = true }
[dev-dependencies] [dev-dependencies]
ntex = "2" ntex = "2"
env_logger = "0.11"

View file

@ -1,7 +1,7 @@
use std::os::fd::{AsRawFd, RawFd}; use std::os::fd::{AsRawFd, RawFd};
use std::{cell::RefCell, collections::VecDeque, io, rc::Rc, task::Poll}; use std::{cell::RefCell, io, rc::Rc, task::Poll};
use ntex_neon::driver::{DriverApi, Handler, Interest}; use ntex_neon::driver::{DriverApi, Event, Handler};
use ntex_neon::{syscall, Runtime}; use ntex_neon::{syscall, Runtime};
use ntex_util::channel::oneshot::Sender; use ntex_util::channel::oneshot::Sender;
use slab::Slab; use slab::Slab;
@ -12,13 +12,11 @@ pub(crate) struct ConnectOps(Rc<ConnectOpsInner>);
#[derive(Debug)] #[derive(Debug)]
enum Change { enum Change {
Readable, Event(Event),
Writable,
Error(io::Error), Error(io::Error),
} }
struct ConnectOpsBatcher { struct ConnectOpsBatcher {
feed: VecDeque<(usize, Change)>,
inner: Rc<ConnectOpsInner>, inner: Rc<ConnectOpsInner>,
} }
@ -42,10 +40,7 @@ impl ConnectOps {
connects: RefCell::new(Slab::new()), connects: RefCell::new(Slab::new()),
}); });
inner = Some(ops.clone()); inner = Some(ops.clone());
Box::new(ConnectOpsBatcher { Box::new(ConnectOpsBatcher { inner: ops })
inner: ops,
feed: VecDeque::new(),
})
}); });
ConnectOps(inner.unwrap()) ConnectOps(inner.unwrap())
@ -67,44 +62,22 @@ impl ConnectOps {
let item = Item { fd, sender }; let item = Item { fd, sender };
let id = self.0.connects.borrow_mut().insert(item); let id = self.0.connects.borrow_mut().insert(item);
self.0.api.register(fd, id, Interest::Writable); self.0.api.attach(fd, id as u32, Some(Event::writable(0)));
Ok(id) Ok(id)
} }
} }
impl Handler for ConnectOpsBatcher { impl Handler for ConnectOpsBatcher {
fn readable(&mut self, id: usize) { fn event(&mut self, id: usize, event: Event) {
log::debug!("connect-fd is readable {:?}", id); log::debug!("connect-fd is readable {:?}", id);
self.feed.push_back((id, Change::Readable));
}
fn writable(&mut self, id: usize) {
log::debug!("connect-fd is writable {:?}", id);
self.feed.push_back((id, Change::Writable));
}
fn error(&mut self, id: usize, err: io::Error) {
self.feed.push_back((id, Change::Error(err)));
}
fn commit(&mut self) {
if self.feed.is_empty() {
return;
}
log::debug!("Commit connect driver changes, num: {:?}", self.feed.len());
let mut connects = self.inner.connects.borrow_mut(); let mut connects = self.inner.connects.borrow_mut();
for (id, change) in self.feed.drain(..) {
if connects.contains(id) { if connects.contains(id) {
let item = connects.remove(id); let item = connects.remove(id);
match change { if event.writable {
Change::Readable => unreachable!(),
Change::Writable => {
let mut err: libc::c_int = 0; let mut err: libc::c_int = 0;
let mut err_len = let mut err_len = std::mem::size_of::<libc::c_int>() as libc::socklen_t;
std::mem::size_of::<libc::c_int>() as libc::socklen_t;
let res = syscall!(libc::getsockopt( let res = syscall!(libc::getsockopt(
item.fd.as_raw_fd(), item.fd.as_raw_fd(),
@ -120,15 +93,19 @@ impl Handler for ConnectOpsBatcher {
Err(io::Error::from_raw_os_error(err)) Err(io::Error::from_raw_os_error(err))
}; };
self.inner.api.unregister_all(item.fd); self.inner.api.detach(item.fd, id as u32);
let _ = item.sender.send(res); let _ = item.sender.send(res);
} }
Change::Error(err) => { }
}
fn error(&mut self, id: usize, err: io::Error) {
let mut connects = self.inner.connects.borrow_mut();
if connects.contains(id) {
let item = connects.remove(id);
let _ = item.sender.send(Err(err)); let _ = item.sender.send(Err(err));
self.inner.api.unregister_all(item.fd); self.inner.api.detach(item.fd, id as u32);
}
}
}
} }
} }
} }

View file

@ -1,55 +1,53 @@
use std::os::fd::{AsRawFd, RawFd}; use std::os::fd::{AsRawFd, RawFd};
use std::{cell::Cell, collections::VecDeque, io, rc::Rc, task, task::Poll}; use std::{cell::Cell, cell::RefCell, future::Future, io, mem, rc::Rc, task, task::Poll};
use ntex_neon::driver::{DriverApi, Handler, Interest}; use ntex_neon::driver::{DriverApi, Event, Handler};
use ntex_neon::{syscall, Runtime}; use ntex_neon::{syscall, Runtime};
use slab::Slab; use slab::Slab;
use ntex_bytes::BufMut; use ntex_bytes::BufMut;
use ntex_io::IoContext; use ntex_io::IoContext;
bitflags::bitflags! { pub(crate) struct StreamCtl<T> {
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)] id: u32,
struct Flags: u8 { inner: Rc<StreamOpsInner<T>>,
const ERROR = 0b0000_0001;
const RD = 0b0000_0010;
const WR = 0b0000_0100;
}
} }
pub(crate) struct StreamCtl<T> { bitflags::bitflags! {
id: usize, #[derive(Copy, Clone, Debug)]
inner: Rc<StreamOpsInner<T>>, struct Flags: u8 {
const RD = 0b0000_0001;
const WR = 0b0000_0010;
}
} }
struct StreamItem<T> { struct StreamItem<T> {
io: Option<T>, io: Option<T>,
fd: RawFd, fd: RawFd,
context: IoContext,
flags: Flags, flags: Flags,
ref_count: usize, ref_count: u16,
context: IoContext,
} }
pub(crate) struct StreamOps<T>(Rc<StreamOpsInner<T>>); pub(crate) struct StreamOps<T>(Rc<StreamOpsInner<T>>);
#[derive(Debug)]
enum Change {
Readable,
Writable,
Error(io::Error),
}
struct StreamOpsHandler<T> { struct StreamOpsHandler<T> {
feed: VecDeque<(usize, Change)>,
inner: Rc<StreamOpsInner<T>>, inner: Rc<StreamOpsInner<T>>,
} }
struct StreamOpsInner<T> { struct StreamOpsInner<T> {
api: DriverApi, api: DriverApi,
feed: Cell<Option<VecDeque<usize>>>, delayd_drop: Cell<bool>,
feed: RefCell<Vec<u32>>,
streams: Cell<Option<Box<Slab<StreamItem<T>>>>>, streams: Cell<Option<Box<Slab<StreamItem<T>>>>>,
} }
impl<T> StreamItem<T> {
fn tag(&self) -> &'static str {
self.context.tag()
}
}
impl<T: AsRawFd + 'static> StreamOps<T> { impl<T: AsRawFd + 'static> StreamOps<T> {
pub(crate) fn current() -> Self { pub(crate) fn current() -> Self {
Runtime::value(|rt| { Runtime::value(|rt| {
@ -57,14 +55,12 @@ impl<T: AsRawFd + 'static> StreamOps<T> {
rt.driver().register(|api| { rt.driver().register(|api| {
let ops = Rc::new(StreamOpsInner { let ops = Rc::new(StreamOpsInner {
api, api,
feed: Cell::new(Some(VecDeque::new())), feed: RefCell::new(Vec::new()),
delayd_drop: Cell::new(false),
streams: Cell::new(Some(Box::new(Slab::new()))), streams: Cell::new(Some(Box::new(Slab::new()))),
}); });
inner = Some(ops.clone()); inner = Some(ops.clone());
Box::new(StreamOpsHandler { Box::new(StreamOpsHandler { inner: ops })
inner: ops,
feed: VecDeque::new(),
})
}); });
StreamOps(inner.unwrap()) StreamOps(inner.unwrap())
@ -72,30 +68,27 @@ impl<T: AsRawFd + 'static> StreamOps<T> {
} }
pub(crate) fn register(&self, io: T, context: IoContext) -> StreamCtl<T> { pub(crate) fn register(&self, io: T, context: IoContext) -> StreamCtl<T> {
let fd = io.as_raw_fd();
let stream = self.0.with(move |streams| {
let item = StreamItem { let item = StreamItem {
fd,
context, context,
fd: io.as_raw_fd(),
io: Some(io), io: Some(io),
flags: Flags::empty(),
ref_count: 1, ref_count: 1,
flags: Flags::empty(),
}; };
self.with(|streams| {
let id = streams.insert(item);
StreamCtl { StreamCtl {
id, id: streams.insert(item) as u32,
inner: self.0.clone(), inner: self.0.clone(),
} }
}) });
}
fn with<F, R>(&self, f: F) -> R self.0.api.attach(
where fd,
F: FnOnce(&mut Slab<StreamItem<T>>) -> R, stream.id,
{ Some(Event::new(0, false, false).with_interrupt()),
let mut inner = self.0.streams.take().unwrap(); );
let result = f(&mut inner); stream
self.0.streams.set(Some(inner));
result
} }
} }
@ -106,239 +99,242 @@ impl<T> Clone for StreamOps<T> {
} }
impl<T> Handler for StreamOpsHandler<T> { impl<T> Handler for StreamOpsHandler<T> {
fn readable(&mut self, id: usize) { fn event(&mut self, id: usize, ev: Event) {
log::debug!("FD is readable {:?}", id); self.inner.with(|streams| {
self.feed.push_back((id, Change::Readable)); if !streams.contains(id) {
}
fn writable(&mut self, id: usize) {
log::debug!("FD is writable {:?}", id);
self.feed.push_back((id, Change::Writable));
}
fn error(&mut self, id: usize, err: io::Error) {
log::debug!("FD is failed {:?}, err: {:?}", id, err);
self.feed.push_back((id, Change::Error(err)));
}
fn commit(&mut self) {
if self.feed.is_empty() {
return; return;
} }
log::debug!("Commit changes, num: {:?}", self.feed.len());
let mut streams = self.inner.streams.take().unwrap();
for (id, change) in self.feed.drain(..) {
match change {
Change::Readable => {
let item = &mut streams[id]; let item = &mut streams[id];
let result = item.context.with_read_buf(|buf| { if item.io.is_none() {
return;
}
log::debug!("{}: FD event {:?} event: {:?}", item.tag(), id, ev);
// handle HUP
if ev.is_interrupt() {
item.context.stopped(None);
close(id as u32, item, &self.inner.api, None, true);
return;
}
let mut renew_ev = Event::new(0, false, false).with_interrupt();
if ev.readable {
let res = item.context.with_read_buf(|buf| {
let chunk = buf.chunk_mut(); let chunk = buf.chunk_mut();
let b = chunk.as_mut_ptr(); let result = task::ready!(syscall!(
Poll::Ready( break libc::read(item.fd, chunk.as_mut_ptr() as _, chunk.len())
task::ready!(syscall!( ));
break libc::read(item.fd, b as _, chunk.len()) if let Ok(size) = result {
)) log::debug!("{}: data {:?}, s: {:?}", item.tag(), item.fd, size);
.inspect(|size| { unsafe { buf.advance_mut(size) };
unsafe { buf.advance_mut(*size) }; }
log::debug!( Poll::Ready(result)
"{}: {:?}, SIZE: {:?}, BUF: {:?}",
item.context.tag(),
item.fd,
size,
buf
);
}),
)
}); });
if result.is_pending() { if res.is_pending() && item.context.is_read_ready() {
renew_ev.readable = true;
item.flags.insert(Flags::RD); item.flags.insert(Flags::RD);
self.inner.api.register(item.fd, id, Interest::Readable);
} else { } else {
item.flags.remove(Flags::RD); item.flags.remove(Flags::RD);
} }
} else if item.flags.contains(Flags::RD) {
renew_ev.readable = true;
} }
Change::Writable => {
let item = &mut streams[id];
let result = item.context.with_write_buf(|buf| {
let slice = &buf[..];
syscall!(
break libc::write(item.fd, slice.as_ptr() as _, slice.len())
)
});
if ev.writable {
let result = item.context.with_write_buf(|buf| {
log::debug!("{}: write {:?} s: {:?}", item.tag(), item.fd, buf.len());
syscall!(break libc::write(item.fd, buf[..].as_ptr() as _, buf.len()))
});
if result.is_pending() { if result.is_pending() {
renew_ev.writable = true;
item.flags.insert(Flags::WR); item.flags.insert(Flags::WR);
self.inner.api.register(item.fd, id, Interest::Writable);
} else { } else {
item.flags.remove(Flags::WR); item.flags.remove(Flags::WR);
} }
} } else if item.flags.contains(Flags::WR) {
Change::Error(err) => { renew_ev.writable = true;
if let Some(item) = streams.get_mut(id) {
item.context.stopped(Some(err));
if !item.flags.contains(Flags::ERROR) {
item.flags.insert(Flags::ERROR);
item.flags.remove(Flags::RD | Flags::WR);
self.inner.api.unregister_all(item.fd);
}
}
}
}
} }
// extra self.inner.api.modify(item.fd, id as u32, renew_ev);
let mut feed = self.inner.feed.take().unwrap();
for id in feed.drain(..) {
let item = &mut streams[id];
log::debug!("{}: Drop io ({}), {:?}", item.context.tag(), id, item.fd);
// delayed drops
if self.inner.delayd_drop.get() {
for id in self.inner.feed.borrow_mut().drain(..) {
let item = &mut streams[id as usize];
item.ref_count -= 1; item.ref_count -= 1;
if item.ref_count == 0 { if item.ref_count == 0 {
let item = streams.remove(id); let mut item = streams.remove(id as usize);
if item.io.is_some() { log::debug!(
self.inner.api.unregister_all(item.fd); "{}: Drop ({}), {:?}, has-io: {}",
item.tag(),
id,
item.fd,
item.io.is_some()
);
close(id, &mut item, &self.inner.api, None, true);
} }
} }
self.inner.delayd_drop.set(false);
}
});
} }
self.inner.feed.set(Some(feed)); fn error(&mut self, id: usize, err: io::Error) {
self.inner.streams.set(Some(streams)); self.inner.with(|streams| {
if let Some(item) = streams.get_mut(id) {
log::debug!(
"{}: FD is failed ({}) {:?}, err: {:?}",
item.tag(),
id,
item.fd,
err
);
close(id as u32, item, &self.inner.api, Some(err), false);
}
})
}
}
impl<T> StreamOpsInner<T> {
fn with<F, R>(&self, f: F) -> R
where
F: FnOnce(&mut Slab<StreamItem<T>>) -> R,
{
let mut streams = self.streams.take().unwrap();
let result = f(&mut streams);
self.streams.set(Some(streams));
result
}
}
fn close<T>(
id: u32,
item: &mut StreamItem<T>,
api: &DriverApi,
error: Option<io::Error>,
shutdown: bool,
) -> Option<ntex_rt::JoinHandle<io::Result<i32>>> {
if let Some(io) = item.io.take() {
log::debug!("{}: Closing ({}), {:?}", item.tag(), id, item.fd);
mem::forget(io);
if let Some(err) = error {
item.context.stopped(Some(err));
}
let fd = item.fd;
api.detach(fd, id);
Some(ntex_rt::spawn_blocking(move || {
if shutdown {
let _ = syscall!(libc::shutdown(fd, libc::SHUT_RDWR));
}
syscall!(libc::close(fd))
}))
} else {
None
} }
} }
impl<T> StreamCtl<T> { impl<T> StreamCtl<T> {
pub(crate) async fn close(self) -> io::Result<()> { pub(crate) fn close(self) -> impl Future<Output = io::Result<()>> {
let (io, fd) = let id = self.id as usize;
self.with(|streams| (streams[self.id].io.take(), streams[self.id].fd)); let fut = self.inner.with(|streams| {
if let Some(io) = io { let item = &mut streams[id];
std::mem::forget(io); close(self.id, item, &self.inner.api, None, false)
});
ntex_rt::spawn_blocking(move || syscall!(libc::close(fd))) async move {
.await if let Some(fut) = fut {
fut.await
.map_err(|e| io::Error::new(io::ErrorKind::Other, e)) .map_err(|e| io::Error::new(io::ErrorKind::Other, e))
.and_then(crate::helpers::pool_io_err)?; .and_then(crate::helpers::pool_io_err)?;
} }
Ok(()) Ok(())
} }
}
pub(crate) fn with_io<F, R>(&self, f: F) -> R pub(crate) fn with_io<F, R>(&self, f: F) -> R
where where
F: FnOnce(Option<&T>) -> R, F: FnOnce(Option<&T>) -> R,
{ {
self.with(|streams| f(streams[self.id].io.as_ref()))
}
pub(crate) fn pause_all(&self) {
self.with(|streams| {
let item = &mut streams[self.id];
if item.flags.intersects(Flags::RD | Flags::WR) {
log::debug!(
"{}: Pause all io ({}), {:?}",
item.context.tag(),
self.id,
item.fd
);
item.flags.remove(Flags::RD | Flags::WR);
self.inner.api.unregister_all(item.fd);
}
})
}
pub(crate) fn pause_read(&self) {
self.with(|streams| {
let item = &mut streams[self.id];
log::debug!(
"{}: Pause io read ({}), {:?}",
item.context.tag(),
self.id,
item.fd
);
if item.flags.contains(Flags::RD) {
item.flags.remove(Flags::RD);
self.inner.api.unregister(item.fd, Interest::Readable);
}
})
}
pub(crate) fn resume_read(&self) {
self.with(|streams| {
let item = &mut streams[self.id];
log::debug!(
"{}: Resume io read ({}), {:?}",
item.context.tag(),
self.id,
item.fd
);
if !item.flags.contains(Flags::RD) {
item.flags.insert(Flags::RD);
self.inner self.inner
.api .with(|streams| f(streams[self.id as usize].io.as_ref()))
.register(item.fd, self.id, Interest::Readable);
}
})
} }
pub(crate) fn resume_write(&self) { pub(crate) fn modify(&self, rd: bool, wr: bool) {
self.with(|streams| { self.inner.with(|streams| {
let item = &mut streams[self.id]; let item = &mut streams[self.id as usize];
if !item.flags.contains(Flags::WR) {
log::debug!( log::debug!(
"{}: Resume io write ({}), {:?}", "{}: Modify interest ({}), {:?} rd: {:?}, wr: {:?}",
item.context.tag(), item.tag(),
self.id, self.id,
item.fd item.fd,
rd,
wr
); );
let mut event = Event::new(0, false, false).with_interrupt();
if rd {
if item.flags.contains(Flags::RD) {
event.readable = true;
} else {
let res = item.context.with_read_buf(|buf| {
let chunk = buf.chunk_mut();
let result = task::ready!(syscall!(
break libc::read(item.fd, chunk.as_mut_ptr() as _, chunk.len())
));
if let Ok(size) = result {
log::debug!(
"{}: read {:?}, s: {:?}",
item.tag(),
item.fd,
size
);
unsafe { buf.advance_mut(size) };
}
Poll::Ready(result)
});
if res.is_pending() && item.context.is_read_ready() {
event.readable = true;
item.flags.insert(Flags::RD);
}
}
}
if wr {
if item.flags.contains(Flags::WR) {
event.writable = true;
} else {
let result = item.context.with_write_buf(|buf| { let result = item.context.with_write_buf(|buf| {
log::debug!( log::debug!(
"{}: Writing io ({}), buf: {:?}", "{}: Writing ({}), buf: {:?}",
item.context.tag(), item.tag(),
self.id, self.id,
buf.len() buf.len()
); );
syscall!(
let slice = &buf[..]; break libc::write(item.fd, buf[..].as_ptr() as _, buf.len())
syscall!(break libc::write(item.fd, slice.as_ptr() as _, slice.len())) )
}); });
if result.is_pending() { if result.is_pending() {
log::debug!( event.writable = true;
"{}: Write is pending ({}), {:?}",
item.context.tag(),
self.id,
item.context.flags()
);
item.flags.insert(Flags::WR); item.flags.insert(Flags::WR);
self.inner
.api
.register(item.fd, self.id, Interest::Writable);
} }
} }
})
} }
fn with<F, R>(&self, f: F) -> R self.inner.api.modify(item.fd, self.id, event);
where })
F: FnOnce(&mut Slab<StreamItem<T>>) -> R,
{
let mut inner = self.inner.streams.take().unwrap();
let result = f(&mut inner);
self.inner.streams.set(Some(inner));
result
} }
} }
impl<T> Clone for StreamCtl<T> { impl<T> Clone for StreamCtl<T> {
fn clone(&self) -> Self { fn clone(&self) -> Self {
self.with(|streams| { self.inner.with(|streams| {
streams[self.id].ref_count += 1; streams[self.id as usize].ref_count += 1;
Self { Self {
id: self.id, id: self.id,
inner: self.inner.clone(), inner: self.inner.clone(),
@ -350,25 +346,23 @@ impl<T> Clone for StreamCtl<T> {
impl<T> Drop for StreamCtl<T> { impl<T> Drop for StreamCtl<T> {
fn drop(&mut self) { fn drop(&mut self) {
if let Some(mut streams) = self.inner.streams.take() { if let Some(mut streams) = self.inner.streams.take() {
let id = self.id as usize;
streams[id].ref_count -= 1;
if streams[id].ref_count == 0 {
let mut item = streams.remove(id);
log::debug!( log::debug!(
"{}: Drop io ({}), {:?}", "{}: Drop io ({}), {:?}, has-io: {}",
streams[self.id].context.tag(), item.tag(),
self.id, self.id,
streams[self.id].fd item.fd,
item.io.is_some()
); );
close(self.id, &mut item, &self.inner.api, None, true);
streams[self.id].ref_count -= 1;
if streams[self.id].ref_count == 0 {
let item = streams.remove(self.id);
if item.io.is_some() {
self.inner.api.unregister_all(item.fd);
}
} }
self.inner.streams.set(Some(streams)); self.inner.streams.set(Some(streams));
} else { } else {
let mut feed = self.inner.feed.take().unwrap(); self.inner.delayd_drop.set(true);
feed.push_back(self.id); self.inner.feed.borrow_mut().push(self.id);
self.inner.feed.set(Some(feed));
} }
} }
} }

View file

@ -54,21 +54,26 @@ enum Status {
async fn run<T>(ctl: StreamCtl<T>, context: IoContext) { async fn run<T>(ctl: StreamCtl<T>, context: IoContext) {
// Handle io read readiness // Handle io read readiness
let st = poll_fn(|cx| { let st = poll_fn(|cx| {
let mut modify = false;
let mut readable = false;
let mut writable = false;
let read = match context.poll_read_ready(cx) { let read = match context.poll_read_ready(cx) {
Poll::Ready(ReadStatus::Ready) => { Poll::Ready(ReadStatus::Ready) => {
ctl.resume_read(); modify = true;
readable = true;
Poll::Pending Poll::Pending
} }
Poll::Ready(ReadStatus::Terminate) => Poll::Ready(()), Poll::Ready(ReadStatus::Terminate) => Poll::Ready(()),
Poll::Pending => { Poll::Pending => {
ctl.pause_read(); modify = true;
Poll::Pending Poll::Pending
} }
}; };
let write = match context.poll_write_ready(cx) { let write = match context.poll_write_ready(cx) {
Poll::Ready(WriteStatus::Ready) => { Poll::Ready(WriteStatus::Ready) => {
ctl.resume_write(); modify = true;
writable = true;
Poll::Pending Poll::Pending
} }
Poll::Ready(WriteStatus::Shutdown) => Poll::Ready(Status::Shutdown), Poll::Ready(WriteStatus::Shutdown) => Poll::Ready(Status::Shutdown),
@ -76,6 +81,10 @@ async fn run<T>(ctl: StreamCtl<T>, context: IoContext) {
Poll::Pending => Poll::Pending, Poll::Pending => Poll::Pending,
}; };
if modify {
ctl.modify(readable, writable);
}
if read.is_pending() && write.is_pending() { if read.is_pending() && write.is_pending() {
Poll::Pending Poll::Pending
} else if write.is_ready() { } else if write.is_ready() {
@ -86,11 +95,7 @@ async fn run<T>(ctl: StreamCtl<T>, context: IoContext) {
}) })
.await; .await;
ctl.resume_write(); ctl.modify(false, true);
context.shutdown(st == Status::Shutdown).await; context.shutdown(st == Status::Shutdown).await;
context.stopped(ctl.close().await.err());
ctl.pause_all();
let result = ctl.close().await;
context.stopped(result.err());
} }

View file

@ -8,6 +8,9 @@ pub(crate) mod connect;
mod driver; mod driver;
mod io; mod io;
#[cfg(not(target_pointer_width = "64"))]
compile_error!("Only 64bit platforms are supported");
/// Tcp stream wrapper for neon TcpStream /// Tcp stream wrapper for neon TcpStream
struct TcpStream(socket2::Socket); struct TcpStream(socket2::Socket);

View file

@ -32,6 +32,10 @@ impl ConnectOps {
Runtime::value(|rt| { Runtime::value(|rt| {
let mut inner = None; let mut inner = None;
rt.driver().register(|api| { rt.driver().register(|api| {
if !api.is_supported(opcode::Connect::CODE) {
panic!("opcode::Connect is required for io-uring support");
}
let ops = Rc::new(ConnectOpsInner { let ops = Rc::new(ConnectOpsInner {
api, api,
ops: RefCell::new(Slab::new()), ops: RefCell::new(Slab::new()),

View file

@ -13,15 +13,32 @@ pub(crate) struct StreamCtl<T> {
inner: Rc<StreamOpsInner<T>>, inner: Rc<StreamOpsInner<T>>,
} }
bitflags::bitflags! {
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
struct Flags: u8 {
const RD_CANCELING = 0b0000_0001;
const RD_REISSUE = 0b0000_0010;
const WR_CANCELING = 0b0001_0000;
const WR_REISSUE = 0b0010_0000;
}
}
struct StreamItem<T> { struct StreamItem<T> {
io: Option<T>, io: Option<T>,
fd: Fd, fd: Fd,
context: IoContext, context: IoContext,
ref_count: usize, ref_count: usize,
flags: Flags,
rd_op: Option<NonZeroU32>, rd_op: Option<NonZeroU32>,
wr_op: Option<NonZeroU32>, wr_op: Option<NonZeroU32>,
} }
impl<T> StreamItem<T> {
fn tag(&self) -> &'static str {
self.context.tag()
}
}
enum Operation { enum Operation {
Recv { Recv {
id: usize, id: usize,
@ -61,6 +78,16 @@ impl<T: os::fd::AsRawFd + 'static> StreamOps<T> {
Runtime::value(|rt| { Runtime::value(|rt| {
let mut inner = None; let mut inner = None;
rt.driver().register(|api| { rt.driver().register(|api| {
if !api.is_supported(opcode::Recv::CODE) {
panic!("opcode::Recv is required for io-uring support");
}
if !api.is_supported(opcode::Send::CODE) {
panic!("opcode::Send is required for io-uring support");
}
if !api.is_supported(opcode::Close::CODE) {
panic!("opcode::Close is required for io-uring support");
}
let mut ops = Slab::new(); let mut ops = Slab::new();
ops.insert(Operation::Nop); ops.insert(Operation::Nop);
@ -88,6 +115,7 @@ impl<T: os::fd::AsRawFd + 'static> StreamOps<T> {
ref_count: 1, ref_count: 1,
rd_op: None, rd_op: None,
wr_op: None, wr_op: None,
flags: Flags::empty(),
}; };
let id = self.0.storage.borrow_mut().streams.insert(item); let id = self.0.storage.borrow_mut().streams.insert(item);
StreamCtl { StreamCtl {
@ -116,10 +144,19 @@ impl<T> Handler for StreamOpsHandler<T> {
match storage.ops.remove(user_data) { match storage.ops.remove(user_data) {
Operation::Recv { id, buf, context } => { Operation::Recv { id, buf, context } => {
log::debug!("{}: Recv canceled {:?}", context.tag(), id,); log::debug!("{}: Recv canceled {:?}", context.tag(), id);
context.release_read_buf(buf); context.release_read_buf(buf);
if let Some(item) = storage.streams.get_mut(id) { if let Some(item) = storage.streams.get_mut(id) {
item.rd_op.take(); item.rd_op.take();
item.flags.remove(Flags::RD_CANCELING);
if item.flags.contains(Flags::RD_REISSUE) {
item.flags.remove(Flags::RD_REISSUE);
let result = storage.recv(id, Some(context));
if let Some((id, op)) = result {
self.inner.api.submit(id, op);
}
}
} }
} }
Operation::Send { id, buf, context } => { Operation::Send { id, buf, context } => {
@ -127,6 +164,15 @@ impl<T> Handler for StreamOpsHandler<T> {
context.release_write_buf(buf); context.release_write_buf(buf);
if let Some(item) = storage.streams.get_mut(id) { if let Some(item) = storage.streams.get_mut(id) {
item.wr_op.take(); item.wr_op.take();
item.flags.remove(Flags::WR_CANCELING);
if item.flags.contains(Flags::WR_REISSUE) {
item.flags.remove(Flags::WR_REISSUE);
let result = storage.send(id, Some(context));
if let Some((id, op)) = result {
self.inner.api.submit(id, op);
}
}
} }
} }
Operation::Nop | Operation::Close { .. } => {} Operation::Nop | Operation::Close { .. } => {}
@ -151,12 +197,11 @@ impl<T> Handler for StreamOpsHandler<T> {
// reset op reference // reset op reference
if let Some(item) = storage.streams.get_mut(id) { if let Some(item) = storage.streams.get_mut(id) {
log::debug!( log::debug!(
"{}: Recv completed {:?}, res: {:?}, buf({}): {:?}", "{}: Recv completed {:?}, res: {:?}, buf({})",
context.tag(), context.tag(),
item.fd, item.fd,
result, result,
buf.remaining_mut(), buf.remaining_mut()
buf,
); );
item.rd_op.take(); item.rd_op.take();
} }
@ -173,21 +218,24 @@ impl<T> Handler for StreamOpsHandler<T> {
} }
Operation::Send { id, buf, context } => { Operation::Send { id, buf, context } => {
// reset op reference // reset op reference
if let Some(item) = storage.streams.get_mut(id) { let fd = if let Some(item) = storage.streams.get_mut(id) {
log::debug!( log::debug!(
"{}: Send completed: {:?}, res: {:?}", "{}: Send completed: {:?}, res: {:?}, buf({})",
context.tag(), context.tag(),
item.fd, item.fd,
result result,
buf.len()
); );
item.wr_op.take(); item.wr_op.take();
} Some(item.fd)
} else {
None
};
// set read buf // set read buf
if context let result = context.set_write_buf(result.map(|size| size as usize), buf);
.set_write_buf(result.map(|size| size as usize), buf) if result.is_pending() {
.is_pending() log::debug!("{}: Need to send more: {:?}", context.tag(), fd);
{
if let Some((id, op)) = storage.send(id, Some(context)) { if let Some((id, op)) = storage.send(id, Some(context)) {
self.inner.api.submit(id, op); self.inner.api.submit(id, op);
} }
@ -207,7 +255,7 @@ impl<T> Handler for StreamOpsHandler<T> {
if storage.streams[id].ref_count == 0 { if storage.streams[id].ref_count == 0 {
let mut item = storage.streams.remove(id); let mut item = storage.streams.remove(id);
log::debug!("{}: Drop io ({}), {:?}", item.context.tag(), id, item.fd); log::debug!("{}: Drop io ({}), {:?}", item.tag(), id, item.fd);
if let Some(io) = item.io.take() { if let Some(io) = item.io.take() {
mem::forget(io); mem::forget(io);
@ -230,11 +278,10 @@ impl<T> StreamOpsStorage<T> {
if item.rd_op.is_none() { if item.rd_op.is_none() {
if let Poll::Ready(mut buf) = item.context.get_read_buf() { if let Poll::Ready(mut buf) = item.context.get_read_buf() {
log::debug!( log::debug!(
"{}: Recv resume ({}), {:?} - {:?} = {:?}", "{}: Recv resume ({}), {:?} rem: {:?}",
item.context.tag(), item.tag(),
id, id,
item.fd, item.fd,
buf,
buf.remaining_mut() buf.remaining_mut()
); );
@ -252,6 +299,8 @@ impl<T> StreamOpsStorage<T> {
item.rd_op = NonZeroU32::new(op_id as u32); item.rd_op = NonZeroU32::new(op_id as u32);
return Some((op_id as u32, op)); return Some((op_id as u32, op));
} }
} else if item.flags.contains(Flags::RD_CANCELING) {
item.flags.insert(Flags::RD_REISSUE);
} }
None None
} }
@ -262,11 +311,11 @@ impl<T> StreamOpsStorage<T> {
if item.wr_op.is_none() { if item.wr_op.is_none() {
if let Poll::Ready(buf) = item.context.get_write_buf() { if let Poll::Ready(buf) = item.context.get_write_buf() {
log::debug!( log::debug!(
"{}: Send resume ({}), {:?} {:?}", "{}: Send resume ({}), {:?} len: {:?}",
item.context.tag(), item.tag(),
id, id,
item.fd, item.fd,
buf buf.len()
); );
let slice = buf.chunk(); let slice = buf.chunk();
@ -283,6 +332,8 @@ impl<T> StreamOpsStorage<T> {
item.wr_op = NonZeroU32::new(op_id as u32); item.wr_op = NonZeroU32::new(op_id as u32);
return Some((op_id as u32, op)); return Some((op_id as u32, op));
} }
} else if item.flags.contains(Flags::WR_CANCELING) {
item.flags.insert(Flags::WR_REISSUE);
} }
None None
} }
@ -350,15 +401,13 @@ impl<T> StreamCtl<T> {
let item = &mut storage.streams[self.id]; let item = &mut storage.streams[self.id];
if let Some(rd_op) = item.rd_op { if let Some(rd_op) = item.rd_op {
log::debug!( if !item.flags.contains(Flags::RD_CANCELING) {
"{}: Recv to pause ({}), {:?}", log::debug!("{}: Recv to pause ({}), {:?}", item.tag(), self.id, item.fd);
item.context.tag(), item.flags.insert(Flags::RD_CANCELING);
self.id,
item.fd
);
self.inner.api.cancel(rd_op.get()); self.inner.api.cancel(rd_op.get());
} }
} }
}
} }
impl<T> Clone for StreamCtl<T> { impl<T> Clone for StreamCtl<T> {
@ -378,12 +427,7 @@ impl<T> Drop for StreamCtl<T> {
if storage.streams[self.id].ref_count == 0 { if storage.streams[self.id].ref_count == 0 {
let mut item = storage.streams.remove(self.id); let mut item = storage.streams.remove(self.id);
if let Some(io) = item.io.take() { if let Some(io) = item.io.take() {
log::debug!( log::debug!("{}: Close io ({}), {:?}", item.tag(), self.id, item.fd);
"{}: Close io ({}), {:?}",
item.context.tag(),
self.id,
item.fd
);
mem::forget(io); mem::forget(io);
let id = storage.ops.insert(Operation::Close { tx: None }); let id = storage.ops.insert(Operation::Close { tx: None });

View file

@ -1,5 +1,9 @@
# Changes # Changes
## [0.4.29] - 2025-03-26
* Add Arbiter::get_value() helper method
## [0.4.27] - 2025-03-14 ## [0.4.27] - 2025-03-14
* Add srbiters pings ttl * Add srbiters pings ttl

View file

@ -1,6 +1,6 @@
[package] [package]
name = "ntex-rt" name = "ntex-rt"
version = "0.4.28" version = "0.4.29"
authors = ["ntex contributors <team@ntex.rs>"] authors = ["ntex contributors <team@ntex.rs>"]
description = "ntex runtime" description = "ntex runtime"
keywords = ["network", "framework", "async", "futures"] keywords = ["network", "framework", "async", "futures"]
@ -32,8 +32,8 @@ neon = ["ntex-neon"]
[dependencies] [dependencies]
async-channel = "2" async-channel = "2"
futures-timer = "3.0" futures-timer = "3.0"
log = "0.4"
oneshot = "0.1" oneshot = "0.1"
log = "0.4"
compio-driver = { version = "0.6", optional = true } compio-driver = { version = "0.6", optional = true }
compio-runtime = { version = "0.6", optional = true } compio-runtime = { version = "0.6", optional = true }
@ -42,7 +42,4 @@ tok-io = { version = "1", package = "tokio", default-features = false, features
"net", "net",
], optional = true } ], optional = true }
ntex-neon = { version = "0.1.1", optional = true } ntex-neon = { version = "0.1.14", optional = true }
[dev-dependencies]
env_logger = "0.11"

View file

@ -286,6 +286,25 @@ impl Arbiter {
}) })
} }
/// Get a type previously inserted to this runtime or create new one.
pub fn get_value<T, F>(f: F) -> T
where
T: Clone + 'static,
F: FnOnce() -> T,
{
STORAGE.with(move |cell| {
let mut st = cell.borrow_mut();
if let Some(boxed) = st.get(&TypeId::of::<T>()) {
if let Some(val) = (&**boxed as &(dyn Any + 'static)).downcast_ref::<T>() {
return val.clone();
}
}
let val = f();
st.insert(TypeId::of::<T>(), Box::new(val.clone()));
val
})
}
/// Wait for the event loop to stop by joining the underlying thread (if have Some). /// Wait for the event loop to stop by joining the underlying thread (if have Some).
pub fn join(&mut self) -> thread::Result<()> { pub fn join(&mut self) -> thread::Result<()> {
if let Some(thread_handle) = self.thread_handle.take() { if let Some(thread_handle) = self.thread_handle.take() {
@ -355,6 +374,7 @@ mod tests {
assert!(Arbiter::get_item::<&'static str, _, _>(|s| *s == "test")); assert!(Arbiter::get_item::<&'static str, _, _>(|s| *s == "test"));
assert!(Arbiter::get_mut_item::<&'static str, _, _>(|s| *s == "test")); assert!(Arbiter::get_mut_item::<&'static str, _, _>(|s| *s == "test"));
assert!(Arbiter::contains_item::<&'static str>()); assert!(Arbiter::contains_item::<&'static str>());
assert!(Arbiter::get_value(|| 64u64) == 64);
assert!(format!("{:?}", Arbiter::current()).contains("Arbiter")); assert!(format!("{:?}", Arbiter::current()).contains("Arbiter"));
} }
} }

View file

@ -112,6 +112,8 @@ mod tokio {
/// ///
/// This function panics if ntex system is not running. /// This function panics if ntex system is not running.
#[inline] #[inline]
#[doc(hidden)]
#[deprecated]
pub fn spawn_fn<F, R>(f: F) -> tok_io::task::JoinHandle<R::Output> pub fn spawn_fn<F, R>(f: F) -> tok_io::task::JoinHandle<R::Output>
where where
F: FnOnce() -> R + 'static, F: FnOnce() -> R + 'static,
@ -196,6 +198,8 @@ mod compio {
/// ///
/// This function panics if ntex system is not running. /// This function panics if ntex system is not running.
#[inline] #[inline]
#[doc(hidden)]
#[deprecated]
pub fn spawn_fn<F, R>(f: F) -> JoinHandle<R::Output> pub fn spawn_fn<F, R>(f: F) -> JoinHandle<R::Output>
where where
F: FnOnce() -> R + 'static, F: FnOnce() -> R + 'static,
@ -323,6 +327,8 @@ mod neon {
/// ///
/// This function panics if ntex system is not running. /// This function panics if ntex system is not running.
#[inline] #[inline]
#[doc(hidden)]
#[deprecated]
pub fn spawn_fn<F, R>(f: F) -> Task<R::Output> pub fn spawn_fn<F, R>(f: F) -> Task<R::Output>
where where
F: FnOnce() -> R + 'static, F: FnOnce() -> R + 'static,
@ -377,7 +383,7 @@ mod neon {
impl<T> JoinHandle<T> { impl<T> JoinHandle<T> {
pub fn is_finished(&self) -> bool { pub fn is_finished(&self) -> bool {
false self.fut.is_none()
} }
} }

View file

@ -1,5 +1,13 @@
# Changes # Changes
## [2.7.3] - 2025-03-28
* Better worker availability handling
## [2.7.2] - 2025-03-27
* Handle paused state
## [2.7.1] - 2025-02-28 ## [2.7.1] - 2025-02-28
* Fix set core affinity out of worker start #508 * Fix set core affinity out of worker start #508

View file

@ -1,6 +1,6 @@
[package] [package]
name = "ntex-server" name = "ntex-server"
version = "2.7.1" version = "2.7.4"
authors = ["ntex contributors <team@ntex.rs>"] authors = ["ntex contributors <team@ntex.rs>"]
description = "Server for ntex framework" description = "Server for ntex framework"
keywords = ["network", "framework", "async", "futures"] keywords = ["network", "framework", "async", "futures"]
@ -22,13 +22,13 @@ ntex-service = "3.4"
ntex-rt = "0.4" ntex-rt = "0.4"
ntex-util = "2.8" ntex-util = "2.8"
async-channel = "2" async-channel = { workspace = true }
async-broadcast = "0.7" atomic-waker = { workspace = true }
core_affinity = "0.8" core_affinity = { workspace = true }
polling = "3.3" oneshot = { workspace = true }
log = "0.4" polling = { workspace = true }
socket2 = "0.5" log = { workspace = true }
oneshot = { version = "0.1", default-features = false, features = ["async"] } socket2 = { workspace = true }
[dev-dependencies] [dev-dependencies]
ntex = "2" ntex = "2"

View file

@ -139,7 +139,6 @@ impl<F: ServerConfiguration> ServerManager<F> {
fn start_worker<F: ServerConfiguration>(mgr: ServerManager<F>, cid: Option<CoreId>) { fn start_worker<F: ServerConfiguration>(mgr: ServerManager<F>, cid: Option<CoreId>) {
let _ = ntex_rt::spawn(async move { let _ = ntex_rt::spawn(async move {
let id = mgr.next_id(); let id = mgr.next_id();
let mut wrk = Worker::start(id, mgr.factory(), cid); let mut wrk = Worker::start(id, mgr.factory(), cid);
loop { loop {
@ -181,7 +180,7 @@ impl<F: ServerConfiguration> HandleCmdState<F> {
fn process(&mut self, mut item: F::Item) { fn process(&mut self, mut item: F::Item) {
loop { loop {
if !self.workers.is_empty() { if !self.workers.is_empty() {
if self.next > self.workers.len() { if self.next >= self.workers.len() {
self.next = self.workers.len() - 1; self.next = self.workers.len() - 1;
} }
match self.workers[self.next].send(item) { match self.workers[self.next].send(item) {
@ -212,10 +211,9 @@ impl<F: ServerConfiguration> HandleCmdState<F> {
match upd { match upd {
Update::Available(worker) => { Update::Available(worker) => {
self.workers.push(worker); self.workers.push(worker);
self.workers.sort();
if self.workers.len() == 1 { if self.workers.len() == 1 {
self.mgr.resume(); self.mgr.resume();
} else {
self.workers.sort();
} }
} }
Update::Unavailable(worker) => { Update::Unavailable(worker) => {
@ -234,6 +232,9 @@ impl<F: ServerConfiguration> HandleCmdState<F> {
if let Err(item) = self.workers[0].send(item) { if let Err(item) = self.workers[0].send(item) {
self.backlog.push_back(item); self.backlog.push_back(item);
self.workers.remove(0); self.workers.remove(0);
if self.workers.is_empty() {
self.mgr.pause();
}
break; break;
} }
} }

View file

@ -92,12 +92,14 @@ impl AcceptLoop {
/// Start accept loop /// Start accept loop
pub fn start(mut self, socks: Vec<(Token, Listener)>, srv: Server) { pub fn start(mut self, socks: Vec<(Token, Listener)>, srv: Server) {
let (tx, rx_start) = oneshot::channel();
let (rx, poll) = self let (rx, poll) = self
.inner .inner
.take() .take()
.expect("AcceptLoop cannot be used multiple times"); .expect("AcceptLoop cannot be used multiple times");
Accept::start( Accept::start(
tx,
rx, rx,
poll, poll,
socks, socks,
@ -105,6 +107,8 @@ impl AcceptLoop {
self.notify.clone(), self.notify.clone(),
self.status_handler.take(), self.status_handler.take(),
); );
let _ = rx_start.recv();
} }
} }
@ -121,6 +125,7 @@ impl fmt::Debug for AcceptLoop {
struct Accept { struct Accept {
poller: Arc<Poller>, poller: Arc<Poller>,
rx: mpsc::Receiver<AcceptorCommand>, rx: mpsc::Receiver<AcceptorCommand>,
tx: Option<oneshot::Sender<()>>,
sockets: Vec<ServerSocketInfo>, sockets: Vec<ServerSocketInfo>,
srv: Server, srv: Server,
notify: AcceptNotify, notify: AcceptNotify,
@ -131,6 +136,7 @@ struct Accept {
impl Accept { impl Accept {
fn start( fn start(
tx: oneshot::Sender<()>,
rx: mpsc::Receiver<AcceptorCommand>, rx: mpsc::Receiver<AcceptorCommand>,
poller: Arc<Poller>, poller: Arc<Poller>,
socks: Vec<(Token, Listener)>, socks: Vec<(Token, Listener)>,
@ -145,11 +151,12 @@ impl Accept {
.name("ntex-server accept loop".to_owned()) .name("ntex-server accept loop".to_owned())
.spawn(move || { .spawn(move || {
System::set_current(sys); System::set_current(sys);
Accept::new(rx, poller, socks, srv, notify, status_handler).poll() Accept::new(tx, rx, poller, socks, srv, notify, status_handler).poll()
}); });
} }
fn new( fn new(
tx: oneshot::Sender<()>,
rx: mpsc::Receiver<AcceptorCommand>, rx: mpsc::Receiver<AcceptorCommand>,
poller: Arc<Poller>, poller: Arc<Poller>,
socks: Vec<(Token, Listener)>, socks: Vec<(Token, Listener)>,
@ -175,6 +182,7 @@ impl Accept {
notify, notify,
srv, srv,
status_handler, status_handler,
tx: Some(tx),
backpressure: true, backpressure: true,
backlog: VecDeque::new(), backlog: VecDeque::new(),
} }
@ -192,19 +200,23 @@ impl Accept {
// Create storage for events // Create storage for events
let mut events = Events::with_capacity(NonZeroUsize::new(512).unwrap()); let mut events = Events::with_capacity(NonZeroUsize::new(512).unwrap());
let mut timeout = Some(Duration::ZERO);
loop { loop {
if let Err(e) = self.poller.wait(&mut events, None) { if let Err(e) = self.poller.wait(&mut events, timeout) {
if e.kind() == io::ErrorKind::Interrupted { if e.kind() != io::ErrorKind::Interrupted {
continue;
} else {
panic!("Cannot wait for events in poller: {}", e) panic!("Cannot wait for events in poller: {}", e)
} }
} else if timeout.is_some() {
timeout = None;
let _ = self.tx.take().unwrap().send(());
} }
for event in events.iter() { for idx in 0..self.sockets.len() {
let readd = self.accept(event.key); if self.sockets[idx].registered.get() {
let readd = self.accept(idx);
if readd { if readd {
self.add_source(event.key); self.add_source(idx);
}
} }
} }

View file

@ -59,17 +59,19 @@ where
.workers(1) .workers(1)
.disable_signals() .disable_signals()
.run(); .run();
ntex_rt::spawn(async move {
ntex_util::time::sleep(ntex_util::time::Millis(75)).await;
tx.send((system, local_addr, server)) tx.send((system, local_addr, server))
.expect("Failed to send Server to TestServer"); .expect("Failed to send Server to TestServer");
});
Ok(()) Ok(())
}) })
}); });
let (system, addr, server) = rx.recv().unwrap(); let (system, addr, server) = rx.recv().unwrap();
// wait for server
thread::sleep(std::time::Duration::from_millis(50));
TestServer { TestServer {
addr, addr,
server, server,

View file

@ -2,8 +2,8 @@ use std::sync::atomic::{AtomicBool, Ordering};
use std::task::{ready, Context, Poll}; use std::task::{ready, Context, Poll};
use std::{cmp, future::poll_fn, future::Future, hash, pin::Pin, sync::Arc}; use std::{cmp, future::poll_fn, future::Future, hash, pin::Pin, sync::Arc};
use async_broadcast::{self as bus, broadcast};
use async_channel::{unbounded, Receiver, Sender}; use async_channel::{unbounded, Receiver, Sender};
use atomic_waker::AtomicWaker;
use core_affinity::CoreId; use core_affinity::CoreId;
use ntex_rt::{spawn, Arbiter}; use ntex_rt::{spawn, Arbiter};
@ -99,10 +99,10 @@ impl<T> Worker<T> {
log::debug!("Creating server instance in {:?}", id); log::debug!("Creating server instance in {:?}", id);
let factory = cfg.create().await; let factory = cfg.create().await;
log::debug!("Server instance has been created in {:?}", id);
match create(id, rx1, rx2, factory, avail_tx).await { match create(id, rx1, rx2, factory, avail_tx).await {
Ok((svc, wrk)) => { Ok((svc, wrk)) => {
log::debug!("Server instance has been created in {:?}", id);
run_worker(svc, wrk).await; run_worker(svc, wrk).await;
} }
Err(e) => { Err(e) => {
@ -151,10 +151,8 @@ impl<T> Worker<T> {
if self.failed.load(Ordering::Acquire) { if self.failed.load(Ordering::Acquire) {
WorkerStatus::Failed WorkerStatus::Failed
} else { } else {
// cleanup updates self.avail.wait_for_update().await;
while self.avail.notify.try_recv().is_ok() {} if self.avail.failed() {
if self.avail.notify.recv_direct().await.is_err() {
self.failed.store(true, Ordering::Release); self.failed.store(true, Ordering::Release);
} }
self.status() self.status()
@ -196,52 +194,85 @@ impl Future for WorkerStop {
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
struct WorkerAvailability { struct WorkerAvailability {
notify: bus::Receiver<()>, inner: Arc<Inner>,
available: Arc<AtomicBool>,
} }
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
struct WorkerAvailabilityTx { struct WorkerAvailabilityTx {
notify: bus::Sender<()>, inner: Arc<Inner>,
available: Arc<AtomicBool>, }
#[derive(Debug)]
struct Inner {
waker: AtomicWaker,
updated: AtomicBool,
available: AtomicBool,
failed: AtomicBool,
} }
impl WorkerAvailability { impl WorkerAvailability {
fn create() -> (Self, WorkerAvailabilityTx) { fn create() -> (Self, WorkerAvailabilityTx) {
let (mut tx, rx) = broadcast(16); let inner = Arc::new(Inner {
tx.set_overflow(true); waker: AtomicWaker::new(),
updated: AtomicBool::new(false),
available: AtomicBool::new(false),
failed: AtomicBool::new(false),
});
let avail = WorkerAvailability { let avail = WorkerAvailability {
notify: rx, inner: inner.clone(),
available: Arc::new(AtomicBool::new(false)),
};
let avail_tx = WorkerAvailabilityTx {
notify: tx,
available: avail.available.clone(),
}; };
let avail_tx = WorkerAvailabilityTx { inner };
(avail, avail_tx) (avail, avail_tx)
} }
fn failed(&self) -> bool {
self.inner.failed.load(Ordering::Acquire)
}
fn available(&self) -> bool { fn available(&self) -> bool {
self.available.load(Ordering::Acquire) self.inner.available.load(Ordering::Acquire)
}
async fn wait_for_update(&self) {
poll_fn(|cx| {
if self.inner.updated.load(Ordering::Acquire) {
self.inner.updated.store(false, Ordering::Release);
Poll::Ready(())
} else {
self.inner.waker.register(cx.waker());
Poll::Pending
}
})
.await;
} }
} }
impl WorkerAvailabilityTx { impl WorkerAvailabilityTx {
fn set(&self, val: bool) { fn set(&self, val: bool) {
let old = self.available.swap(val, Ordering::Release); let old = self.inner.available.swap(val, Ordering::Release);
if !old && val { if old != val {
let _ = self.notify.try_broadcast(()); self.inner.updated.store(true, Ordering::Release);
self.inner.waker.wake();
} }
} }
} }
impl Drop for WorkerAvailabilityTx {
fn drop(&mut self) {
self.inner.failed.store(true, Ordering::Release);
self.inner.updated.store(true, Ordering::Release);
self.inner.available.store(false, Ordering::Release);
self.inner.waker.wake();
}
}
/// Service worker /// Service worker
/// ///
/// Worker accepts message via unbounded channel and starts processing. /// Worker accepts message via unbounded channel and starts processing.
struct WorkerSt<T, F: ServiceFactory<T>> { struct WorkerSt<T, F: ServiceFactory<T>> {
id: WorkerId, id: WorkerId,
rx: Pin<Box<dyn Stream<Item = T>>>, rx: Receiver<T>,
stop: Pin<Box<dyn Stream<Item = Shutdown>>>, stop: Pin<Box<dyn Stream<Item = Shutdown>>>,
factory: F, factory: F,
availability: WorkerAvailabilityTx, availability: WorkerAvailabilityTx,
@ -253,25 +284,43 @@ where
F: ServiceFactory<T> + 'static, F: ServiceFactory<T> + 'static,
{ {
loop { loop {
let mut recv = std::pin::pin!(wrk.rx.recv());
let fut = poll_fn(|cx| { let fut = poll_fn(|cx| {
ready!(svc.poll_ready(cx)?); match svc.poll_ready(cx) {
Poll::Ready(Ok(())) => {
wrk.availability.set(true);
}
Poll::Ready(Err(err)) => {
wrk.availability.set(false);
return Poll::Ready(Err(err));
}
Poll::Pending => {
wrk.availability.set(false);
return Poll::Pending;
}
}
if let Some(item) = ready!(Pin::new(&mut wrk.rx).poll_next(cx)) { match ready!(recv.as_mut().poll(cx)) {
Ok(item) => {
let fut = svc.call(item); let fut = svc.call(item);
let _ = spawn(async move { let _ = spawn(async move {
let _ = fut.await; let _ = fut.await;
}); });
Poll::Ready(Ok::<_, F::Error>(true))
}
Err(_) => {
log::error!("Server is gone");
Poll::Ready(Ok(false))
}
} }
Poll::Ready(Ok::<(), F::Error>(()))
}); });
match select(fut, stream_recv(&mut wrk.stop)).await { match select(fut, stream_recv(&mut wrk.stop)).await {
Either::Left(Ok(())) => continue, Either::Left(Ok(true)) => continue,
Either::Left(Err(_)) => { Either::Left(Err(_)) => {
let _ = ntex_rt::spawn(async move { let _ = ntex_rt::spawn(async move {
svc.shutdown().await; svc.shutdown().await;
}); });
wrk.availability.set(false);
} }
Either::Right(Some(Shutdown { timeout, result })) => { Either::Right(Some(Shutdown { timeout, result })) => {
wrk.availability.set(false); wrk.availability.set(false);
@ -285,7 +334,8 @@ where
stop_svc(wrk.id, svc, timeout, Some(result)).await; stop_svc(wrk.id, svc, timeout, Some(result)).await;
return; return;
} }
Either::Right(None) => { Either::Left(Ok(false)) | Either::Right(None) => {
wrk.availability.set(false);
stop_svc(wrk.id, svc, STOP_TIMEOUT, None).await; stop_svc(wrk.id, svc, STOP_TIMEOUT, None).await;
return; return;
} }
@ -295,7 +345,6 @@ where
loop { loop {
match select(wrk.factory.create(()), stream_recv(&mut wrk.stop)).await { match select(wrk.factory.create(()), stream_recv(&mut wrk.stop)).await {
Either::Left(Ok(service)) => { Either::Left(Ok(service)) => {
wrk.availability.set(true);
svc = Pipeline::new(service).bind(); svc = Pipeline::new(service).bind();
break; break;
} }
@ -336,8 +385,6 @@ where
{ {
availability.set(false); availability.set(false);
let factory = factory?; let factory = factory?;
let rx = Box::pin(rx);
let mut stop = Box::pin(stop); let mut stop = Box::pin(stop);
let svc = match select(factory.create(()), stream_recv(&mut stop)).await { let svc = match select(factory.create(()), stream_recv(&mut stop)).await {
@ -356,9 +403,9 @@ where
svc, svc,
WorkerSt { WorkerSt {
id, id,
rx,
factory, factory,
availability, availability,
rx: Box::pin(rx),
stop: Box::pin(stop), stop: Box::pin(stop),
}, },
)) ))

View file

@ -13,9 +13,8 @@ async fn main() -> io::Result<()> {
println!("Started openssl echp server: 127.0.0.1:8443"); println!("Started openssl echp server: 127.0.0.1:8443");
// load ssl keys // load ssl keys
let cert_file = let cert_file = &mut BufReader::new(File::open("../examples/cert.pem").unwrap());
&mut BufReader::new(File::open("../ntex-tls/examples/cert.pem").unwrap()); let key_file = &mut BufReader::new(File::open("../examples/key.pem").unwrap());
let key_file = &mut BufReader::new(File::open("../ntex-tls/examples/key.pem").unwrap());
let keys = rustls_pemfile::private_key(key_file).unwrap().unwrap(); let keys = rustls_pemfile::private_key(key_file).unwrap().unwrap();
let cert_chain = rustls_pemfile::certs(cert_file) let cert_chain = rustls_pemfile::certs(cert_file)
.collect::<Result<Vec<_>, _>>() .collect::<Result<Vec<_>, _>>()

View file

@ -8,18 +8,18 @@ use tls_openssl::ssl::{self, SslFiletype, SslMethod};
#[ntex::main] #[ntex::main]
async fn main() -> io::Result<()> { async fn main() -> io::Result<()> {
//std::env::set_var("RUST_LOG", "trace"); std::env::set_var("RUST_LOG", "trace");
//env_logger::init(); let _ = env_logger::try_init();
println!("Started openssl web server: 127.0.0.1:8443"); println!("Started openssl web server: 127.0.0.1:8443");
// load ssl keys // load ssl keys
let mut builder = ssl::SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap(); let mut builder = ssl::SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
builder builder
.set_private_key_file("../tests/key.pem", SslFiletype::PEM) .set_private_key_file("./examples/key.pem", SslFiletype::PEM)
.unwrap(); .unwrap();
builder builder
.set_certificate_chain_file("../tests/cert.pem") .set_certificate_chain_file("./examples/cert.pem")
.unwrap(); .unwrap();
// h2 alpn config // h2 alpn config

View file

@ -1,5 +1,23 @@
# Changes # Changes
## [2.12.4] - 2025-03-28
* http: Return PayloadError::Incomplete on server disconnect
* web: Expose WebStack for external wrapper support in downstream crates #542
## [2.12.3] - 2025-03-22
* web: Export web::app_service::AppService #534
* http: Add delay for test server availability, could cause connect race
## [2.12.2] - 2025-03-15
* http: Allow to run publish future to completion in case error
* http: Remove brotli support
## [2.12.1] - 2025-03-14 ## [2.12.1] - 2025-03-14
* Allow to disable test logging (no-test-logging features) * Allow to disable test logging (no-test-logging features)

View file

@ -1,6 +1,6 @@
[package] [package]
name = "ntex" name = "ntex"
version = "2.12.1" version = "2.12.4"
authors = ["ntex contributors <team@ntex.rs>"] authors = ["ntex contributors <team@ntex.rs>"]
description = "Framework for composable network services" description = "Framework for composable network services"
readme = "README.md" readme = "README.md"
@ -18,7 +18,7 @@ edition = "2021"
rust-version = "1.75" rust-version = "1.75"
[package.metadata.docs.rs] [package.metadata.docs.rs]
features = ["tokio", "openssl", "rustls", "compress", "cookie", "ws", "brotli", "ntex-tls/rustls-ring"] features = ["tokio", "openssl", "rustls", "compress", "cookie", "ws", "ntex-tls/rustls-ring"]
[lib] [lib]
name = "ntex" name = "ntex"
@ -57,9 +57,6 @@ neon-uring = ["ntex-net/neon", "ntex-net/io-uring"]
# websocket support # websocket support
ws = ["dep:sha-1"] ws = ["dep:sha-1"]
# brotli2 support
brotli = ["dep:brotli2"]
# disable [ntex::test] logging configuration # disable [ntex::test] logging configuration
no-test-logging = [] no-test-logging = []
@ -71,11 +68,11 @@ ntex-service = "3.4"
ntex-macros = "0.1" ntex-macros = "0.1"
ntex-util = "2.8" ntex-util = "2.8"
ntex-bytes = "0.1.27" ntex-bytes = "0.1.27"
ntex-server = "2.7" ntex-server = "2.7.4"
ntex-h2 = "1.8.6" ntex-h2 = "1.8.6"
ntex-rt = "0.4.27" ntex-rt = "0.4.27"
ntex-io = "2.11" ntex-io = "2.11"
ntex-net = "2.5" ntex-net = "2.5.10"
ntex-tls = "2.3" ntex-tls = "2.3"
base64 = "0.22" base64 = "0.22"
@ -112,12 +109,12 @@ tls-rustls = { version = "0.23", package = "rustls", optional = true, default-fe
webpki-roots = { version = "0.26", optional = true } webpki-roots = { version = "0.26", optional = true }
# compression # compression
brotli2 = { version = "0.3.2", optional = true }
flate2 = { version = "1.0", optional = true } flate2 = { version = "1.0", optional = true }
[dev-dependencies] [dev-dependencies]
rand = "0.8" rand = "0.8"
time = "0.3" time = "0.3"
oneshot = "0.1"
futures-util = "0.3" futures-util = "0.3"
tls-openssl = { version = "0.10", package = "openssl" } tls-openssl = { version = "0.10", package = "openssl" }
tls-rustls = { version = "0.23", package = "rustls", features = ["ring", "std"], default-features = false } tls-rustls = { version = "0.23", package = "rustls", features = ["ring", "std"], default-features = false }

View file

@ -1,13 +1,11 @@
use std::{ use std::{future::poll_fn, io, io::Write, pin::Pin, task, task::Poll, time::Instant};
future::poll_fn, io, io::Write, pin::Pin, task::Context, task::Poll, time::Instant,
};
use crate::http::body::{BodySize, MessageBody}; use crate::http::body::{BodySize, MessageBody};
use crate::http::error::PayloadError; use crate::http::error::PayloadError;
use crate::http::h1;
use crate::http::header::{HeaderMap, HeaderValue, HOST}; use crate::http::header::{HeaderMap, HeaderValue, HOST};
use crate::http::message::{RequestHeadType, ResponseHead}; use crate::http::message::{RequestHeadType, ResponseHead};
use crate::http::payload::{Payload, PayloadStream}; use crate::http::payload::{Payload, PayloadStream};
use crate::http::{h1, Version};
use crate::io::{IoBoxed, RecvError}; use crate::io::{IoBoxed, RecvError};
use crate::time::{timeout_checked, Millis}; use crate::time::{timeout_checked, Millis};
use crate::util::{ready, BufMut, Bytes, BytesMut, Stream}; use crate::util::{ready, BufMut, Bytes, BytesMut, Stream};
@ -101,7 +99,13 @@ where
Ok((head, Payload::None)) Ok((head, Payload::None))
} }
_ => { _ => {
let pl: PayloadStream = Box::pin(PlStream::new(io, codec, created, pool)); let pl: PayloadStream = Box::pin(PlStream::new(
io,
codec,
created,
pool,
head.version == Version::HTTP_10,
));
Ok((head, pl.into())) Ok((head, pl.into()))
} }
} }
@ -137,6 +141,7 @@ pub(super) struct PlStream {
io: Option<IoBoxed>, io: Option<IoBoxed>,
codec: h1::ClientPayloadCodec, codec: h1::ClientPayloadCodec,
created: Instant, created: Instant,
http_10: bool,
pool: Option<Acquired>, pool: Option<Acquired>,
} }
@ -146,12 +151,14 @@ impl PlStream {
codec: h1::ClientCodec, codec: h1::ClientCodec,
created: Instant, created: Instant,
pool: Option<Acquired>, pool: Option<Acquired>,
http_10: bool,
) -> Self { ) -> Self {
PlStream { PlStream {
io: Some(io), io: Some(io),
codec: codec.into_payload_codec(), codec: codec.into_payload_codec(),
created, created,
pool, pool,
http_10,
} }
} }
} }
@ -161,12 +168,12 @@ impl Stream for PlStream {
fn poll_next( fn poll_next(
mut self: Pin<&mut Self>, mut self: Pin<&mut Self>,
cx: &mut Context<'_>, cx: &mut task::Context<'_>,
) -> Poll<Option<Self::Item>> { ) -> Poll<Option<Self::Item>> {
let mut this = self.as_mut(); let mut this = self.as_mut();
loop { loop {
return Poll::Ready(Some( let item = ready!(this.io.as_ref().unwrap().poll_recv(&this.codec, cx));
match ready!(this.io.as_ref().unwrap().poll_recv(&this.codec, cx)) { return Poll::Ready(Some(match item {
Ok(chunk) => { Ok(chunk) => {
if let Some(chunk) = chunk { if let Some(chunk) = chunk {
Ok(chunk) Ok(chunk)
@ -184,18 +191,23 @@ impl Stream for PlStream {
Err(io::Error::new(io::ErrorKind::TimedOut, "Keep-alive").into()) Err(io::Error::new(io::ErrorKind::TimedOut, "Keep-alive").into())
} }
Err(RecvError::Stop) => { Err(RecvError::Stop) => {
Err(io::Error::new(io::ErrorKind::Other, "Dispatcher stopped") Err(io::Error::new(io::ErrorKind::Other, "Dispatcher stopped").into())
.into())
} }
Err(RecvError::WriteBackpressure) => { Err(RecvError::WriteBackpressure) => {
ready!(this.io.as_ref().unwrap().poll_flush(cx, false))?; ready!(this.io.as_ref().unwrap().poll_flush(cx, false))?;
continue; continue;
} }
Err(RecvError::Decoder(err)) => Err(err), Err(RecvError::Decoder(err)) => Err(err),
Err(RecvError::PeerGone(Some(err))) => Err(err.into()), Err(RecvError::PeerGone(Some(err))) => {
Err(RecvError::PeerGone(None)) => return Poll::Ready(None), Err(PayloadError::Incomplete(Some(err)))
}, }
)); Err(RecvError::PeerGone(None)) => {
if this.http_10 {
return Poll::Ready(None);
}
Err(PayloadError::Incomplete(None))
}
}));
} }
} }
} }

View file

@ -387,8 +387,8 @@ impl Future for ReadBody {
let this = self.get_mut(); let this = self.get_mut();
loop { loop {
return match Pin::new(&mut this.stream).poll_next(cx)? { return match Pin::new(&mut this.stream).poll_next(cx) {
Poll::Ready(Some(chunk)) => { Poll::Ready(Some(Ok(chunk))) => {
if this.limit > 0 && (this.buf.len() + chunk.len()) > this.limit { if this.limit > 0 && (this.buf.len() + chunk.len()) > this.limit {
Poll::Ready(Err(PayloadError::Overflow)) Poll::Ready(Err(PayloadError::Overflow))
} else { } else {
@ -397,6 +397,7 @@ impl Future for ReadBody {
} }
} }
Poll::Ready(None) => Poll::Ready(Ok(this.buf.split().freeze())), Poll::Ready(None) => Poll::Ready(Ok(this.buf.split().freeze())),
Poll::Ready(Some(Err(err))) => Poll::Ready(Err(err)),
Poll::Pending => { Poll::Pending => {
if this.timeout.poll_elapsed(cx).is_ready() { if this.timeout.poll_elapsed(cx).is_ready() {
Poll::Ready(Err(PayloadError::Incomplete(Some( Poll::Ready(Err(PayloadError::Incomplete(Some(

View file

@ -1,7 +1,5 @@
use std::{future::Future, io, io::Write, pin::Pin, task::Context, task::Poll}; use std::{future::Future, io, io::Write, pin::Pin, task::Context, task::Poll};
#[cfg(feature = "brotli")]
use brotli2::write::BrotliDecoder;
use flate2::write::{GzDecoder, ZlibDecoder}; use flate2::write::{GzDecoder, ZlibDecoder};
use super::Writer; use super::Writer;
@ -27,10 +25,6 @@ where
#[inline] #[inline]
pub fn new(stream: S, encoding: ContentEncoding) -> Decoder<S> { pub fn new(stream: S, encoding: ContentEncoding) -> Decoder<S> {
let decoder = match encoding { let decoder = match encoding {
#[cfg(feature = "brotli")]
ContentEncoding::Br => Some(ContentDecoder::Br(Box::new(BrotliDecoder::new(
Writer::new(),
)))),
ContentEncoding::Deflate => Some(ContentDecoder::Deflate(Box::new( ContentEncoding::Deflate => Some(ContentDecoder::Deflate(Box::new(
ZlibDecoder::new(Writer::new()), ZlibDecoder::new(Writer::new()),
))), ))),
@ -137,25 +131,11 @@ where
enum ContentDecoder { enum ContentDecoder {
Deflate(Box<ZlibDecoder<Writer>>), Deflate(Box<ZlibDecoder<Writer>>),
Gzip(Box<GzDecoder<Writer>>), Gzip(Box<GzDecoder<Writer>>),
#[cfg(feature = "brotli")]
Br(Box<BrotliDecoder<Writer>>),
} }
impl ContentDecoder { impl ContentDecoder {
fn feed_eof(&mut self) -> io::Result<Option<Bytes>> { fn feed_eof(&mut self) -> io::Result<Option<Bytes>> {
match self { match self {
#[cfg(feature = "brotli")]
ContentDecoder::Br(ref mut decoder) => match decoder.flush() {
Ok(()) => {
let b = decoder.get_mut().take();
if !b.is_empty() {
Ok(Some(b))
} else {
Ok(None)
}
}
Err(e) => Err(e),
},
ContentDecoder::Gzip(ref mut decoder) => match decoder.try_finish() { ContentDecoder::Gzip(ref mut decoder) => match decoder.try_finish() {
Ok(_) => { Ok(_) => {
let b = decoder.get_mut().take(); let b = decoder.get_mut().take();
@ -183,19 +163,6 @@ impl ContentDecoder {
fn feed_data(&mut self, data: Bytes) -> io::Result<Option<Bytes>> { fn feed_data(&mut self, data: Bytes) -> io::Result<Option<Bytes>> {
match self { match self {
#[cfg(feature = "brotli")]
ContentDecoder::Br(ref mut decoder) => match decoder.write_all(&data) {
Ok(_) => {
decoder.flush()?;
let b = decoder.get_mut().take();
if !b.is_empty() {
Ok(Some(b))
} else {
Ok(None)
}
}
Err(e) => Err(e),
},
ContentDecoder::Gzip(ref mut decoder) => match decoder.write_all(&data) { ContentDecoder::Gzip(ref mut decoder) => match decoder.write_all(&data) {
Ok(_) => { Ok(_) => {
decoder.flush()?; decoder.flush()?;

View file

@ -1,8 +1,6 @@
//! Stream encoder //! Stream encoder
use std::{fmt, future::Future, io, io::Write, pin::Pin, task::Context, task::Poll}; use std::{fmt, future::Future, io, io::Write, pin::Pin, task::Context, task::Poll};
#[cfg(feature = "brotli")]
use brotli2::write::BrotliEncoder;
use flate2::write::{GzEncoder, ZlibEncoder}; use flate2::write::{GzEncoder, ZlibEncoder};
use crate::http::body::{Body, BodySize, MessageBody, ResponseBody}; use crate::http::body::{Body, BodySize, MessageBody, ResponseBody};
@ -191,24 +189,12 @@ fn update_head(encoding: ContentEncoding, head: &mut ResponseHead) {
enum ContentEncoder { enum ContentEncoder {
Deflate(ZlibEncoder<Writer>), Deflate(ZlibEncoder<Writer>),
Gzip(GzEncoder<Writer>), Gzip(GzEncoder<Writer>),
#[cfg(feature = "brotli")]
Br(BrotliEncoder<Writer>),
} }
impl ContentEncoder { impl ContentEncoder {
fn can_encode(encoding: ContentEncoding) -> bool { fn can_encode(encoding: ContentEncoding) -> bool {
#[cfg(feature = "brotli")]
{
matches!(
encoding,
ContentEncoding::Deflate | ContentEncoding::Gzip | ContentEncoding::Br
)
}
#[cfg(not(feature = "brotli"))]
{
matches!(encoding, ContentEncoding::Deflate | ContentEncoding::Gzip) matches!(encoding, ContentEncoding::Deflate | ContentEncoding::Gzip)
} }
}
fn encoder(encoding: ContentEncoding) -> Option<Self> { fn encoder(encoding: ContentEncoding) -> Option<Self> {
match encoding { match encoding {
@ -220,18 +206,12 @@ impl ContentEncoder {
Writer::new(), Writer::new(),
flate2::Compression::fast(), flate2::Compression::fast(),
))), ))),
#[cfg(feature = "brotli")]
ContentEncoding::Br => {
Some(ContentEncoder::Br(BrotliEncoder::new(Writer::new(), 3)))
}
_ => None, _ => None,
} }
} }
fn take(&mut self) -> Bytes { fn take(&mut self) -> Bytes {
match *self { match *self {
#[cfg(feature = "brotli")]
ContentEncoder::Br(ref mut encoder) => encoder.get_mut().take(),
ContentEncoder::Deflate(ref mut encoder) => encoder.get_mut().take(), ContentEncoder::Deflate(ref mut encoder) => encoder.get_mut().take(),
ContentEncoder::Gzip(ref mut encoder) => encoder.get_mut().take(), ContentEncoder::Gzip(ref mut encoder) => encoder.get_mut().take(),
} }
@ -239,11 +219,6 @@ impl ContentEncoder {
fn finish(self) -> Result<Bytes, io::Error> { fn finish(self) -> Result<Bytes, io::Error> {
match self { match self {
#[cfg(feature = "brotli")]
ContentEncoder::Br(encoder) => match encoder.finish() {
Ok(writer) => Ok(writer.buf.freeze()),
Err(err) => Err(err),
},
ContentEncoder::Gzip(encoder) => match encoder.finish() { ContentEncoder::Gzip(encoder) => match encoder.finish() {
Ok(writer) => Ok(writer.buf.freeze()), Ok(writer) => Ok(writer.buf.freeze()),
Err(err) => Err(err), Err(err) => Err(err),
@ -257,14 +232,6 @@ impl ContentEncoder {
fn write(&mut self, data: &[u8]) -> Result<(), io::Error> { fn write(&mut self, data: &[u8]) -> Result<(), io::Error> {
match *self { match *self {
#[cfg(feature = "brotli")]
ContentEncoder::Br(ref mut encoder) => match encoder.write_all(data) {
Ok(_) => Ok(()),
Err(err) => {
log::trace!("Error decoding br encoding: {}", err);
Err(err)
}
},
ContentEncoder::Gzip(ref mut encoder) => match encoder.write_all(data) { ContentEncoder::Gzip(ref mut encoder) => match encoder.write_all(data) {
Ok(_) => Ok(()), Ok(_) => Ok(()),
Err(err) => { Err(err) => {
@ -288,8 +255,6 @@ impl fmt::Debug for ContentEncoder {
match self { match self {
ContentEncoder::Deflate(_) => write!(f, "ContentEncoder::Deflate"), ContentEncoder::Deflate(_) => write!(f, "ContentEncoder::Deflate"),
ContentEncoder::Gzip(_) => write!(f, "ContentEncoder::Gzip"), ContentEncoder::Gzip(_) => write!(f, "ContentEncoder::Gzip"),
#[cfg(feature = "brotli")]
ContentEncoder::Br(_) => write!(f, "ContentEncoder::Br"),
} }
} }
} }

View file

@ -1,5 +1,5 @@
//! HTTP/1 protocol dispatcher //! HTTP/1 protocol dispatcher
use std::{error, future, io, marker, pin::Pin, rc::Rc, task::Context, task::Poll}; use std::{error, future, io, marker, mem, pin::Pin, rc::Rc, task::Context, task::Poll};
use crate::io::{Decoded, Filter, Io, IoStatusUpdate, RecvError}; use crate::io::{Decoded, Filter, Io, IoStatusUpdate, RecvError};
use crate::service::{PipelineCall, Service}; use crate::service::{PipelineCall, Service};
@ -144,7 +144,20 @@ where
inner.send_response(res, body) inner.send_response(res, body)
} }
Poll::Ready(Err(err)) => inner.control(Control::err(err)), Poll::Ready(Err(err)) => inner.control(Control::err(err)),
Poll::Pending => ready!(inner.poll_request(cx)), Poll::Pending => {
// state changed because of error.
// spawn current publish future to runtime
// so it could complete error handling
let st = ready!(inner.poll_request(cx));
if inner.payload.is_some() {
if let State::CallPublish { fut } =
mem::replace(&mut *this.st, State::ReadRequest)
{
crate::rt::spawn(fut);
}
}
st
}
}, },
// handle control service responses // handle control service responses
State::CallControl { fut } => match Pin::new(fut).poll(cx) { State::CallControl { fut } => match Pin::new(fut).poll(cx) {
@ -339,7 +352,7 @@ where
.io .io
.encode(Message::Item((msg, body.size())), &self.codec) .encode(Message::Item((msg, body.size())), &self.codec)
.map_err(|err| { .map_err(|err| {
if let Some(mut payload) = self.payload.take() { if let Some(ref mut payload) = self.payload {
payload.1.set_error(PayloadError::Incomplete(None)); payload.1.set_error(PayloadError::Incomplete(None));
} }
err err
@ -438,7 +451,7 @@ where
} }
fn set_payload_error(&mut self, err: PayloadError) { fn set_payload_error(&mut self, err: PayloadError) {
if let Some(mut payload) = self.payload.take() { if let Some(ref mut payload) = self.payload {
payload.1.set_error(err); payload.1.set_error(err);
} }
} }

View file

@ -3,8 +3,7 @@ use std::rc::{Rc, Weak};
use std::task::{Context, Poll}; use std::task::{Context, Poll};
use std::{cell::RefCell, collections::VecDeque, pin::Pin}; use std::{cell::RefCell, collections::VecDeque, pin::Pin};
use crate::http::error::PayloadError; use crate::{http::error::PayloadError, task::LocalWaker, util::Bytes, util::Stream};
use crate::{task::LocalWaker, util::Bytes, util::Stream};
/// max buffer size 32k /// max buffer size 32k
const MAX_BUFFER_SIZE: usize = 32_768; const MAX_BUFFER_SIZE: usize = 32_768;
@ -119,7 +118,7 @@ impl PayloadSender {
// we check only if Payload (other side) is alive, // we check only if Payload (other side) is alive,
// otherwise always return true (consume payload) // otherwise always return true (consume payload)
if let Some(shared) = self.inner.upgrade() { if let Some(shared) = self.inner.upgrade() {
if shared.borrow().need_read { if shared.borrow().flags.contains(Flags::NEED_READ) {
PayloadStatus::Read PayloadStatus::Read
} else { } else {
shared.borrow_mut().io_task.register(cx.waker()); shared.borrow_mut().io_task.register(cx.waker());
@ -131,12 +130,20 @@ impl PayloadSender {
} }
} }
bitflags::bitflags! {
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
struct Flags: u8 {
const EOF = 0b0000_0001;
const ERROR = 0b0000_0010;
const NEED_READ = 0b0000_0100;
}
}
#[derive(Debug)] #[derive(Debug)]
struct Inner { struct Inner {
len: usize, len: usize,
eof: bool, flags: Flags,
err: Option<PayloadError>, err: Option<PayloadError>,
need_read: bool,
items: VecDeque<Bytes>, items: VecDeque<Bytes>,
task: LocalWaker, task: LocalWaker,
io_task: LocalWaker, io_task: LocalWaker,
@ -144,12 +151,16 @@ struct Inner {
impl Inner { impl Inner {
fn new(eof: bool) -> Self { fn new(eof: bool) -> Self {
let flags = if eof {
Flags::EOF | Flags::NEED_READ
} else {
Flags::NEED_READ
};
Inner { Inner {
eof, flags,
len: 0, len: 0,
err: None, err: None,
items: VecDeque::new(), items: VecDeque::new(),
need_read: true,
task: LocalWaker::new(), task: LocalWaker::new(),
io_task: LocalWaker::new(), io_task: LocalWaker::new(),
} }
@ -157,18 +168,23 @@ impl Inner {
fn set_error(&mut self, err: PayloadError) { fn set_error(&mut self, err: PayloadError) {
self.err = Some(err); self.err = Some(err);
self.flags.insert(Flags::ERROR);
self.task.wake() self.task.wake()
} }
fn feed_eof(&mut self) { fn feed_eof(&mut self) {
self.eof = true; self.flags.insert(Flags::EOF);
self.task.wake() self.task.wake()
} }
fn feed_data(&mut self, data: Bytes) { fn feed_data(&mut self, data: Bytes) {
self.len += data.len(); self.len += data.len();
self.items.push_back(data); self.items.push_back(data);
self.need_read = self.len < MAX_BUFFER_SIZE; if self.len < MAX_BUFFER_SIZE {
self.flags.insert(Flags::NEED_READ);
} else {
self.flags.remove(Flags::NEED_READ);
}
self.task.wake(); self.task.wake();
} }
@ -178,19 +194,25 @@ impl Inner {
) -> Poll<Option<Result<Bytes, PayloadError>>> { ) -> Poll<Option<Result<Bytes, PayloadError>>> {
if let Some(data) = self.items.pop_front() { if let Some(data) = self.items.pop_front() {
self.len -= data.len(); self.len -= data.len();
self.need_read = self.len < MAX_BUFFER_SIZE; if self.len < MAX_BUFFER_SIZE {
self.flags.insert(Flags::NEED_READ);
} else {
self.flags.remove(Flags::NEED_READ);
}
if self.need_read && !self.eof { if self.flags.contains(Flags::NEED_READ)
&& !self.flags.intersects(Flags::EOF | Flags::ERROR)
{
self.task.register(cx.waker()); self.task.register(cx.waker());
} }
self.io_task.wake(); self.io_task.wake();
Poll::Ready(Some(Ok(data))) Poll::Ready(Some(Ok(data)))
} else if let Some(err) = self.err.take() { } else if let Some(err) = self.err.take() {
Poll::Ready(Some(Err(err))) Poll::Ready(Some(Err(err)))
} else if self.eof { } else if self.flags.intersects(Flags::EOF | Flags::ERROR) {
Poll::Ready(None) Poll::Ready(None)
} else { } else {
self.need_read = true; self.flags.insert(Flags::NEED_READ);
self.task.register(cx.waker()); self.task.register(cx.waker());
self.io_task.wake(); self.io_task.wake();
Poll::Pending Poll::Pending

View file

@ -11,7 +11,7 @@ use crate::server::Server;
use crate::service::ServiceFactory; use crate::service::ServiceFactory;
#[cfg(feature = "ws")] #[cfg(feature = "ws")]
use crate::ws::{error::WsClientError, WsClient, WsConnection}; use crate::ws::{error::WsClientError, WsClient, WsConnection};
use crate::{rt::System, time::Millis, time::Seconds, util::Bytes}; use crate::{rt::System, time::sleep, time::Millis, time::Seconds, util::Bytes};
use super::client::{Client, ClientRequest, ClientResponse, Connector}; use super::client::{Client, ClientRequest, ClientResponse, Connector};
use super::error::{HttpError, PayloadError}; use super::error::{HttpError, PayloadError};
@ -244,14 +244,15 @@ where
.workers(1) .workers(1)
.disable_signals() .disable_signals()
.run(); .run();
crate::rt::spawn(async move {
sleep(Millis(125)).await;
tx.send((system, srv, local_addr)).unwrap(); tx.send((system, srv, local_addr)).unwrap();
});
Ok(()) Ok(())
}) })
}); });
// wait for server
if std::env::var("GITHUB_ACTIONS") == Ok("true".to_string()) {
thread::sleep(std::time::Duration::from_millis(150)); thread::sleep(std::time::Duration::from_millis(150));
}
let (system, server, addr) = rx.recv().unwrap(); let (system, server, addr) = rx.recv().unwrap();

View file

@ -68,7 +68,7 @@ pub struct ServiceConfig<Err = DefaultError> {
} }
impl<Err: ErrorRenderer> ServiceConfig<Err> { impl<Err: ErrorRenderer> ServiceConfig<Err> {
pub(crate) fn new() -> Self { pub fn new() -> Self {
Self { Self {
services: Vec::new(), services: Vec::new(),
state: Extensions::new(), state: Extensions::new(),
@ -132,7 +132,7 @@ mod tests {
use crate::http::{Method, StatusCode}; use crate::http::{Method, StatusCode};
use crate::util::Bytes; use crate::util::Bytes;
use crate::web::test::{call_service, init_service, read_body, TestRequest}; use crate::web::test::{call_service, init_service, read_body, TestRequest};
use crate::web::{self, App, HttpRequest, HttpResponse}; use crate::web::{self, App, DefaultError, HttpRequest, HttpResponse};
#[crate::rt_test] #[crate::rt_test]
async fn test_configure_state() { async fn test_configure_state() {
@ -205,4 +205,11 @@ mod tests {
let resp = call_service(&srv, req).await; let resp = call_service(&srv, req).await;
assert_eq!(resp.status(), StatusCode::OK); assert_eq!(resp.status(), StatusCode::OK);
} }
#[test]
fn test_new_service_config() {
let cfg: ServiceConfig<DefaultError> = ServiceConfig::new();
assert!(cfg.services.is_empty());
assert!(cfg.external.is_empty());
}
} }

View file

@ -82,7 +82,7 @@ mod route;
mod scope; mod scope;
mod server; mod server;
mod service; mod service;
mod stack; pub mod stack;
pub mod test; pub mod test;
pub mod types; pub mod types;
mod util; mod util;
@ -128,6 +128,7 @@ pub mod dev {
//! The purpose of this module is to alleviate imports of many common //! The purpose of this module is to alleviate imports of many common
//! traits by adding a glob import to the top of ntex::web heavy modules: //! traits by adding a glob import to the top of ntex::web heavy modules:
pub use crate::web::app_service::AppService;
pub use crate::web::config::AppConfig; pub use crate::web::config::AppConfig;
pub use crate::web::info::ConnectionInfo; pub use crate::web::info::ConnectionInfo;
pub use crate::web::rmap::ResourceMap; pub use crate::web::rmap::ResourceMap;

View file

@ -697,14 +697,13 @@ where
.set_tag("test", "WEB-SRV") .set_tag("test", "WEB-SRV")
.run(); .run();
crate::rt::spawn(async move {
sleep(Millis(125)).await;
tx.send((System::current(), srv, local_addr)).unwrap(); tx.send((System::current(), srv, local_addr)).unwrap();
});
Ok(()) Ok(())
}) })
}); });
// wait for server
if std::env::var("GITHUB_ACTIONS") == Ok("true".to_string()) {
thread::sleep(std::time::Duration::from_millis(150));
}
let (system, server, addr) = rx.recv().unwrap(); let (system, server, addr) = rx.recv().unwrap();

View file

@ -3,16 +3,14 @@ use std::io::{Read, Write};
use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc; use std::sync::Arc;
use brotli2::write::BrotliEncoder;
use coo_kie::Cookie; use coo_kie::Cookie;
use flate2::{read::GzDecoder, write::GzEncoder, write::ZlibEncoder, Compression}; use flate2::{read::GzDecoder, write::GzEncoder, write::ZlibEncoder, Compression};
use futures_util::stream::once;
use rand::Rng; use rand::Rng;
use ntex::http::client::error::{JsonPayloadError, SendRequestError}; use ntex::http::client::error::SendRequestError;
use ntex::http::client::{Client, Connector}; use ntex::http::client::{Client, Connector};
use ntex::http::test::server as test_server; use ntex::http::test::server as test_server;
use ntex::http::{header, HttpMessage, HttpService, Method}; use ntex::http::{header, HttpMessage, HttpService};
use ntex::service::{chain_factory, map_config}; use ntex::service::{chain_factory, map_config};
use ntex::web::dev::AppConfig; use ntex::web::dev::AppConfig;
use ntex::web::middleware::Compress; use ntex::web::middleware::Compress;
@ -510,12 +508,14 @@ async fn test_client_gzip_encoding_large() {
async fn test_client_gzip_encoding_large_random() { async fn test_client_gzip_encoding_large_random() {
let data = rand::thread_rng() let data = rand::thread_rng()
.sample_iter(&rand::distributions::Alphanumeric) .sample_iter(&rand::distributions::Alphanumeric)
.take(100_000) .take(1_048_500)
.map(char::from) .map(char::from)
.collect::<String>(); .collect::<String>();
let srv = test::server(|| { let srv = test::server(|| {
App::new().service(web::resource("/").route(web::to(|data: Bytes| async move { App::new()
.state(web::types::PayloadConfig::default().limit(1_048_576))
.service(web::resource("/").route(web::to(|data: Bytes| async move {
let mut e = GzEncoder::new(Vec::new(), Compression::default()); let mut e = GzEncoder::new(Vec::new(), Compression::default());
e.write_all(&data).unwrap(); e.write_all(&data).unwrap();
let data = e.finish().unwrap(); let data = e.finish().unwrap();
@ -530,130 +530,10 @@ async fn test_client_gzip_encoding_large_random() {
assert!(response.status().is_success()); assert!(response.status().is_success());
// read response // read response
let bytes = response.body().await.unwrap(); let bytes = response.body().limit(1_048_576).await.unwrap();
assert_eq!(bytes, Bytes::from(data)); assert_eq!(bytes, Bytes::from(data));
} }
#[ntex::test]
async fn test_client_brotli_encoding() {
let srv = test::server(|| {
App::new().service(web::resource("/").route(web::to(|data: Bytes| async move {
let mut e = BrotliEncoder::new(Vec::new(), 5);
e.write_all(&data).unwrap();
let data = e.finish().unwrap();
HttpResponse::Ok()
.header("content-encoding", "br")
.body(data)
})))
});
// client request
let mut response = srv.post("/").send_body(STR).await.unwrap();
assert!(response.status().is_success());
// read response
let bytes = response.body().await.unwrap();
assert_eq!(bytes, Bytes::from_static(STR.as_ref()));
}
#[ntex::test]
async fn test_client_brotli_encoding_large_random() {
let data = rand::thread_rng()
.sample_iter(&rand::distributions::Alphanumeric)
.take(70_000)
.map(char::from)
.collect::<String>();
let srv = test::server(|| {
App::new().service(web::resource("/").route(web::to(|data: Bytes| async move {
let mut e = BrotliEncoder::new(Vec::new(), 5);
e.write_all(&data).unwrap();
let data = e.finish().unwrap();
HttpResponse::Ok()
.header("content-encoding", "br")
.body(data)
})))
});
// client request
let mut response = srv.post("/").send_body(data.clone()).await.unwrap();
assert!(response.status().is_success());
// read response
let bytes = response.body().await.unwrap();
assert_eq!(bytes.len(), data.len());
assert_eq!(bytes, Bytes::from(data.clone()));
// frozen request
let request = srv.post("/").timeout(Seconds(30)).freeze().unwrap();
assert_eq!(request.get_method(), Method::POST);
assert_eq!(request.get_uri(), srv.url("/").as_str());
let mut response = request.send_body(data.clone()).await.unwrap();
assert!(response.status().is_success());
// read response
let bytes = response.body().await.unwrap();
assert_eq!(bytes.len(), data.len());
assert_eq!(bytes, Bytes::from(data.clone()));
// extra header
let mut response = request
.extra_header("x-test2", "222")
.send_body(data.clone())
.await
.unwrap();
assert!(response.status().is_success());
// read response
let bytes = response.body().await.unwrap();
assert_eq!(bytes.len(), data.len());
assert_eq!(bytes, Bytes::from(data.clone()));
// client stream request
let mut response = srv
.post("/")
.send_stream(once(Ready::Ok::<_, JsonPayloadError>(Bytes::from(
data.clone(),
))))
.await
.unwrap();
assert!(response.status().is_success());
// read response
let bytes = response.body().await.unwrap();
assert_eq!(bytes.len(), data.len());
assert_eq!(bytes, Bytes::from(data.clone()));
// frozen request
let request = srv.post("/").timeout(Seconds(30)).freeze().unwrap();
let mut response = request
.send_stream(once(Ready::Ok::<_, JsonPayloadError>(Bytes::from(
data.clone(),
))))
.await
.unwrap();
assert!(response.status().is_success());
// read response
let bytes = response.body().await.unwrap();
assert_eq!(bytes.len(), data.len());
assert_eq!(bytes, Bytes::from(data.clone()));
let mut response = request
.extra_header("x-test2", "222")
.send_stream(once(Ready::Ok::<_, JsonPayloadError>(Bytes::from(
data.clone(),
))))
.await
.unwrap();
assert!(response.status().is_success());
// read response
let bytes = response.body().await.unwrap();
assert_eq!(bytes.len(), data.len());
assert_eq!(bytes, Bytes::from(data.clone()));
}
#[ntex::test] #[ntex::test]
async fn test_client_deflate_encoding() { async fn test_client_deflate_encoding() {
let srv = test::server(|| { let srv = test::server(|| {

View file

@ -1,5 +1,6 @@
#![cfg(feature = "openssl")] #![cfg(feature = "openssl")]
use std::{io, sync::atomic::AtomicUsize, sync::atomic::Ordering, sync::Arc}; use std::io;
use std::sync::{atomic::AtomicUsize, atomic::Ordering, Arc, Mutex};
use futures_util::stream::{once, Stream, StreamExt}; use futures_util::stream::{once, Stream, StreamExt};
use tls_openssl::ssl::{AlpnError, SslAcceptor, SslFiletype, SslMethod}; use tls_openssl::ssl::{AlpnError, SslAcceptor, SslFiletype, SslMethod};
@ -424,11 +425,12 @@ async fn test_h2_service_error() {
assert_eq!(bytes, Bytes::from_static(b"error")); assert_eq!(bytes, Bytes::from_static(b"error"));
} }
struct SetOnDrop(Arc<AtomicUsize>); struct SetOnDrop(Arc<AtomicUsize>, Arc<Mutex<Option<::oneshot::Sender<()>>>>);
impl Drop for SetOnDrop { impl Drop for SetOnDrop {
fn drop(&mut self) { fn drop(&mut self) {
self.0.fetch_add(1, Ordering::Relaxed); self.0.fetch_add(1, Ordering::Relaxed);
let _ = self.1.lock().unwrap().take().unwrap().send(());
} }
} }
@ -436,17 +438,20 @@ impl Drop for SetOnDrop {
async fn test_h2_client_drop() -> io::Result<()> { async fn test_h2_client_drop() -> io::Result<()> {
let count = Arc::new(AtomicUsize::new(0)); let count = Arc::new(AtomicUsize::new(0));
let count2 = count.clone(); let count2 = count.clone();
let (tx, rx) = ::oneshot::channel();
let tx = Arc::new(Mutex::new(Some(tx)));
let srv = test_server(move || { let srv = test_server(move || {
let tx = tx.clone();
let count = count2.clone(); let count = count2.clone();
HttpService::build() HttpService::build()
.h2(move |req: Request| { .h2(move |req: Request| {
let count = count.clone(); let st = SetOnDrop(count.clone(), tx.clone());
async move { async move {
let _st = SetOnDrop(count);
assert!(req.peer_addr().is_some()); assert!(req.peer_addr().is_some());
assert_eq!(req.version(), Version::HTTP_2); assert_eq!(req.version(), Version::HTTP_2);
sleep(Seconds(100)).await; sleep(Seconds(30)).await;
drop(st);
Ok::<_, io::Error>(Response::Ok().finish()) Ok::<_, io::Error>(Response::Ok().finish())
} }
}) })
@ -454,9 +459,9 @@ async fn test_h2_client_drop() -> io::Result<()> {
.map_err(|_| ()) .map_err(|_| ())
}); });
let result = timeout(Millis(250), srv.srequest(Method::GET, "/").send()).await; let result = timeout(Millis(1500), srv.srequest(Method::GET, "/").send()).await;
assert!(result.is_err()); assert!(result.is_err());
sleep(Millis(150)).await; let _ = timeout(Millis(1500), rx).await;
assert_eq!(count.load(Ordering::Relaxed), 1); assert_eq!(count.load(Ordering::Relaxed), 1);
Ok(()) Ok(())
} }
@ -539,13 +544,19 @@ async fn test_ws_transport() {
async fn test_h2_graceful_shutdown() -> io::Result<()> { async fn test_h2_graceful_shutdown() -> io::Result<()> {
let count = Arc::new(AtomicUsize::new(0)); let count = Arc::new(AtomicUsize::new(0));
let count2 = count.clone(); let count2 = count.clone();
let (tx, rx) = ::oneshot::channel();
let tx = Arc::new(Mutex::new(Some(tx)));
let srv = test_server(move || { let srv = test_server(move || {
let tx = tx.clone();
let count = count2.clone(); let count = count2.clone();
HttpService::build() HttpService::build()
.h2(move |_| { .h2(move |_| {
let count = count.clone(); let count = count.clone();
count.fetch_add(1, Ordering::Relaxed); count.fetch_add(1, Ordering::Relaxed);
if count.load(Ordering::Relaxed) == 2 {
let _ = tx.lock().unwrap().take().unwrap().send(());
}
async move { async move {
sleep(Millis(1000)).await; sleep(Millis(1000)).await;
count.fetch_sub(1, Ordering::Relaxed); count.fetch_sub(1, Ordering::Relaxed);
@ -566,7 +577,7 @@ async fn test_h2_graceful_shutdown() -> io::Result<()> {
let _ = req.send().await.unwrap(); let _ = req.send().await.unwrap();
sleep(Millis(100000)).await; sleep(Millis(100000)).await;
}); });
sleep(Millis(150)).await; let _ = rx.await;
assert_eq!(count.load(Ordering::Relaxed), 2); assert_eq!(count.load(Ordering::Relaxed), 2);
let (tx, rx) = oneshot::channel(); let (tx, rx) = oneshot::channel();
@ -574,8 +585,6 @@ async fn test_h2_graceful_shutdown() -> io::Result<()> {
srv.stop().await; srv.stop().await;
let _ = tx.send(()); let _ = tx.send(());
}); });
sleep(Millis(150)).await;
assert_eq!(count.load(Ordering::Relaxed), 2);
let _ = rx.await; let _ = rx.await;
assert_eq!(count.load(Ordering::Relaxed), 0); assert_eq!(count.load(Ordering::Relaxed), 0);

View file

@ -1,4 +1,4 @@
use std::sync::{atomic::AtomicUsize, atomic::Ordering, Arc}; use std::sync::{atomic::AtomicUsize, atomic::Ordering, Arc, Mutex};
use std::{io, io::Read, io::Write, net}; use std::{io, io::Read, io::Write, net};
use futures_util::future::{self, FutureExt}; use futures_util::future::{self, FutureExt};
@ -405,6 +405,36 @@ async fn test_http1_handle_not_consumed_payload() {
assert_eq!(&data[..17], b"HTTP/1.1 200 OK\r\n"); assert_eq!(&data[..17], b"HTTP/1.1 200 OK\r\n");
} }
/// Handle payload errors (keep-alive, disconnects)
#[ntex::test]
async fn test_http1_handle_payload_errors() {
let count = Arc::new(AtomicUsize::new(0));
let count2 = count.clone();
let srv = test_server(move || {
let count = count2.clone();
HttpService::build().h1(move |mut req: Request| {
let count = count.clone();
async move {
let mut pl = req.take_payload();
let result = pl.recv().await;
if result.unwrap().is_err() {
count.fetch_add(1, Ordering::Relaxed);
}
Ok::<_, io::Error>(Response::Ok().finish())
}
})
});
let mut stream = net::TcpStream::connect(srv.addr()).unwrap();
let _ =
stream.write_all(b"GET /test/tests/test HTTP/1.1\r\ncontent-length: 99999\r\n\r\n");
sleep(Millis(250)).await;
drop(stream);
sleep(Millis(250)).await;
assert_eq!(count.load(Ordering::Acquire), 1);
}
#[ntex::test] #[ntex::test]
async fn test_content_length() { async fn test_content_length() {
let srv = test_server(|| { let srv = test_server(|| {
@ -693,11 +723,12 @@ async fn test_h1_service_error() {
assert_eq!(bytes, Bytes::from_static(b"error")); assert_eq!(bytes, Bytes::from_static(b"error"));
} }
struct SetOnDrop(Arc<AtomicUsize>); struct SetOnDrop(Arc<AtomicUsize>, Option<::oneshot::Sender<()>>);
impl Drop for SetOnDrop { impl Drop for SetOnDrop {
fn drop(&mut self) { fn drop(&mut self) {
self.0.fetch_add(1, Ordering::Relaxed); self.0.fetch_add(1, Ordering::Relaxed);
let _ = self.1.take().unwrap().send(());
} }
} }
@ -705,24 +736,28 @@ impl Drop for SetOnDrop {
async fn test_h1_client_drop() -> io::Result<()> { async fn test_h1_client_drop() -> io::Result<()> {
let count = Arc::new(AtomicUsize::new(0)); let count = Arc::new(AtomicUsize::new(0));
let count2 = count.clone(); let count2 = count.clone();
let (tx, rx) = ::oneshot::channel();
let tx = Arc::new(Mutex::new(Some(tx)));
let srv = test_server(move || { let srv = test_server(move || {
let tx = tx.clone();
let count = count2.clone(); let count = count2.clone();
HttpService::build().h1(move |req: Request| { HttpService::build().h1(move |req: Request| {
let tx = tx.clone();
let count = count.clone(); let count = count.clone();
async move { async move {
let _st = SetOnDrop(count); let _st = SetOnDrop(count, tx.lock().unwrap().take());
assert!(req.peer_addr().is_some()); assert!(req.peer_addr().is_some());
assert_eq!(req.version(), Version::HTTP_11); assert_eq!(req.version(), Version::HTTP_11);
sleep(Seconds(100)).await; sleep(Millis(50000)).await;
Ok::<_, io::Error>(Response::Ok().finish()) Ok::<_, io::Error>(Response::Ok().finish())
} }
}) })
}); });
let result = timeout(Millis(100), srv.request(Method::GET, "/").send()).await; let result = timeout(Millis(1500), srv.request(Method::GET, "/").send()).await;
assert!(result.is_err()); assert!(result.is_err());
sleep(Millis(250)).await; let _ = rx.await;
assert_eq!(count.load(Ordering::Relaxed), 1); assert_eq!(count.load(Ordering::Relaxed), 1);
Ok(()) Ok(())
} }
@ -731,12 +766,18 @@ async fn test_h1_client_drop() -> io::Result<()> {
async fn test_h1_gracefull_shutdown() { async fn test_h1_gracefull_shutdown() {
let count = Arc::new(AtomicUsize::new(0)); let count = Arc::new(AtomicUsize::new(0));
let count2 = count.clone(); let count2 = count.clone();
let (tx, rx) = ::oneshot::channel();
let tx = Arc::new(Mutex::new(Some(tx)));
let srv = test_server(move || { let srv = test_server(move || {
let tx = tx.clone();
let count = count2.clone(); let count = count2.clone();
HttpService::build().h1(move |_: Request| { HttpService::build().h1(move |_: Request| {
let count = count.clone(); let count = count.clone();
count.fetch_add(1, Ordering::Relaxed); count.fetch_add(1, Ordering::Relaxed);
if count.load(Ordering::Relaxed) == 2 {
let _ = tx.lock().unwrap().take().unwrap().send(());
}
async move { async move {
sleep(Millis(1000)).await; sleep(Millis(1000)).await;
count.fetch_sub(1, Ordering::Relaxed); count.fetch_sub(1, Ordering::Relaxed);
@ -751,7 +792,7 @@ async fn test_h1_gracefull_shutdown() {
let mut stream2 = net::TcpStream::connect(srv.addr()).unwrap(); let mut stream2 = net::TcpStream::connect(srv.addr()).unwrap();
let _ = stream2.write_all(b"GET /index.html HTTP/1.1\r\n\r\n"); let _ = stream2.write_all(b"GET /index.html HTTP/1.1\r\n\r\n");
sleep(Millis(150)).await; let _ = rx.await;
assert_eq!(count.load(Ordering::Relaxed), 2); assert_eq!(count.load(Ordering::Relaxed), 2);
let (tx, rx) = oneshot::channel(); let (tx, rx) = oneshot::channel();
@ -759,8 +800,6 @@ async fn test_h1_gracefull_shutdown() {
srv.stop().await; srv.stop().await;
let _ = tx.send(()); let _ = tx.send(());
}); });
sleep(Millis(150)).await;
assert_eq!(count.load(Ordering::Relaxed), 2);
let _ = rx.await; let _ = rx.await;
assert_eq!(count.load(Ordering::Relaxed), 0); assert_eq!(count.load(Ordering::Relaxed), 0);
@ -770,12 +809,18 @@ async fn test_h1_gracefull_shutdown() {
async fn test_h1_gracefull_shutdown_2() { async fn test_h1_gracefull_shutdown_2() {
let count = Arc::new(AtomicUsize::new(0)); let count = Arc::new(AtomicUsize::new(0));
let count2 = count.clone(); let count2 = count.clone();
let (tx, rx) = ::oneshot::channel();
let tx = Arc::new(Mutex::new(Some(tx)));
let srv = test_server(move || { let srv = test_server(move || {
let tx = tx.clone();
let count = count2.clone(); let count = count2.clone();
HttpService::build().finish(move |_: Request| { HttpService::build().finish(move |_: Request| {
let count = count.clone(); let count = count.clone();
count.fetch_add(1, Ordering::Relaxed); count.fetch_add(1, Ordering::Relaxed);
if count.load(Ordering::Relaxed) == 2 {
let _ = tx.lock().unwrap().take().unwrap().send(());
}
async move { async move {
sleep(Millis(1000)).await; sleep(Millis(1000)).await;
count.fetch_sub(1, Ordering::Relaxed); count.fetch_sub(1, Ordering::Relaxed);
@ -790,17 +835,14 @@ async fn test_h1_gracefull_shutdown_2() {
let mut stream2 = net::TcpStream::connect(srv.addr()).unwrap(); let mut stream2 = net::TcpStream::connect(srv.addr()).unwrap();
let _ = stream2.write_all(b"GET /index.html HTTP/1.1\r\n\r\n"); let _ = stream2.write_all(b"GET /index.html HTTP/1.1\r\n\r\n");
sleep(Millis(150)).await; let _ = rx.await;
assert_eq!(count.load(Ordering::Relaxed), 2); assert_eq!(count.load(Ordering::Acquire), 2);
let (tx, rx) = oneshot::channel(); let (tx, rx) = oneshot::channel();
rt::spawn(async move { rt::spawn(async move {
srv.stop().await; srv.stop().await;
let _ = tx.send(()); let _ = tx.send(());
}); });
sleep(Millis(150)).await;
assert_eq!(count.load(Ordering::Relaxed), 2);
let _ = rx.await; let _ = rx.await;
assert_eq!(count.load(Ordering::Relaxed), 0); assert_eq!(count.load(Ordering::Relaxed), 0);
} }

View file

@ -1,6 +1,5 @@
use std::{future::Future, io, io::Read, io::Write, pin::Pin, task::Context, task::Poll}; use std::{future::Future, io, io::Read, io::Write, pin::Pin, task::Context, task::Poll};
use brotli2::write::{BrotliDecoder, BrotliEncoder};
use flate2::read::GzDecoder; use flate2::read::GzDecoder;
use flate2::write::{GzEncoder, ZlibDecoder, ZlibEncoder}; use flate2::write::{GzEncoder, ZlibDecoder, ZlibEncoder};
use flate2::Compression; use flate2::Compression;
@ -318,36 +317,6 @@ async fn test_body_chunked_implicit() {
assert_eq!(Bytes::from(dec), Bytes::from_static(STR.as_ref())); assert_eq!(Bytes::from(dec), Bytes::from_static(STR.as_ref()));
} }
#[ntex::test]
async fn test_body_br_streaming() {
let srv = test::server_with(test::config().h1(), || {
App::new().wrap(Compress::new(ContentEncoding::Br)).service(
web::resource("/").route(web::to(move || async {
HttpResponse::Ok()
.streaming(TestBody::new(Bytes::from_static(STR.as_ref()), 24))
})),
)
});
let mut response = srv
.get("/")
.header(ACCEPT_ENCODING, "br")
.no_decompress()
.send()
.await
.unwrap();
assert!(response.status().is_success());
// read response
let bytes = response.body().await.unwrap();
// decode br
let mut e = BrotliDecoder::new(Vec::with_capacity(2048));
e.write_all(bytes.as_ref()).unwrap();
let dec = e.finish().unwrap();
assert_eq!(Bytes::from(dec), Bytes::from_static(STR.as_ref()));
}
#[ntex::test] #[ntex::test]
async fn test_head_binary() { async fn test_head_binary() {
let srv = test::server_with(test::config().h1(), || { let srv = test::server_with(test::config().h1(), || {
@ -422,35 +391,6 @@ async fn test_body_deflate() {
assert_eq!(Bytes::from(dec), Bytes::from_static(STR.as_ref())); assert_eq!(Bytes::from(dec), Bytes::from_static(STR.as_ref()));
} }
#[ntex::test]
async fn test_body_brotli() {
let srv = test::server_with(test::config().h1(), || {
App::new().wrap(Compress::new(ContentEncoding::Br)).service(
web::resource("/")
.route(web::to(move || async { HttpResponse::Ok().body(STR) })),
)
});
// client request
let mut response = srv
.get("/")
.header(ACCEPT_ENCODING, "br")
.no_decompress()
.send()
.await
.unwrap();
assert!(response.status().is_success());
// read response
let bytes = response.body().await.unwrap();
// decode brotli
let mut e = BrotliDecoder::new(Vec::with_capacity(2048));
e.write_all(bytes.as_ref()).unwrap();
let dec = e.finish().unwrap();
assert_eq!(Bytes::from(dec), Bytes::from_static(STR.as_ref()));
}
#[ntex::test] #[ntex::test]
async fn test_encoding() { async fn test_encoding() {
let srv = test::server_with(test::config().h1(), || { let srv = test::server_with(test::config().h1(), || {
@ -644,204 +584,6 @@ async fn test_reading_deflate_encoding_large_random() {
assert_eq!(bytes, Bytes::from(data)); assert_eq!(bytes, Bytes::from(data));
} }
#[ntex::test]
async fn test_brotli_encoding() {
let srv = test::server_with(test::config().h1(), || {
App::new().service(web::resource("/").route(web::to(move |body: Bytes| async {
HttpResponse::Ok().body(body)
})))
});
let mut e = BrotliEncoder::new(Vec::new(), 5);
e.write_all(STR.as_ref()).unwrap();
let enc = e.finish().unwrap();
// client request
let request = srv
.post("/")
.header(CONTENT_ENCODING, "br")
.send_body(enc.clone());
let mut response = request.await.unwrap();
assert!(response.status().is_success());
// read response
let bytes = response.body().await.unwrap();
assert_eq!(bytes, Bytes::from_static(STR.as_ref()));
}
#[ntex::test]
async fn test_brotli_encoding_large() {
let data = rand::thread_rng()
.sample_iter(&Alphanumeric)
.take(320_000)
.map(char::from)
.collect::<String>();
let srv = test::server_with(test::config().h1(), || {
App::new().service(
web::resource("/")
.state(web::types::PayloadConfig::new(320_000))
.route(web::to(move |body: Bytes| async {
HttpResponse::Ok().streaming(TestBody::new(body, 10240))
})),
)
});
let mut e = BrotliEncoder::new(Vec::new(), 5);
e.write_all(data.as_ref()).unwrap();
let enc = e.finish().unwrap();
// client request
let request = srv
.post("/")
.header(CONTENT_ENCODING, "br")
.send_body(enc.clone());
let mut response = request.await.unwrap();
assert!(response.status().is_success());
// read response
let bytes = response.body().limit(320_000).await.unwrap();
assert_eq!(bytes, Bytes::from(data));
}
#[cfg(feature = "openssl")]
#[ntex::test]
async fn test_brotli_encoding_large_openssl() {
// load ssl keys
use tls_openssl::ssl::{SslAcceptor, SslFiletype, SslMethod};
let mut builder = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
builder
.set_private_key_file("./tests/key.pem", SslFiletype::PEM)
.unwrap();
builder
.set_certificate_chain_file("./tests/cert.pem")
.unwrap();
let data = STR.repeat(10);
let srv = test::server_with(test::config().openssl(builder.build()), move || {
App::new().service(web::resource("/").route(web::to(|bytes: Bytes| async {
HttpResponse::Ok()
.encoding(ContentEncoding::Identity)
.body(bytes)
})))
});
// body
let mut e = BrotliEncoder::new(Vec::new(), 3);
e.write_all(data.as_ref()).unwrap();
let enc = e.finish().unwrap();
// client request
let mut response = srv
.post("/")
.header(CONTENT_ENCODING, "br")
.send_body(enc)
.await
.unwrap();
assert!(response.status().is_success());
// read response
let bytes = response.body().await.unwrap();
assert_eq!(bytes, Bytes::from(data));
}
#[cfg(feature = "openssl")]
#[ntex::test]
async fn test_brotli_encoding_large_openssl_h1() {
// load ssl keys
use tls_openssl::ssl::{SslAcceptor, SslFiletype, SslMethod};
let mut builder = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
builder
.set_private_key_file("./tests/key.pem", SslFiletype::PEM)
.unwrap();
builder
.set_certificate_chain_file("./tests/cert.pem")
.unwrap();
let data = STR.repeat(10);
let srv = test::server_with(test::config().openssl(builder.build()).h1(), move || {
App::new().service(web::resource("/").route(web::to(|bytes: Bytes| async {
HttpResponse::Ok()
.encoding(ContentEncoding::Identity)
.body(bytes)
})))
});
// body
let mut e = BrotliEncoder::new(Vec::new(), 3);
e.write_all(data.as_ref()).unwrap();
let enc = e.finish().unwrap();
// client request
let mut response = srv
.post("/")
.header(CONTENT_ENCODING, "br")
.send_body(enc)
.await
.unwrap();
assert!(response.status().is_success());
// read response
let bytes = response.body().await.unwrap();
assert_eq!(bytes, Bytes::from(data));
}
#[cfg(feature = "openssl")]
#[ntex::test]
async fn test_brotli_encoding_large_openssl_h2() {
// load ssl keys
use tls_openssl::ssl::{AlpnError, SslAcceptor, SslFiletype, SslMethod};
let mut builder = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
builder
.set_private_key_file("./tests/key.pem", SslFiletype::PEM)
.unwrap();
builder
.set_certificate_chain_file("./tests/cert.pem")
.unwrap();
builder.set_alpn_select_callback(|_, protos| {
const H2: &[u8] = b"\x02h2";
const H11: &[u8] = b"\x08http/1.1";
if protos.windows(3).any(|window| window == H2) {
Ok(b"h2")
} else if protos.windows(9).any(|window| window == H11) {
Ok(b"http/1.1")
} else {
Err(AlpnError::NOACK)
}
});
builder.set_alpn_protos(b"\x08http/1.1\x02h2").unwrap();
let data = STR.repeat(10);
let srv = test::server_with(test::config().openssl(builder.build()).h2(), move || {
App::new().service(web::resource("/").route(web::to(|bytes: Bytes| async {
HttpResponse::Ok()
.encoding(ContentEncoding::Identity)
.body(bytes)
})))
});
// body
let mut e = BrotliEncoder::new(Vec::new(), 3);
e.write_all(data.as_ref()).unwrap();
let enc = e.finish().unwrap();
// client request
let mut response = srv
.post("/")
.header(CONTENT_ENCODING, "br")
.send_body(enc)
.await
.unwrap();
assert!(response.status().is_success());
// read response
let bytes = response.body().await.unwrap();
assert_eq!(bytes, Bytes::from(data));
}
#[cfg(all(feature = "rustls", feature = "openssl"))] #[cfg(all(feature = "rustls", feature = "openssl"))]
#[ntex::test] #[ntex::test]
async fn test_reading_deflate_encoding_large_random_rustls() { async fn test_reading_deflate_encoding_large_random_rustls() {