Compare commits

...

75 commits

Author SHA1 Message Date
Nikolay Kim
01d3a2440b
Prepare net release (#550) 2025-03-28 21:26:07 +01:00
Nikolay Kim
f5ee55d598
Handle socket close for poll driver (#549) 2025-03-28 21:06:11 +01:00
Nikolay Kim
e4f24ee41f Handle flaky tests 2025-03-28 11:39:24 +01:00
Nikolay Kim
f6fe9c3e10
Improve tests (#548) 2025-03-28 10:07:10 +01:00
Nikolay Kim
30928d019c
Improve tests (#547) 2025-03-28 09:11:59 +01:00
Nikolay Kim
e9a1284151
Better worker availability handling (#546) 2025-03-28 08:51:44 +01:00
Nikolay Kim
8f2d5056c9
Return PayloadError::Incomplete on server disconnect (#545) 2025-03-28 02:10:25 +01:00
Nikolay Kim
f647ad2eac
Update tests (#544) 2025-03-27 22:16:51 +01:00
Ruangyot Nanchiang
728ab919a3
Expose WebStack for external wrapper support in downstream crates (#542)
* add public ServiceConfig::register constructor to support external configuration (#250)

* fix: doctest ServiceConfig::register() error (#250)

* add unit testing for ServiceConfig::register()

* replace pub(crate) to pub in ServiceConfig::new() (#250)

* replace pub to pub(crate) for ServiceConfig::new() and add pub for mod ntex::web::stack instead

* remove unused DefaultError import in config.rs tests

---------

Co-authored-by: RuangyotN <ruangyotn@skyller.co>
2025-03-27 22:12:34 +01:00
Nikolay Kim
b2915f4868
Improve tests (#543) 2025-03-27 20:45:43 +01:00
Nikolay Kim
eb4ec4b3e1
Add Arbiter::get_value() helper method (#541) 2025-03-26 14:40:05 +01:00
Nikolay Kim
0d3f1293c9
Update neon runtime (#540) 2025-03-25 12:40:42 +01:00
Ruangyot Nanchiang
e903e65e27
add public ServiceConfig::register constructor to support external configuration (#250) (#539)
* add public ServiceConfig::register constructor to support external configuration (#250)

* fix: doctest ServiceConfig::register() error (#250)

* add unit testing for ServiceConfig::register()

* replace pub(crate) to pub in ServiceConfig::new() (#250)

---------

Co-authored-by: RuangyotN <ruangyotn@skyller.co>
2025-03-25 12:31:09 +01:00
Nikolay Kim
eaec50d8a2
Prepare release (#538) 2025-03-22 22:17:59 +01:00
Drew Pirrone-Brusse
b32df88500
Publicize web::app_service::AppService (#534) 2025-03-22 22:16:00 +01:00
Nikolay Kim
5484009c92
Simplify neon poll impl (#537) 2025-03-21 08:21:45 +01:00
Nikolay Kim
bf6b1d6c79
Maintain interest info for poll driver (#536) 2025-03-20 08:56:31 +01:00
Nikolay Kim
e3f58cce27
Redesign neon poll support (#535) 2025-03-19 21:13:39 +01:00
Nikolay Kim
e904cf85f1 Fix tls examples
2025-03-18 06:10:52 +01:00
Nikolay Kim
3b58f5a111
Add delay for test server availability, could cause connect race (#533) 2025-03-18 05:50:28 +01:00
Nikolay Kim
5621ca1898
Add check for required io-uring opcodes (#532)
2025-03-17 22:03:55 +01:00
Nikolay Kim
11734e8f1b Run neon tests first 2025-03-17 04:57:28 +01:00
Nikolay Kim
5426790eb0
Remove brotli support (#531) 2025-03-16 12:38:09 +01:00
Nikolay Kim
7417ee3a4b
Allow running publish future to completion in case of error (#529) 2025-03-16 12:11:01 +01:00
Nikolay Kim
1f71b200ad
Close FD in various cases for poll driver (#530) 2025-03-16 12:09:09 +01:00
Nikolay Kim
f15c3203b1
Fix operation cancelation handling for poll driver (#528) 2025-03-15 01:19:35 +05:00
Nikolay Kim
a83ed4c3fa Fix op cancelation in poll driver 2025-03-14 23:04:33 +05:00
Nikolay Kim
ae5980cdd9
Fix operation cancelation handling for io-uring driver (#527) 2025-03-14 22:46:48 +05:00
Nikolay Kim
5db953cea5 no-test-logging impl 2025-03-14 20:11:28 +05:00
Nikolay Kim
d3f9275f7a
Prep ntex release (#526) 2025-03-14 20:07:57 +05:00
Nikolay Kim
fe108f30c9
Fix socket connect for io-uring driver (#525) 2025-03-14 20:05:22 +05:00
Nikolay Kim
14d2634e3d
Add readiness checks (#524) 2025-03-14 15:39:43 +05:00
Nikolay Kim
81eaf88752
Remove neon::net dep (#523) 2025-03-14 15:13:31 +05:00
Nikolay Kim
9a8a2b3216
Check test server availability before using it (#522) 2025-03-14 14:08:43 +05:00
Nikolay Kim
ab5fb624b7
Use new values api (#521) 2025-03-13 17:42:59 +05:00
Nikolay Kim
cfc32ed74f
Update neon apis (#520) 2025-03-13 16:17:19 +05:00
Nikolay Kim
ecfc2936b5
ntex-rt improvements (#519) 2025-03-13 02:07:26 +05:00
Nikolay Kim
2db266ca0c Use release for neon 2025-03-12 15:14:46 +05:00
Nikolay Kim
12afaa00ea
Refactor uring feature (#518) 2025-03-12 15:12:28 +05:00
Nikolay Kim
db16b71c5f
fmt (#517) 2025-03-12 01:15:32 +05:00
jamescarterbell
dcc08b72d8
Feature/add spawn with (#516)
* Adds send bound to arbiter exec

* Adds spawn with function


---------

Co-authored-by: James Bell <jamesbell@microsoft.com>
2025-03-12 00:31:20 +05:00
Nikolay Kim
9c78181c7b fix features 2025-03-12 00:25:32 +05:00
Nikolay Kim
b8f8d637b0 fix features 2025-03-11 23:57:27 +05:00
Nikolay Kim
60a686b2f6
Add io-uring driver (#515) 2025-03-11 23:31:41 +05:00
Nikolay Kim
47afec7351 Merge release 2025-03-11 14:44:57 +05:00
Nikolay Kim
6a0aa33504 Prepare release 2025-03-11 14:41:39 +05:00
jamescarterbell
f520e88dd7 Adds send bound to arbiter exec (#514)
Co-authored-by: James Bell <jamesbell@microsoft.com>
2025-03-11 14:40:36 +05:00
jamescarterbell
8cfe0e50b1
Adds send bound to arbiter exec (#514)
Co-authored-by: James Bell <jamesbell@microsoft.com>
2025-03-11 14:39:22 +05:00
Nikolay Kim
e6a25db7ee Fix neon compat 2025-03-10 12:42:59 +05:00
Nikolay Kim
4e77e9ce24
Rename runtime to neon (#513) 2025-03-10 12:06:13 +05:00
Nikolay Kim
8ffa646af7
Drop async-std support (#512) 2025-03-09 18:53:47 +05:00
Nikolay Kim
59ffd17b91
Drop glommio support (#511) 2025-03-09 18:19:34 +05:00
Nikolay Kim
4c1bc3249b
Experimental poll based runtime (#510) 2025-03-09 18:11:33 +05:00
Nikolay Kim
3e5211eb79
Prep release (#509) 2025-02-28 13:46:02 -05:00
ldseraph
415711222e
Fix setting core affinity outside of worker start. (#508)
Co-authored-by: ldseraph <ldseraph@163.com>
2025-02-28 13:40:41 -05:00
Nikolay Kim
ae9d4ab331
Prepare release (#507) 2025-02-26 22:54:21 -03:00
jamescarterbell
9b7d001f4f
Add Filter impl to seal (#506)
* Add Filter impl to seal

* Version bump

* Fmt

---------

Co-authored-by: James Bell <jamesbell@microsoft.com>
2025-02-26 22:21:22 -03:00
Nikolay Kim
bbbb7a393e Use core-affinity for core count detection 2025-01-31 20:43:41 +05:00
Nikolay Kim
ba57330058
Cpu affinity support for server workers (#505) 2025-01-31 20:39:41 +05:00
Nikolay Kim
282e3224cd
Allow to access io write destination buffer (#504) 2025-01-21 21:32:31 +05:00
Nikolay Kim
5b11a3e30e
Clippy fixes (#503) 2025-01-15 18:27:00 +05:00
Nikolay Kim
4d4ab811bd
Retry middleware (#502) 2025-01-15 17:13:50 +05:00
Nikolay Kim
451f546a13
Future on-drop helper (#501) 2025-01-15 16:06:03 +05:00
Nikolay Kim
71ba4d28a3
Add EitherService/EitherServiceFactory (#500) 2025-01-15 15:59:26 +05:00
Nikolay Kim
cd56883197
Clippy (#499) 2025-01-14 21:58:10 +05:00
Nikolay Kim
44fcfd62ff
Fix status badge (#498) 2025-01-14 21:45:17 +05:00
Nikolay Kim
69b0fe49d7
Relax runtime requirements (#495) 2025-01-03 22:17:16 +05:00
Nikolay Kim
5fd9d7ce90
Enable rustls/std feature (#494) 2024-12-30 14:40:31 +01:00
metent
48702413f3
Add build checks (#491) 2024-12-30 14:05:48 +01:00
Nikolay Kim
a049a05c33
Fix error log (#493) 2024-12-30 12:02:43 +01:00
Nikolay Kim
80676aa242
Tune shutdown logging (#489) 2024-12-26 10:05:25 +01:00
Nikolay Kim
b5be9502b4
Remove Unpin requirements for Arbiter::spawn() (#485) 2024-12-10 19:57:10 +05:00
Nikolay Kim
22ee7f2af2
Better io error handling (#482) 2024-12-05 14:02:59 +05:00
Nikolay Kim
2631e70a4b Fix tests 2024-12-04 15:28:02 +05:00
Nikolay Kim
e222832270 Check service readiness for every turn 2024-12-04 15:23:20 +05:00
120 changed files with 4164 additions and 2110 deletions

View file

@@ -3,6 +3,17 @@ name: Checks
 on: [push, pull_request]

 jobs:
+  check:
+    name: Check
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions-rust-lang/setup-rust-toolchain@v1
+        with:
+          toolchain: stable
+      - run:
+          cargo check --tests --all --no-default-features --features="ntex/compio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws"
+
   clippy:
     name: Clippy
     runs-on: ubuntu-latest
@@ -13,7 +24,7 @@ jobs:
           toolchain: stable
           components: clippy
       - run:
-          cargo test --all --no-default-features --features="ntex/compio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws,ntex/brotli"
+          cargo clippy --tests --all --no-default-features --features="ntex/compio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws"

   fmt:
     name: Rustfmt

View file

@@ -8,11 +8,6 @@ jobs:
     env:
       CARGO_TERM_COLOR: always
     steps:
-      - name: Free Disk Space
-        uses: jlumbroso/free-disk-space@main
-        with:
-          tool-cache: true
       - uses: actions/checkout@v4
       - name: Install Rust
         run: rustup update nightly
@@ -26,18 +21,20 @@ jobs:
       - name: Clean coverage results
         run: cargo llvm-cov clean --workspace
-      - name: Code coverage (glommio)
-        continue-on-error: true
-        run: cargo llvm-cov --no-report --all --no-default-features --features="ntex/glommio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws,ntex/brotli" -- --skip test_unhandled_data
+      - name: Code coverage (neon)
+        run: cargo llvm-cov --no-report --all --no-default-features --features="ntex/neon,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws"
+      - name: Code coverage (neon-uring)
+        run: cargo llvm-cov --no-report --all --no-default-features --features="ntex/neon-uring,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws"
       - name: Code coverage (tokio)
-        run: cargo llvm-cov --no-report --all --no-default-features --features="ntex/tokio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws,ntex/brotli"
+        run: cargo llvm-cov --no-report --all --no-default-features --features="ntex/tokio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws"
       - name: Code coverage (compio)
-        run: cargo llvm-cov --no-report --all --no-default-features --features="ntex/compio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws,ntex/brotli"
+        run: cargo llvm-cov --no-report --all --no-default-features --features="ntex/compio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws"
       - name: Generate coverage report
-        run: cargo llvm-cov report --lcov --output-path lcov.info --ignore-filename-regex="ntex-compio|ntex-tokio|ntex-glommio|ntex-async-std"
+        run: cargo llvm-cov report --lcov --output-path lcov.info --ignore-filename-regex="ntex-compio|ntex-tokio"
       - name: Upload coverage to Codecov
         uses: codecov/codecov-action@v4

View file

@@ -16,11 +16,6 @@ jobs:
     runs-on: ubuntu-latest
     steps:
-      - name: Free Disk Space
-        uses: jlumbroso/free-disk-space@main
-        with:
-          tool-cache: true
       - uses: actions/checkout@v4
       - name: Install ${{ matrix.version }}
@@ -44,21 +39,25 @@ jobs:
           path: ~/.cargo/git
           key: ${{ matrix.version }}-x86_64-unknown-linux-gnu-cargo-index-trimmed-${{ hashFiles('**/Cargo.lock') }}
+      - name: Run tests (neon)
+        timeout-minutes: 40
+        run: |
+          cargo test --all --no-default-features --features="ntex/neon,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws"
+      - name: Run tests (neon-uring)
+        timeout-minutes: 40
+        run: |
+          cargo test --all --no-default-features --features="ntex/neon-uring,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws"
       - name: Run tests (tokio)
         timeout-minutes: 40
         run: |
-          cargo test --all --no-fail-fast --no-default-features --features="ntex/tokio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws,ntex/brotli"
+          cargo test --all --no-fail-fast --no-default-features --features="ntex/tokio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws"
       - name: Run tests (compio)
         timeout-minutes: 40
         run: |
-          cargo test --all --no-default-features --features="ntex/compio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws,ntex/brotli"
+          cargo test --all --no-default-features --features="ntex/compio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws"
-      - name: Run tests (async-std)
-        timeout-minutes: 40
-        continue-on-error: true
-        run: |
-          cargo test --all --no-default-features --features="ntex/async-std,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws,ntex/brotli"
       - name: Install cargo-cache
         continue-on-error: true

View file

@@ -37,12 +37,16 @@ jobs:
           path: ~/.cargo/git
           key: ${{ matrix.version }}-aarch64-apple-darwin-cargo-index-trimmed-${{ hashFiles('**/Cargo.lock') }}
+      - name: Run tests (neon)
+        timeout-minutes: 40
+        run: cargo test --all --no-default-features --no-fail-fast --features="ntex/neon,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws"
       - name: Run tests (tokio)
-        run: cargo test --all --no-default-features --no-fail-fast --features="ntex/tokio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws,ntex/brotli"
+        run: cargo test --all --no-default-features --no-fail-fast --features="ntex/tokio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws"
       - name: Run tests (compio)
         timeout-minutes: 40
-        run: cargo test --all --no-default-features --no-fail-fast --features="ntex/compio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws,ntex/brotli"
+        run: cargo test --all --no-default-features --no-fail-fast --features="ntex/compio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws"
       - name: Install cargo-cache
         continue-on-error: true

View file

@@ -63,8 +63,8 @@
       - name: Run tests (tokio)
         run: |
-          cargo test --all --lib --no-default-features --no-fail-fast --features="ntex/tokio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws,ntex/brotli" -- --skip test_timer
+          cargo test --all --lib --no-default-features --no-fail-fast --features="ntex/tokio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws" -- --skip test_timer
       - name: Run tests (compio)
         run: |
-          cargo test --all --lib --no-default-features --no-fail-fast --features="ntex/compio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws,ntex/brotli" -- --skip test_timer
+          cargo test --all --lib --no-default-features --no-fail-fast --features="ntex/compio,ntex/cookie,ntex/url,ntex/compress,ntex/openssl,ntex/rustls,ntex/ws" -- --skip test_timer

View file

@@ -15,12 +15,18 @@ members = [
   "ntex-macros",
   "ntex-util",
-  "ntex-async-std",
   "ntex-compio",
-  "ntex-glommio",
   "ntex-tokio",
 ]

+[workspace.package]
+authors = ["ntex contributors <team@ntex.rs>"]
+repository = "https://github.com/ntex-rs/ntex"
+documentation = "https://docs.rs/ntex/"
+license = "MIT OR Apache-2.0"
+edition = "2021"
+rust-version = "1.75"
+
 [patch.crates-io]
 ntex = { path = "ntex" }
 ntex-bytes = { path = "ntex-bytes" }
@@ -37,6 +43,28 @@ ntex-macros = { path = "ntex-macros" }
 ntex-util = { path = "ntex-util" }
 ntex-compio = { path = "ntex-compio" }
-ntex-glommio = { path = "ntex-glommio" }
 ntex-tokio = { path = "ntex-tokio" }
-ntex-async-std = { path = "ntex-async-std" }
+
+[workspace.dependencies]
+async-channel = "2"
+async-task = "4.5.0"
+atomic-waker = "1.1"
+core_affinity = "0.8"
+bitflags = "2"
+cfg_aliases = "0.2.1"
+cfg-if = "1.0.0"
+crossbeam-channel = "0.5.8"
+crossbeam-queue = "0.3.8"
+futures-util = "0.3.29"
+fxhash = "0.2"
+libc = "0.2.164"
+log = "0.4"
+io-uring = "0.7.4"
+oneshot = "0.1"
+polling = "3.7.4"
+nohash-hasher = "0.2.0"
+scoped-tls = "1.0.1"
+slab = "0.4.9"
+socket2 = "0.5.6"
+windows-sys = "0.52.0"
+thiserror = "1"

View file

@@ -3,7 +3,7 @@
 <p><strong>Framework for composable network services.</strong> </p>
 <p>

-[![build status](https://github.com/ntex-rs/ntex/workflows/CI%20%28Linux%29/badge.svg?branch=master&event=push)](https://github.com/ntex-rs/ntex/actions?query=workflow%3A"CI+(Linux)")
+[![build status](https://github.com/ntex-rs/ntex/actions/workflows/linux.yml/badge.svg?branch=master&event=push)](https://github.com/ntex-rs/ntex/actions/workflows/linux.yml/badge.svg)
 [![crates.io](https://img.shields.io/crates/v/ntex.svg)](https://crates.io/crates/ntex)
 [![Documentation](https://img.shields.io/docsrs/ntex/latest)](https://docs.rs/ntex)
 [![Version](https://img.shields.io/badge/rustc-1.75+-lightgray.svg)](https://blog.rust-lang.org/2023/12/28/Rust-1.75.0.html)
@@ -18,18 +18,18 @@
 | Platform | Build Status |
 | ---------------- | ------------ |
-| Linux | [![build status](https://github.com/ntex-rs/ntex/workflows/CI%20%28Linux%29/badge.svg?branch=master&event=push)](https://github.com/ntex-rs/ntex/actions?query=workflow%3A"CI+(Linux)") |
+| Linux | [![build status](https://github.com/ntex-rs/ntex/actions/workflows/linux.yml/badge.svg?branch=master&event=push)](https://github.com/ntex-rs/ntex/actions/workflows/linux.yml/badge.svg) |
-| macOS | [![build status](https://github.com/ntex-rs/ntex/workflows/CI%20%28OSX%29/badge.svg?branch=master&event=push)](https://github.com/ntex-rs/ntex/actions?query=workflow%3A"CI+(OSX)") |
+| macOS | [![build status](https://github.com/ntex-rs/ntex/actions/workflows/osx.yml/badge.svg?branch=master&event=push)](https://github.com/ntex-rs/ntex/actions/workflows/osx.yml/badge.svg) |
-| Windows | [![build status](https://github.com/ntex-rs/ntex/workflows/CI%20%28Windows%29/badge.svg?branch=master&event=push)](https://github.com/ntex-rs/ntex/actions?query=workflow%3A"CI+(Windows)") |
+| Windows | [![build status](https://github.com/ntex-rs/ntex/actions/workflows/windows.yml/badge.svg?branch=master&event=push)](https://github.com/ntex-rs/ntex/actions/workflows/windows.yml/badge.svg) |

 ## Usage

 ntex supports multiple async runtimes, runtime must be selected as a feature. Available options are `compio`, `tokio`,
-`glommio` or `async-std`.
+`neon` or `neon-uring`.

 ```toml
 [dependencies]
-ntex = { version = "2", features = ["tokio"] }
+ntex = { version = "2", features = ["compio"] }
 ```

 ## Documentation & community resources

View file

@ -1,45 +0,0 @@
# Changes
## [0.4.0] - 2024-01-09
* Release
## [0.4.0-b.0] - 2024-01-07
* Use "async fn" in trait for Service definition
## [0.3.2] - 2023-11-22
* Replace async-oneshot with oneshot
## [0.3.1] - 2023-11-12
* Optimize io read task
## [0.3.0] - 2023-06-22
* Release v0.3.0
## [0.3.0-beta.0] - 2023-06-16
* Migrate to ntex-service 1.2
## [0.2.2] - 2023-01-26
* Update io api usage
## [0.2.0] - 2023-01-04
* Release
## [0.2.0-beta.0] - 2022-12-28
* Migrate to ntex-service 1.0
## [0.1.1] - 2022-01-30
* Update to ntex-io 0.1.7
## [0.1.0] - 2022-01-03
* Initial release

View file

@ -1,24 +0,0 @@
[package]
name = "ntex-async-std"
version = "0.5.1"
authors = ["ntex contributors <team@ntex.rs>"]
description = "async-std intergration for ntex framework"
keywords = ["network", "framework", "async", "futures"]
homepage = "https://ntex.rs"
repository = "https://github.com/ntex-rs/ntex.git"
documentation = "https://docs.rs/ntex-rt-async-std/"
categories = ["network-programming", "asynchronous"]
license = "MIT OR Apache-2.0"
edition = "2021"
[lib]
name = "ntex_async_std"
path = "src/lib.rs"
[dependencies]
ntex-bytes = "0.1"
ntex-io = "2.5"
ntex-util = "2.0"
log = "0.4"
async-std = { version = "1", features = ["unstable"] }
oneshot = { version = "0.1", default-features = false, features = ["async"] }

View file

@ -1 +0,0 @@
../LICENSE-APACHE

View file

@ -1 +0,0 @@
../LICENSE-MIT

View file

@ -1,220 +0,0 @@
use std::{
any, cell::RefCell, future::poll_fn, io, pin::Pin, task::ready, task::Context,
task::Poll,
};
use async_std::io::{Read as ARead, Write as AWrite};
use ntex_bytes::{Buf, BufMut, BytesVec};
use ntex_io::{types, Handle, IoStream, ReadContext, WriteContext, WriteContextBuf};
use crate::TcpStream;
impl IoStream for TcpStream {
fn start(self, read: ReadContext, write: WriteContext) -> Option<Box<dyn Handle>> {
let mut rio = Read(RefCell::new(self.clone()));
async_std::task::spawn_local(async move {
read.handle(&mut rio).await;
});
let mut wio = Write(RefCell::new(self.clone()));
async_std::task::spawn_local(async move {
write.handle(&mut wio).await;
});
Some(Box::new(self))
}
}
impl Handle for TcpStream {
fn query(&self, id: any::TypeId) -> Option<Box<dyn any::Any>> {
if id == any::TypeId::of::<types::PeerAddr>() {
if let Ok(addr) = self.0.peer_addr() {
return Some(Box::new(types::PeerAddr(addr)));
}
}
None
}
}
/// Read io task
struct Read(RefCell<TcpStream>);
impl ntex_io::AsyncRead for Read {
async fn read(&mut self, mut buf: BytesVec) -> (BytesVec, io::Result<usize>) {
// read data from socket
let result = poll_fn(|cx| {
let mut io = self.0.borrow_mut();
poll_read_buf(Pin::new(&mut io.0), cx, &mut buf)
})
.await;
(buf, result)
}
}
struct Write(RefCell<TcpStream>);
impl ntex_io::AsyncWrite for Write {
#[inline]
async fn write(&mut self, buf: &mut WriteContextBuf) -> io::Result<()> {
poll_fn(|cx| {
if let Some(mut b) = buf.take() {
let result = flush_io(&mut self.0.borrow_mut().0, &mut b, cx);
buf.set(b);
result
} else {
Poll::Ready(Ok(()))
}
})
.await
}
#[inline]
async fn flush(&mut self) -> io::Result<()> {
Ok(())
}
#[inline]
async fn shutdown(&mut self) -> io::Result<()> {
self.0.borrow().0.shutdown(std::net::Shutdown::Both)
}
}
/// Flush write buffer to underlying I/O stream.
pub(super) fn flush_io<T: ARead + AWrite + Unpin>(
io: &mut T,
buf: &mut BytesVec,
cx: &mut Context<'_>,
) -> Poll<io::Result<()>> {
let len = buf.len();
if len != 0 {
// log::trace!("flushing framed transport: {:?}", buf.len());
let mut written = 0;
let result = loop {
break match Pin::new(&mut *io).poll_write(cx, &buf[written..]) {
Poll::Ready(Ok(n)) => {
if n == 0 {
log::trace!("Disconnected during flush, written {}", written);
Poll::Ready(Err(io::Error::new(
io::ErrorKind::WriteZero,
"failed to write frame to transport",
)))
} else {
written += n;
if written == len {
buf.clear();
Poll::Ready(Ok(()))
} else {
continue;
}
}
}
Poll::Pending => {
// remove written data
buf.advance(written);
Poll::Pending
}
Poll::Ready(Err(e)) => {
log::trace!("Error during flush: {}", e);
Poll::Ready(Err(e))
}
};
};
// log::trace!("flushed {} bytes", written);
// flush
if written > 0 {
match Pin::new(&mut *io).poll_flush(cx) {
Poll::Ready(Ok(_)) => result,
Poll::Pending => Poll::Pending,
Poll::Ready(Err(e)) => {
log::trace!("error during flush: {}", e);
Poll::Ready(Err(e))
}
}
} else {
result
}
} else {
Poll::Ready(Ok(()))
}
}
pub fn poll_read_buf<T: ARead>(
io: Pin<&mut T>,
cx: &mut Context<'_>,
buf: &mut BytesVec,
) -> Poll<io::Result<usize>> {
let dst = unsafe { &mut *(buf.chunk_mut() as *mut _ as *mut [u8]) };
let n = ready!(io.poll_read(cx, dst))?;
// Safety: This is guaranteed to be the number of initialized (and read)
// bytes due to the invariants provided by Read::poll_read() api
unsafe {
buf.advance_mut(n);
}
Poll::Ready(Ok(n))
}
#[cfg(unix)]
mod unixstream {
use super::*;
use crate::UnixStream;
impl IoStream for UnixStream {
fn start(self, read: ReadContext, write: WriteContext) -> Option<Box<dyn Handle>> {
let mut rio = Read(RefCell::new(self.clone()));
async_std::task::spawn_local(async move {
read.handle(&mut rio).await;
});
let mut wio = Write(RefCell::new(self));
async_std::task::spawn_local(async move {
write.handle(&mut wio).await;
});
None
}
}
/// Read io task
struct Read(RefCell<UnixStream>);
impl ntex_io::AsyncRead for Read {
async fn read(&mut self, mut buf: BytesVec) -> (BytesVec, io::Result<usize>) {
// read data from socket
let result = poll_fn(|cx| {
let mut io = self.0.borrow_mut();
poll_read_buf(Pin::new(&mut io.0), cx, &mut buf)
})
.await;
(buf, result)
}
}
struct Write(RefCell<UnixStream>);
impl ntex_io::AsyncWrite for Write {
#[inline]
async fn write(&mut self, buf: &mut WriteContextBuf) -> io::Result<()> {
poll_fn(|cx| {
if let Some(mut b) = buf.take() {
let result = flush_io(&mut self.0.borrow_mut().0, &mut b, cx);
buf.set(b);
result
} else {
Poll::Ready(Ok(()))
}
})
.await
}
#[inline]
async fn flush(&mut self) -> io::Result<()> {
Ok(())
}
#[inline]
async fn shutdown(&mut self) -> io::Result<()> {
self.0.borrow().0.shutdown(std::net::Shutdown::Both)
}
}
}

View file

@ -1,64 +0,0 @@
use std::{io::Result, net, net::SocketAddr};
use ntex_bytes::PoolRef;
use ntex_io::Io;
mod io;
mod signals;
pub use self::signals::{signal, Signal};
#[derive(Clone)]
struct TcpStream(async_std::net::TcpStream);
#[cfg(unix)]
#[derive(Clone)]
struct UnixStream(async_std::os::unix::net::UnixStream);
/// Opens a TCP connection to a remote host.
pub async fn tcp_connect(addr: SocketAddr) -> Result<Io> {
let sock = async_std::net::TcpStream::connect(addr).await?;
sock.set_nodelay(true)?;
Ok(Io::new(TcpStream(sock)))
}
/// Opens a TCP connection to a remote host and use specified memory pool.
pub async fn tcp_connect_in(addr: SocketAddr, pool: PoolRef) -> Result<Io> {
let sock = async_std::net::TcpStream::connect(addr).await?;
sock.set_nodelay(true)?;
Ok(Io::with_memory_pool(TcpStream(sock), pool))
}
#[cfg(unix)]
/// Opens a unix stream connection.
pub async fn unix_connect<P>(addr: P) -> Result<Io>
where
P: AsRef<async_std::path::Path>,
{
let sock = async_std::os::unix::net::UnixStream::connect(addr).await?;
Ok(Io::new(UnixStream(sock)))
}
#[cfg(unix)]
/// Opens a unix stream connection and specified memory pool.
pub async fn unix_connect_in<P>(addr: P, pool: PoolRef) -> Result<Io>
where
P: AsRef<async_std::path::Path>,
{
let sock = async_std::os::unix::net::UnixStream::connect(addr).await?;
Ok(Io::with_memory_pool(UnixStream(sock), pool))
}
/// Convert std TcpStream to async-std's TcpStream
pub fn from_tcp_stream(stream: net::TcpStream) -> Result<Io> {
stream.set_nonblocking(true)?;
stream.set_nodelay(true)?;
Ok(Io::new(TcpStream(async_std::net::TcpStream::from(stream))))
}
#[cfg(unix)]
/// Convert std UnixStream to async-std's UnixStream
pub fn from_unix_stream(stream: std::os::unix::net::UnixStream) -> Result<Io> {
stream.set_nonblocking(true)?;
Ok(Io::new(UnixStream(From::from(stream))))
}

View file

@ -1,50 +0,0 @@
use std::{cell::RefCell, future::Future, pin::Pin, rc::Rc, task::Context, task::Poll};
thread_local! {
static SRUN: RefCell<bool> = const { RefCell::new(false) };
static SHANDLERS: Rc<RefCell<Vec<oneshot::Sender<Signal>>>> = Default::default();
}
/// Different types of process signals
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub enum Signal {
/// SIGHUP
Hup,
/// SIGINT
Int,
/// SIGTERM
Term,
/// SIGQUIT
Quit,
}
/// Register signal handler.
///
/// Signals are handled by oneshots, you have to re-register
/// after each signal.
pub fn signal() -> Option<oneshot::Receiver<Signal>> {
if !SRUN.with(|v| *v.borrow()) {
async_std::task::spawn_local(Signals::new());
}
SHANDLERS.with(|handlers| {
let (tx, rx) = oneshot::channel();
handlers.borrow_mut().push(tx);
Some(rx)
})
}
struct Signals {}
impl Signals {
pub(super) fn new() -> Signals {
Self {}
}
}
impl Future for Signals {
type Output = ();
fn poll(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Self::Output> {
Poll::Ready(())
}
}

View file

@@ -3779,7 +3779,7 @@ impl<const N: usize> PartialEq<BytesMut> for [u8; N] {
     }
 }

-impl<'a, const N: usize> PartialEq<BytesMut> for &'a [u8; N] {
+impl<const N: usize> PartialEq<BytesMut> for &[u8; N] {
     fn eq(&self, other: &BytesMut) -> bool {
         *other == *self
     }
@@ -3878,7 +3878,7 @@ impl<const N: usize> PartialEq<Bytes> for [u8; N] {
     }
 }

-impl<'a, const N: usize> PartialEq<Bytes> for &'a [u8; N] {
+impl<const N: usize> PartialEq<Bytes> for &[u8; N] {
     fn eq(&self, other: &Bytes) -> bool {
         *other == *self
     }
@@ -4076,7 +4076,7 @@ impl<const N: usize> PartialEq<BytesVec> for [u8; N] {
     }
 }

-impl<'a, const N: usize> PartialEq<BytesVec> for &'a [u8; N] {
+impl<const N: usize> PartialEq<BytesVec> for &[u8; N] {
     fn eq(&self, other: &BytesVec) -> bool {
         *other == *self
     }

View file

@@ -3,7 +3,7 @@ use std::fmt::{Formatter, LowerHex, Result, UpperHex};

 struct BytesRef<'a>(&'a [u8]);

-impl<'a> LowerHex for BytesRef<'a> {
+impl LowerHex for BytesRef<'_> {
     fn fmt(&self, f: &mut Formatter<'_>) -> Result {
         for b in self.0 {
             write!(f, "{b:02x}")?;
@@ -12,7 +12,7 @@ impl<'a> LowerHex for BytesRef<'a> {
     }
 }

-impl<'a> UpperHex for BytesRef<'a> {
+impl UpperHex for BytesRef<'_> {
     fn fmt(&self, f: &mut Formatter<'_>) -> Result {
         for b in self.0 {
             write!(f, "{b:02X}")?;

View file

@ -1,57 +0,0 @@
# Changes
## [0.5.2] - 2024-09-xx
* Update to glommio v0.9
## [0.4.0] - 2024-01-09
* Release
## [0.4.0-b.0] - 2024-01-07
* Use "async fn" in trait for Service definition
## [0.3.1] - 2023-11-22
* Replace async-oneshot with oneshot
## [0.3.0] - 2023-06-22
* Release v0.3.0
## [0.3.0-beta.0] - 2023-06-16
* Migrate to ntex-service 1.2
## [0.2.4] - 2023-05-30
* Fix borrow mut panic #204
## [0.2.3] - 2023-04-11
* Chore upgrade glommio to 0.8
## [0.2.2] - 2023-01-26
* Update io api usage
## [0.2.0] - 2023-01-04
* Release
## [0.2.0-beta.0] - 2022-12-28
* Migrate to ntex-service 1.0
## [0.1.2] - 2022-02-20
* Upgrade to glommio 0.7
## [0.1.1] - 2022-01-30
* Update to ntex-io 0.1.7
## [0.1.0] - 2022-01-17
* Initial release

View file

@ -1,27 +0,0 @@
[package]
name = "ntex-glommio"
version = "0.5.2"
authors = ["ntex contributors <team@ntex.rs>"]
description = "glommio intergration for ntex framework"
keywords = ["network", "framework", "async", "futures"]
homepage = "https://ntex.rs"
repository = "https://github.com/ntex-rs/ntex.git"
documentation = "https://docs.rs/ntex-rt-glommio/"
categories = ["network-programming", "asynchronous"]
license = "MIT OR Apache-2.0"
edition = "2021"
[lib]
name = "ntex_glommio"
path = "src/lib.rs"
[dependencies]
ntex-bytes = "0.1"
ntex-io = "2.5"
ntex-util = "2.0"
futures-lite = "2.2"
log = "0.4"
oneshot = { version = "0.1", default-features = false, features = ["async"] }
[target.'cfg(target_os = "linux")'.dependencies]
glommio = "0.9"

View file

@ -1 +0,0 @@
../LICENSE-APACHE

View file

@ -1 +0,0 @@
../LICENSE-MIT

View file

@ -1,205 +0,0 @@
use std::{any, future::poll_fn, io, pin::Pin, task::ready, task::Context, task::Poll};
use futures_lite::io::{AsyncRead, AsyncWrite};
use ntex_bytes::{Buf, BufMut, BytesVec};
use ntex_io::{types, Handle, IoStream, ReadContext, WriteContext, WriteContextBuf};
use crate::net_impl::{TcpStream, UnixStream};
impl IoStream for TcpStream {
fn start(self, read: ReadContext, write: WriteContext) -> Option<Box<dyn Handle>> {
let mut rio = Read(self.clone());
glommio::spawn_local(async move { read.handle(&mut rio).await }).detach();
let mut wio = Write(self.clone());
glommio::spawn_local(async move { write.handle(&mut wio).await }).detach();
Some(Box::new(self))
}
}
impl IoStream for UnixStream {
fn start(self, read: ReadContext, write: WriteContext) -> Option<Box<dyn Handle>> {
let mut rio = UnixRead(self.clone());
glommio::spawn_local(async move {
read.handle(&mut rio).await;
})
.detach();
let mut wio = UnixWrite(self);
glommio::spawn_local(async move { write.handle(&mut wio).await }).detach();
None
}
}
impl Handle for TcpStream {
fn query(&self, id: any::TypeId) -> Option<Box<dyn any::Any>> {
if id == any::TypeId::of::<types::PeerAddr>() {
if let Ok(addr) = self.0.borrow().peer_addr() {
return Some(Box::new(types::PeerAddr(addr)));
}
}
None
}
}
/// Read io task
struct Read(TcpStream);
impl ntex_io::AsyncRead for Read {
async fn read(&mut self, mut buf: BytesVec) -> (BytesVec, io::Result<usize>) {
// read data from socket
let result = poll_fn(|cx| {
let mut io = self.0 .0.borrow_mut();
poll_read_buf(Pin::new(&mut *io), cx, &mut buf)
})
.await;
(buf, result)
}
}
struct Write(TcpStream);
impl ntex_io::AsyncWrite for Write {
#[inline]
async fn write(&mut self, buf: &mut WriteContextBuf) -> io::Result<()> {
poll_fn(|cx| {
if let Some(mut b) = buf.take() {
let result = flush_io(&mut *self.0 .0.borrow_mut(), &mut b, cx);
buf.set(b);
result
} else {
Poll::Ready(Ok(()))
}
})
.await
}
#[inline]
async fn flush(&mut self) -> io::Result<()> {
Ok(())
}
#[inline]
async fn shutdown(&mut self) -> io::Result<()> {
poll_fn(|cx| Pin::new(&mut *self.0 .0.borrow_mut()).poll_close(cx)).await
}
}
struct UnixRead(UnixStream);
impl ntex_io::AsyncRead for UnixRead {
async fn read(&mut self, mut buf: BytesVec) -> (BytesVec, io::Result<usize>) {
// read data from socket
let result = poll_fn(|cx| {
let mut io = self.0 .0.borrow_mut();
poll_read_buf(Pin::new(&mut *io), cx, &mut buf)
})
.await;
(buf, result)
}
}
struct UnixWrite(UnixStream);
impl ntex_io::AsyncWrite for UnixWrite {
#[inline]
async fn write(&mut self, buf: &mut WriteContextBuf) -> io::Result<()> {
poll_fn(|cx| {
if let Some(mut b) = buf.take() {
let result = flush_io(&mut *self.0 .0.borrow_mut(), &mut b, cx);
buf.set(b);
result
} else {
Poll::Ready(Ok(()))
}
})
.await
}
#[inline]
async fn flush(&mut self) -> io::Result<()> {
Ok(())
}
#[inline]
async fn shutdown(&mut self) -> io::Result<()> {
poll_fn(|cx| Pin::new(&mut *self.0 .0.borrow_mut()).poll_close(cx)).await
}
}
/// Flush write buffer to underlying I/O stream.
pub(super) fn flush_io<T: AsyncRead + AsyncWrite + Unpin>(
io: &mut T,
buf: &mut BytesVec,
cx: &mut Context<'_>,
) -> Poll<io::Result<()>> {
let len = buf.len();
if len != 0 {
// log::trace!("flushing framed transport: {:?}", buf.len());
let mut written = 0;
let result = loop {
break match Pin::new(&mut *io).poll_write(cx, &buf[written..]) {
Poll::Ready(Ok(n)) => {
if n == 0 {
log::trace!("Disconnected during flush, written {}", written);
Poll::Ready(Err(io::Error::new(
io::ErrorKind::WriteZero,
"failed to write frame to transport",
)))
} else {
written += n;
if written == len {
buf.clear();
Poll::Ready(Ok(()))
} else {
continue;
}
}
}
Poll::Pending => {
// remove written data
buf.advance(written);
Poll::Pending
}
Poll::Ready(Err(e)) => {
log::trace!("Error during flush: {}", e);
Poll::Ready(Err(e))
}
};
};
// log::trace!("flushed {} bytes", written);
// flush
if written > 0 {
match Pin::new(&mut *io).poll_flush(cx) {
Poll::Ready(Ok(_)) => result,
Poll::Pending => Poll::Pending,
Poll::Ready(Err(e)) => {
log::trace!("error during flush: {}", e);
Poll::Ready(Err(e))
}
}
} else {
result
}
} else {
Poll::Ready(Ok(()))
}
}
pub fn poll_read_buf<T: AsyncRead>(
io: Pin<&mut T>,
cx: &mut Context<'_>,
buf: &mut BytesVec,
) -> Poll<io::Result<usize>> {
let dst = unsafe { &mut *(buf.chunk_mut() as *mut _ as *mut [u8]) };
let n = ready!(io.poll_read(cx, dst))?;
// Safety: This is guaranteed to be the number of initialized (and read)
// bytes due to the invariants provided by Read::poll_read() api
unsafe {
buf.advance_mut(n);
}
Poll::Ready(Ok(n))
}

View file

@ -1,90 +0,0 @@
#[cfg(target_os = "linux")]
mod io;
#[cfg(target_os = "linux")]
mod signals;
#[cfg(target_os = "linux")]
pub use self::signals::{signal, Signal};
#[cfg(target_os = "linux")]
mod net_impl {
use std::os::unix::io::{FromRawFd, IntoRawFd};
use std::{cell::RefCell, io::Result, net, net::SocketAddr, rc::Rc};
use ntex_bytes::PoolRef;
use ntex_io::Io;
#[derive(Clone)]
pub(crate) struct TcpStream(pub(crate) Rc<RefCell<glommio::net::TcpStream>>);
impl TcpStream {
fn new(io: glommio::net::TcpStream) -> Self {
Self(Rc::new(RefCell::new(io)))
}
}
#[derive(Clone)]
pub(crate) struct UnixStream(pub(crate) Rc<RefCell<glommio::net::UnixStream>>);
impl UnixStream {
fn new(io: glommio::net::UnixStream) -> Self {
Self(Rc::new(RefCell::new(io)))
}
}
/// Opens a TCP connection to a remote host.
pub async fn tcp_connect(addr: SocketAddr) -> Result<Io> {
let sock = glommio::net::TcpStream::connect(addr).await?;
sock.set_nodelay(true)?;
Ok(Io::new(TcpStream::new(sock)))
}
/// Opens a TCP connection to a remote host and use specified memory pool.
pub async fn tcp_connect_in(addr: SocketAddr, pool: PoolRef) -> Result<Io> {
let sock = glommio::net::TcpStream::connect(addr).await?;
sock.set_nodelay(true)?;
Ok(Io::with_memory_pool(TcpStream::new(sock), pool))
}
/// Opens a unix stream connection.
pub async fn unix_connect<P>(addr: P) -> Result<Io>
where
P: AsRef<std::path::Path>,
{
let sock = glommio::net::UnixStream::connect(addr).await?;
Ok(Io::new(UnixStream::new(sock)))
}
/// Opens a unix stream connection and specified memory pool.
pub async fn unix_connect_in<P>(addr: P, pool: PoolRef) -> Result<Io>
where
P: AsRef<std::path::Path>,
{
let sock = glommio::net::UnixStream::connect(addr).await?;
Ok(Io::with_memory_pool(UnixStream::new(sock), pool))
}
/// Convert std TcpStream to glommio's TcpStream
pub fn from_tcp_stream(stream: net::TcpStream) -> Result<Io> {
stream.set_nonblocking(true)?;
stream.set_nodelay(true)?;
unsafe {
Ok(Io::new(TcpStream::new(
glommio::net::TcpStream::from_raw_fd(stream.into_raw_fd()),
)))
}
}
/// Convert std UnixStream to glommio's UnixStream
pub fn from_unix_stream(stream: std::os::unix::net::UnixStream) -> Result<Io> {
stream.set_nonblocking(true)?;
unsafe {
Ok(Io::new(UnixStream::new(
glommio::net::UnixStream::from_raw_fd(stream.into_raw_fd()),
)))
}
}
}
#[cfg(target_os = "linux")]
pub use self::net_impl::*;

View file

@ -1,50 +0,0 @@
use std::{cell::RefCell, future::Future, pin::Pin, rc::Rc, task::Context, task::Poll};
thread_local! {
static SRUN: RefCell<bool> = const { RefCell::new(false) };
static SHANDLERS: Rc<RefCell<Vec<oneshot::Sender<Signal>>>> = Default::default();
}
/// Different types of process signals
#[derive(PartialEq, Clone, Copy, Debug)]
pub enum Signal {
/// SIGHUP
Hup,
/// SIGINT
Int,
/// SIGTERM
Term,
/// SIGQUIT
Quit,
}
/// Register signal handler.
///
/// Signals are handled by oneshots, you have to re-register
/// after each signal.
pub fn signal() -> Option<oneshot::Receiver<Signal>> {
if !SRUN.with(|v| *v.borrow()) {
glommio::spawn_local(Signals::new()).detach();
}
SHANDLERS.with(|handlers| {
let (tx, rx) = oneshot::channel();
handlers.borrow_mut().push(tx);
Some(rx)
})
}
struct Signals {}
impl Signals {
pub(super) fn new() -> Signals {
Self {}
}
}
impl Future for Signals {
type Output = ();
fn poll(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Self::Output> {
Poll::Ready(())
}
}

View file

@@ -354,13 +354,13 @@ impl AsName for HeaderName {
     }
 }

-impl<'a> AsName for &'a HeaderName {
+impl AsName for &HeaderName {
     fn as_name(&self) -> Either<&HeaderName, &str> {
         Either::Left(self)
     }
 }

-impl<'a> AsName for &'a str {
+impl AsName for &str {
     fn as_name(&self) -> Either<&HeaderName, &str> {
         Either::Right(self)
     }
@@ -372,7 +372,7 @@ impl AsName for String {
     }
 }

-impl<'a> AsName for &'a String {
+impl AsName for &String {
     fn as_name(&self) -> Either<&HeaderName, &str> {
         Either::Right(self.as_str())
     }

View file

@@ -158,7 +158,7 @@ impl<'de> Deserialize<'de> for HeaderValue {

         struct HeaderValueVisitor;

-        impl<'de> Visitor<'de> for HeaderValueVisitor {
+        impl Visitor<'_> for HeaderValueVisitor {
             type Value = HeaderValue;

             fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {

View file

@@ -641,14 +641,14 @@ impl PartialOrd<HeaderValue> for String {
     }
 }

-impl<'a> PartialEq<HeaderValue> for &'a HeaderValue {
+impl PartialEq<HeaderValue> for &HeaderValue {
     #[inline]
     fn eq(&self, other: &HeaderValue) -> bool {
         **self == *other
     }
 }

-impl<'a> PartialOrd<HeaderValue> for &'a HeaderValue {
+impl PartialOrd<HeaderValue> for &HeaderValue {
     #[inline]
     fn partial_cmp(&self, other: &HeaderValue) -> Option<cmp::Ordering> {
         (**self).partial_cmp(other)
@@ -675,14 +675,14 @@ where
     }
 }

-impl<'a> PartialEq<HeaderValue> for &'a str {
+impl PartialEq<HeaderValue> for &str {
     #[inline]
     fn eq(&self, other: &HeaderValue) -> bool {
         *other == *self
     }
 }

-impl<'a> PartialOrd<HeaderValue> for &'a str {
+impl PartialOrd<HeaderValue> for &str {
     #[inline]
     fn partial_cmp(&self, other: &HeaderValue) -> Option<cmp::Ordering> {
         self.as_bytes().partial_cmp(other.as_bytes())

View file

@@ -1,5 +1,29 @@
 # Changes

+## [2.11.1] - 2025-03-20
+
+* Add readiness check support
+
+## [2.11.0] - 2025-03-10
+
+* Add single io context
+
+## [2.10.0] - 2025-02-26
+
+* Impl Filter for Sealed #506
+
+## [2.9.3] - 2025-01-21
+
+* Allow to access io write destination buffer
+
+## [2.9.2] - 2024-12-05
+
+* Better error handling
+
+## [2.9.1] - 2024-12-04
+
+* Check service readiness for every turn
+
 ## [2.9.0] - 2024-12-04

 * Use updated Service trait

View file

@@ -1,6 +1,6 @@
 [package]
 name = "ntex-io"
-version = "2.9.0"
+version = "2.11.1"
 authors = ["ntex contributors <team@ntex.rs>"]
 description = "Utilities for encoding and decoding frames"
 keywords = ["network", "framework", "async", "futures"]
@@ -20,7 +20,6 @@ ntex-codec = "0.6"
 ntex-bytes = "0.1"
 ntex-util = "2.8"
 ntex-service = "3.4"
-ntex-rt = "0.4"

 bitflags = "2"
 log = "0.4"
@@ -29,4 +28,3 @@ pin-project-lite = "0.2"
 [dev-dependencies]
 ntex = "2"
 rand = "0.8"
-env_logger = "0.11"

View file

@@ -152,6 +152,27 @@ impl Stack {
         }
     }

+    pub(crate) fn with_read_source<F, R>(&self, io: &IoRef, f: F) -> R
+    where
+        F: FnOnce(&mut BytesVec) -> R,
+    {
+        let item = self.get_last_level();
+        let mut rb = item.0.take();
+        if rb.is_none() {
+            rb = Some(io.memory_pool().get_read_buf());
+        }
+
+        let result = f(rb.as_mut().unwrap());
+        if let Some(b) = rb {
+            if b.is_empty() {
+                io.memory_pool().release_read_buf(b);
+            } else {
+                item.0.set(Some(b));
+            }
+        }
+        result
+    }
+
     pub(crate) fn with_read_destination<F, R>(&self, io: &IoRef, f: F) -> R
     where
         F: FnOnce(&mut BytesVec) -> R,
@@ -218,12 +239,12 @@ impl Stack {
     pub(crate) fn with_write_destination<F, R>(&self, io: &IoRef, f: F) -> R
     where
-        F: FnOnce(&mut Option<BytesVec>) -> R,
+        F: FnOnce(Option<&mut BytesVec>) -> R,
     {
         let item = self.get_last_level();
         let mut wb = item.1.take();

-        let result = f(&mut wb);
+        let result = f(wb.as_mut());

         // check nested updates
         if item.1.take().is_some() {
@@ -300,7 +321,7 @@ pub struct ReadBuf<'a> {
     pub(crate) need_write: Cell<bool>,
 }

-impl<'a> ReadBuf<'a> {
+impl ReadBuf<'_> {
     #[inline]
     /// Get io tag
     pub fn tag(&self) -> &'static str {
@@ -444,7 +465,7 @@ pub struct WriteBuf<'a> {
     pub(crate) need_write: Cell<bool>,
 }

-impl<'a> WriteBuf<'a> {
+impl WriteBuf<'_> {
     #[inline]
     /// Get io tag
     pub fn tag(&self) -> &'static str {

View file

@@ -160,7 +160,6 @@ where
     service: PipelineBinding<S, DispatchItem<U>>,
     error: Cell<Option<DispatcherError<S::Error, <U as Encoder>::Error>>>,
     inflight: Cell<u32>,
-    ready: Cell<bool>,
 }

 #[derive(Copy, Clone, Debug)]
@@ -222,7 +221,6 @@ where
             codec,
             error: Cell::new(None),
             inflight: Cell::new(0),
-            ready: Cell::new(false),
             service: Pipeline::new(service.into_service()).bind(),
         });
@@ -343,7 +341,6 @@ where
                         PollService::Continue => continue,
                     };

-                    slf.shared.ready.set(false);
                     slf.call_service(cx, item);
                 }
                 // handle write back-pressure
@@ -473,16 +470,9 @@ where
     }

     fn poll_service(&mut self, cx: &mut Context<'_>) -> Poll<PollService<U>> {
-        if self.shared.ready.get() {
-            return Poll::Ready(self.check_error());
-        }
-
         // wait until service becomes ready
         match self.shared.service.poll_ready(cx) {
-            Poll::Ready(Ok(_)) => {
-                self.shared.ready.set(true);
-                Poll::Ready(self.check_error())
-            }
+            Poll::Ready(Ok(_)) => Poll::Ready(self.check_error()),
             // pause io read task
             Poll::Pending => {
                 log::trace!(
@@ -720,7 +710,6 @@ mod tests {
             io: state.into(),
             error: Cell::new(None),
             inflight: Cell::new(0),
-            ready: Cell::new(false),
             service: Pipeline::new(service).bind(),
         });
@@ -1255,6 +1244,8 @@ mod tests {
                 sleep(Millis(50)).await;
                 if let DispatchItem::Item(msg) = msg {
                     Ok::<_, ()>(Some(msg.freeze()))
+                } else if let DispatchItem::Disconnect(_) = msg {
+                    Ok::<_, ()>(None)
                 } else {
                     panic!()
                 }

View file

@@ -25,6 +25,8 @@ bitflags::bitflags! {
         /// write task paused
         const WR_PAUSED = 0b0000_0100_0000_0000;
+        /// wait for write completion task
+        const WR_TASK_WAIT = 0b0000_1000_0000_0000;

         /// dispatcher is marked stopped
         const DSP_STOP = 0b0001_0000_0000_0000;
@@ -38,6 +40,10 @@ impl Flags {
         self.intersects(Flags::IO_STOPPED)
     }

+    pub(crate) fn is_task_waiting_for_write(&self) -> bool {
+        self.contains(Flags::WR_TASK_WAIT)
+    }
+
     pub(crate) fn is_waiting_for_write(&self) -> bool {
         self.intersects(Flags::BUF_W_MUST_FLUSH | Flags::BUF_W_BACKPRESSURE)
     }
@@ -46,10 +52,18 @@ impl Flags {
         self.remove(Flags::BUF_W_MUST_FLUSH | Flags::BUF_W_BACKPRESSURE);
     }

+    pub(crate) fn task_waiting_for_write_is_done(&mut self) {
+        self.remove(Flags::WR_TASK_WAIT);
+    }
+
     pub(crate) fn is_read_buf_ready(&self) -> bool {
         self.contains(Flags::BUF_R_READY)
     }

+    pub(crate) fn is_waiting_for_read(&self) -> bool {
+        self.contains(Flags::RD_NOTIFY)
+    }
+
     pub(crate) fn cannot_read(self) -> bool {
         self.intersects(Flags::RD_PAUSED | Flags::BUF_R_FULL)
     }

View file

@@ -10,7 +10,7 @@ use ntex_util::{future::Either, task::LocalWaker, time::Seconds};
 use crate::buf::Stack;
 use crate::filter::{Base, Filter, Layer, NullFilter};
 use crate::flags::Flags;
-use crate::seal::Sealed;
+use crate::seal::{IoBoxed, Sealed};
 use crate::tasks::{ReadContext, WriteContext};
 use crate::timer::TimerHandle;
 use crate::{Decoded, FilterLayer, Handle, IoStatusUpdate, IoStream, RecvError};
@@ -80,6 +80,23 @@ impl IoState {
         }
     }

+    /// Get current io error
+    pub(super) fn error(&self) -> Option<io::Error> {
+        if let Some(err) = self.error.take() {
+            self.error
+                .set(Some(io::Error::new(err.kind(), format!("{}", err))));
+            Some(err)
+        } else {
+            None
+        }
+    }
+
+    /// Get current io result
+    pub(super) fn error_or_disconnected(&self) -> io::Error {
+        self.error()
+            .unwrap_or_else(|| io::Error::new(io::ErrorKind::NotConnected, "Disconnected"))
+    }
+
     pub(super) fn io_stopped(&self, err: Option<io::Error>) {
         if err.is_some() {
             self.error.set(err);
@@ -257,19 +274,6 @@ impl<F> Io<F> {
     fn io_ref(&self) -> &IoRef {
         unsafe { &*self.0.get() }
     }
-
-    /// Get current io error
-    fn error(&self) -> Option<io::Error> {
-        self.st().error.take()
-    }
-
-    /// Get current io error
-    fn error_or_disconnected(&self) -> io::Error {
-        self.st()
-            .error
-            .take()
-            .unwrap_or_else(|| io::Error::new(io::ErrorKind::Other, "Disconnected"))
-    }
 }

 impl<F: FilterLayer, T: Filter> Io<Layer<F, T>> {
@@ -290,6 +294,12 @@ impl<F: Filter> Io<F> {
         Io(UnsafeCell::new(state), marker::PhantomData)
     }

+    #[inline]
+    /// Convert current io stream into boxed version
+    pub fn boxed(self) -> IoBoxed {
+        self.seal().into()
+    }
+
     #[inline]
     /// Map current filter with new one
     pub fn add_filter<U>(self, nf: U) -> Io<Layer<U, F>>
@@ -333,7 +343,7 @@ impl<F> Io<F> {
                 "Timeout",
             ))),
             Err(RecvError::Stop) => Err(Either::Right(io::Error::new(
-                io::ErrorKind::Other,
+                io::ErrorKind::UnexpectedEof,
                 "Dispatcher stopped",
             ))),
             Err(RecvError::WriteBackpressure) => {
@@ -423,11 +433,11 @@ impl<F> Io<F> {
         let mut flags = st.flags.get();

         if flags.is_stopped() {
-            Poll::Ready(Err(self.error_or_disconnected()))
+            Poll::Ready(Err(st.error_or_disconnected()))
         } else {
             st.dispatch_task.register(cx.waker());

-            let ready = flags.contains(Flags::BUF_R_READY);
+            let ready = flags.is_read_buf_ready();
             if flags.cannot_read() {
                 flags.cleanup_read_flags();
                 st.read_task.wake();
@@ -511,7 +521,7 @@ impl<F> Io<F> {
         let st = self.st();
         let flags = st.flags.get();
         if flags.is_stopped() {
-            Err(RecvError::PeerGone(self.error()))
+            Err(RecvError::PeerGone(st.error()))
         } else if flags.contains(Flags::DSP_STOP) {
             st.remove_flags(Flags::DSP_STOP);
             Err(RecvError::Stop)
@@ -545,27 +555,31 @@ impl<F> Io<F> {
     /// otherwise wake up when size of write buffer is lower than
     /// buffer max size.
     pub fn poll_flush(&self, cx: &mut Context<'_>, full: bool) -> Poll<io::Result<()>> {
-        let flags = self.flags();
-
-        if flags.is_stopped() {
-            Poll::Ready(Err(self.error_or_disconnected()))
-        } else {
-            let st = self.st();
-            let len = st.buffer.write_destination_size();
-            if len > 0 {
-                if full {
-                    st.insert_flags(Flags::BUF_W_MUST_FLUSH);
-                    st.dispatch_task.register(cx.waker());
-                    return Poll::Pending;
-                } else if len >= st.pool.get().write_params_high() << 1 {
-                    st.insert_flags(Flags::BUF_W_BACKPRESSURE);
-                    st.dispatch_task.register(cx.waker());
-                    return Poll::Pending;
-                }
-            }
-            st.remove_flags(Flags::BUF_W_MUST_FLUSH | Flags::BUF_W_BACKPRESSURE);
-            Poll::Ready(Ok(()))
-        }
+        let st = self.st();
+        let flags = self.flags();
+
+        let len = st.buffer.write_destination_size();
+        if len > 0 {
+            if full {
+                st.insert_flags(Flags::BUF_W_MUST_FLUSH);
+                st.dispatch_task.register(cx.waker());
+                return if flags.is_stopped() {
+                    Poll::Ready(Err(st.error_or_disconnected()))
+                } else {
+                    Poll::Pending
+                };
+            } else if len >= st.pool.get().write_params_high() << 1 {
+                st.insert_flags(Flags::BUF_W_BACKPRESSURE);
+                st.dispatch_task.register(cx.waker());
+                return if flags.is_stopped() {
+                    Poll::Ready(Err(st.error_or_disconnected()))
+                } else {
+                    Poll::Pending
+                };
+            }
+        }
+        st.remove_flags(Flags::BUF_W_MUST_FLUSH | Flags::BUF_W_BACKPRESSURE);
+        Poll::Ready(Ok(()))
     }

     #[inline]
@@ -575,7 +589,7 @@ impl<F> Io<F> {
         let flags = st.flags.get();

         if flags.is_stopped() {
-            if let Some(err) = self.error() {
+            if let Some(err) = st.error() {
                 Poll::Ready(Err(err))
             } else {
                 Poll::Ready(Ok(()))
@@ -611,7 +625,7 @@ impl<F> Io<F> {
         let st = self.st();
         let flags = st.flags.get();
         if flags.intersects(Flags::IO_STOPPED | Flags::IO_STOPPING) {
-            Poll::Ready(IoStatusUpdate::PeerGone(self.error()))
+            Poll::Ready(IoStatusUpdate::PeerGone(st.error()))
         } else if flags.contains(Flags::DSP_STOP) {
             st.remove_flags(Flags::DSP_STOP);
             Poll::Ready(IoStatusUpdate::Stop)

View file

@@ -191,7 +191,7 @@ impl IoRef {
F: FnOnce(&mut BytesVec) -> R,
{
if self.0.flags.get().contains(Flags::IO_STOPPED) {
-Err(io::Error::new(io::ErrorKind::Other, "Disconnected"))
+Err(self.0.error_or_disconnected())
} else {
let result = self.0.buffer.with_write_source(self, f);
self.0.filter().process_write_buf(self, &self.0.buffer, 0)?;
@@ -199,6 +199,16 @@ impl IoRef {
}
}
#[doc(hidden)]
#[inline]
/// Get mut access to destination write buffer
pub fn with_write_dest_buf<F, R>(&self, f: F) -> R
where
F: FnOnce(Option<&mut BytesVec>) -> R,
{
self.0.buffer.with_write_destination(self, f)
}
#[inline]
/// Get mut access to source read buffer
pub fn with_read_buf<F, R>(&self, f: F) -> R
@@ -559,6 +569,10 @@ mod tests {
assert_eq!(in_bytes.get(), BIN.len() * 2);
assert_eq!(out_bytes.get(), 8);
assert_eq!(
state.with_write_dest_buf(|b| b.map(|b| b.len()).unwrap_or(0)),
0
);
// refs
assert_eq!(Rc::strong_count(&in_bytes), 3);


@@ -29,7 +29,7 @@ pub use self::filter::{Base, Filter, Layer};
pub use self::framed::Framed;
pub use self::io::{Io, IoRef, OnDisconnect};
pub use self::seal::{IoBoxed, Sealed};
-pub use self::tasks::{ReadContext, WriteContext, WriteContextBuf};
+pub use self::tasks::{IoContext, ReadContext, WriteContext, WriteContextBuf};
pub use self::timer::TimerHandle;
pub use self::utils::{seal, Decoded};
@@ -53,7 +53,9 @@ pub trait AsyncWrite {
/// Status for read task
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum ReadStatus {
+/// Read task is clear to proceed with read operation
Ready,
+/// Terminate read task
Terminate,
}


@@ -1,6 +1,7 @@
-use std::{fmt, ops};
+use std::{any::Any, any::TypeId, fmt, io, ops, task::Context, task::Poll};
-use crate::{filter::Filter, Io};
+use crate::filter::{Filter, FilterReadStatus};
+use crate::{buf::Stack, Io, IoRef, ReadStatus, WriteStatus};
/// Sealed filter type
pub struct Sealed(pub(crate) Box<dyn Filter>);
@@ -11,6 +12,44 @@ impl fmt::Debug for Sealed {
}
}
impl Filter for Sealed {
#[inline]
fn query(&self, id: TypeId) -> Option<Box<dyn Any>> {
self.0.query(id)
}
#[inline]
fn process_read_buf(
&self,
io: &IoRef,
stack: &Stack,
idx: usize,
nbytes: usize,
) -> io::Result<FilterReadStatus> {
self.0.process_read_buf(io, stack, idx, nbytes)
}
#[inline]
fn process_write_buf(&self, io: &IoRef, stack: &Stack, idx: usize) -> io::Result<()> {
self.0.process_write_buf(io, stack, idx)
}
#[inline]
fn shutdown(&self, io: &IoRef, stack: &Stack, idx: usize) -> io::Result<Poll<()>> {
self.0.shutdown(io, stack, idx)
}
#[inline]
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<ReadStatus> {
self.0.poll_read_ready(cx)
}
#[inline]
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<WriteStatus> {
self.0.poll_write_ready(cx)
}
}
#[derive(Debug)]
/// Boxed `Io` object with erased filter type
pub struct IoBoxed(Io<Sealed>);
@@ -25,12 +64,6 @@ impl IoBoxed {
}
}
-impl From<Io<Sealed>> for IoBoxed {
-fn from(io: Io<Sealed>) -> Self {
-Self(io)
-}
-}
impl<F: Filter> From<Io<F>> for IoBoxed {
fn from(io: Io<F>) -> Self {
Self(io.seal())
@@ -45,3 +78,9 @@ impl ops::Deref for IoBoxed {
&self.0
}
}
impl From<IoBoxed> for Io<Sealed> {
fn from(value: IoBoxed) -> Self {
value.0
}
}


@@ -1,6 +1,6 @@
-use std::{cell::Cell, fmt, future::poll_fn, io, task::Context, task::Poll};
+use std::{cell::Cell, fmt, future::poll_fn, io, task::ready, task::Context, task::Poll};
-use ntex_bytes::{BufMut, BytesVec};
+use ntex_bytes::{Buf, BufMut, BytesVec};
use ntex_util::{future::lazy, future::select, future::Either, time::sleep, time::Sleep};
use crate::{AsyncRead, AsyncWrite, Flags, IoRef, ReadStatus, WriteStatus};
@@ -19,6 +19,13 @@ impl ReadContext {
Self(io.clone(), Cell::new(None))
}
#[doc(hidden)]
#[inline]
/// Get io context
pub fn context(&self) -> IoContext {
IoContext::new(&self.0)
}
#[inline]
/// Io tag
pub fn tag(&self) -> &'static str {
@@ -87,7 +94,7 @@ impl ReadContext {
// handle incoming data
let total2 = buf.len();
-let nbytes = if total2 > total { total2 - total } else { 0 };
+let nbytes = total2.saturating_sub(total);
let total = total2;
if let Some(mut first_buf) = inner.buffer.get_read_source() {
@@ -121,7 +128,7 @@ impl ReadContext {
);
// dest buffer has new data, wake up dispatcher
inner.dispatch_task.wake();
-} else if inner.flags.get().contains(Flags::RD_NOTIFY) {
+} else if inner.flags.get().is_waiting_for_read() {
// in case of "notify" we must wake up dispatch task
// if we read any data from source
inner.dispatch_task.wake();
@@ -342,3 +349,604 @@ impl WriteContextBuf {
}
}
}
/// Context for io read task
pub struct IoContext(IoRef);
impl fmt::Debug for IoContext {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("IoContext").field("io", &self.0).finish()
}
}
impl IoContext {
pub(crate) fn new(io: &IoRef) -> Self {
Self(io.clone())
}
#[inline]
/// Io tag
pub fn tag(&self) -> &'static str {
self.0.tag()
}
#[doc(hidden)]
/// Io flags
pub fn flags(&self) -> crate::flags::Flags {
self.0.flags()
}
#[inline]
/// Check readiness for read operations
pub fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<ReadStatus> {
self.shutdown_filters();
self.0.filter().poll_read_ready(cx)
}
#[inline]
/// Check readiness for write operations
pub fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<WriteStatus> {
self.0.filter().poll_write_ready(cx)
}
#[inline]
/// Mark io as stopped, with an optional error
pub fn stopped(&self, e: Option<io::Error>) {
self.0 .0.io_stopped(e);
}
/// Wait when io get closed or preparing for close
pub async fn shutdown(&self, flush_buf: bool) {
let st = &self.0 .0;
let mut timeout = None;
poll_fn(|cx| {
let flags = self.0.flags();
if flags.intersects(Flags::IO_STOPPING | Flags::IO_STOPPED) {
Poll::Ready(())
} else {
st.write_task.register(cx.waker());
if flags.contains(Flags::IO_STOPPING_FILTERS) {
if timeout.is_none() {
timeout = Some(sleep(st.disconnect_timeout.get()));
}
if timeout.as_ref().unwrap().poll_elapsed(cx).is_ready() {
st.dispatch_task.wake();
st.insert_flags(Flags::IO_STOPPING);
return Poll::Ready(());
}
}
Poll::Pending
}
})
.await;
if flush_buf && !self.0.flags().contains(Flags::WR_PAUSED) {
st.insert_flags(Flags::WR_TASK_WAIT);
poll_fn(|cx| {
let flags = self.0.flags();
if flags.intersects(Flags::WR_PAUSED | Flags::IO_STOPPED) {
Poll::Ready(())
} else {
st.write_task.register(cx.waker());
if timeout.is_none() {
timeout = Some(sleep(st.disconnect_timeout.get()));
}
if timeout.as_ref().unwrap().poll_elapsed(cx).is_ready() {
Poll::Ready(())
} else {
Poll::Pending
}
}
})
.await;
}
}
/// Get read buffer
pub fn get_read_buf(&self) -> Poll<BytesVec> {
let inner = &self.0 .0;
if let Some(waker) = inner.read_task.take() {
let mut cx = Context::from_waker(&waker);
if let Poll::Ready(ReadStatus::Ready) = self.0.filter().poll_read_ready(&mut cx)
{
let mut buf = if inner.flags.get().is_read_buf_ready() {
// read buffer is still not read by dispatcher
// we cannot touch it
inner.pool.get().get_read_buf()
} else {
inner
.buffer
.get_read_source()
.unwrap_or_else(|| inner.pool.get().get_read_buf())
};
// make sure we've got room
let (hw, lw) = self.0.memory_pool().read_params().unpack();
let remaining = buf.remaining_mut();
if remaining < lw {
buf.reserve(hw - remaining);
}
return Poll::Ready(buf);
}
}
Poll::Pending
}
pub fn release_read_buf(&self, buf: BytesVec) {
let inner = &self.0 .0;
if let Some(mut first_buf) = inner.buffer.get_read_source() {
first_buf.extend_from_slice(&buf);
inner.buffer.set_read_source(&self.0, first_buf);
} else {
inner.buffer.set_read_source(&self.0, buf);
}
}
/// Set read buffer
pub fn set_read_buf(&self, result: io::Result<usize>, buf: BytesVec) -> Poll<()> {
let inner = &self.0 .0;
let (hw, _) = self.0.memory_pool().read_params().unpack();
if let Some(mut first_buf) = inner.buffer.get_read_source() {
first_buf.extend_from_slice(&buf);
inner.buffer.set_read_source(&self.0, first_buf);
} else {
inner.buffer.set_read_source(&self.0, buf);
}
match result {
Ok(0) => {
inner.io_stopped(None);
Poll::Ready(())
}
Ok(nbytes) => {
let filter = self.0.filter();
let res = filter
.process_read_buf(&self.0, &inner.buffer, 0, nbytes)
.and_then(|status| {
if status.nbytes > 0 {
// dest buffer has new data, wake up dispatcher
if inner.buffer.read_destination_size() >= hw {
log::trace!(
"{}: Io read buffer is too large {}, enable read back-pressure",
self.0.tag(),
nbytes
);
inner.insert_flags(Flags::BUF_R_READY | Flags::BUF_R_FULL);
} else {
inner.insert_flags(Flags::BUF_R_READY);
if nbytes >= hw {
// read task is paused because of read back-pressure
// but there is no new data in top most read buffer
// so we need to wake up read task to read more data
// otherwise read task would sleep forever
inner.read_task.wake();
}
}
log::trace!(
"{}: New {} bytes available, wakeup dispatcher",
self.0.tag(),
nbytes
);
if !inner.dispatch_task.wake_checked() {
log::error!("Dispatcher waker is not registered");
}
} else {
if nbytes >= hw {
// read task is paused because of read back-pressure
// but there is no new data in top most read buffer
// so we need to wake up read task to read more data
// otherwise read task would sleep forever
inner.read_task.wake();
}
if inner.flags.get().is_waiting_for_read() {
// in case of "notify" we must wake up dispatch task
// if we read any data from source
inner.dispatch_task.wake();
}
}
// while reading, filter wrote some data
// in that case filters need to process write buffers
// and potentially wake write task
if status.need_write {
inner.write_task.wake();
filter.process_write_buf(&self.0, &inner.buffer, 0)
} else {
Ok(())
}
});
if let Err(err) = res {
inner.io_stopped(Some(err));
Poll::Ready(())
} else {
self.shutdown_filters();
Poll::Pending
}
}
Err(e) => {
inner.io_stopped(Some(e));
Poll::Ready(())
}
}
}
/// Get write buffer
pub fn get_write_buf(&self) -> Poll<BytesVec> {
let inner = &self.0 .0;
// check write readiness
if let Some(waker) = inner.write_task.take() {
let ready = self
.0
.filter()
.poll_write_ready(&mut Context::from_waker(&waker));
let buf = if matches!(
ready,
Poll::Ready(WriteStatus::Ready | WriteStatus::Shutdown)
) {
inner.buffer.get_write_destination().and_then(|buf| {
if buf.is_empty() {
None
} else {
Some(buf)
}
})
} else {
None
};
if let Some(buf) = buf {
return Poll::Ready(buf);
}
}
Poll::Pending
}
pub fn release_write_buf(&self, mut buf: BytesVec) {
let inner = &self.0 .0;
if let Some(b) = inner.buffer.get_write_destination() {
buf.extend_from_slice(&b);
self.0.memory_pool().release_write_buf(b);
}
inner.buffer.set_write_destination(buf);
// if write buffer is smaller than high watermark value, turn off back-pressure
let len = inner.buffer.write_destination_size();
let mut flags = inner.flags.get();
if len == 0 {
if flags.is_waiting_for_write() {
flags.waiting_for_write_is_done();
inner.dispatch_task.wake();
}
flags.insert(Flags::WR_PAUSED);
inner.flags.set(flags);
} else if flags.contains(Flags::BUF_W_BACKPRESSURE)
&& len < inner.pool.get().write_params_high() << 1
{
flags.remove(Flags::BUF_W_BACKPRESSURE);
inner.flags.set(flags);
inner.dispatch_task.wake();
}
inner.flags.set(flags);
}
/// Set write buffer
pub fn set_write_buf(&self, result: io::Result<usize>, mut buf: BytesVec) -> Poll<()> {
let result = match result {
Ok(0) => {
log::trace!("{}: Disconnected during flush", self.tag());
Err(io::Error::new(
io::ErrorKind::WriteZero,
"failed to write frame to transport",
))
}
Ok(n) => {
if n == buf.len() {
buf.clear();
Ok(0)
} else {
buf.advance(n);
Ok(buf.len())
}
}
Err(e) => Err(e),
};
let inner = &self.0 .0;
// set buffer back
let result = match result {
Ok(0) => {
// log::debug!("{}: WROTE ALL {:?}", self.0.tag(), inner.buffer.write_destination_size());
self.0.memory_pool().release_write_buf(buf);
Ok(inner.buffer.write_destination_size())
}
Ok(_) => {
if let Some(b) = inner.buffer.get_write_destination() {
buf.extend_from_slice(&b);
self.0.memory_pool().release_write_buf(b);
}
let l = buf.len();
// log::debug!("{}: WROTE SOME {:?}", self.0.tag(), l);
inner.buffer.set_write_destination(buf);
Ok(l)
}
Err(e) => Err(e),
};
let mut flags = inner.flags.get();
match result {
Ok(0) => {
// all data has been written
flags.insert(Flags::WR_PAUSED);
if flags.is_task_waiting_for_write() {
flags.task_waiting_for_write_is_done();
inner.write_task.wake();
}
if flags.is_waiting_for_write() {
flags.waiting_for_write_is_done();
inner.dispatch_task.wake();
}
inner.flags.set(flags);
Poll::Ready(())
}
Ok(len) => {
// if write buffer is smaller than high watermark value, turn off back-pressure
if flags.contains(Flags::BUF_W_BACKPRESSURE)
&& len < inner.pool.get().write_params_high() << 1
{
flags.remove(Flags::BUF_W_BACKPRESSURE);
inner.flags.set(flags);
inner.dispatch_task.wake();
}
Poll::Pending
}
Err(e) => {
inner.io_stopped(Some(e));
Poll::Ready(())
}
}
}
/// Check read readiness
pub fn is_read_ready(&self) -> bool {
// check read readiness
if let Some(waker) = self.0 .0.read_task.take() {
let mut cx = Context::from_waker(&waker);
if let Poll::Ready(ReadStatus::Ready) = self.0.filter().poll_read_ready(&mut cx)
{
return true;
}
}
false
}
pub fn with_read_buf<F>(&self, f: F) -> Poll<()>
where
F: FnOnce(&mut BytesVec) -> Poll<io::Result<usize>>,
{
let inner = &self.0 .0;
let (hw, lw) = self.0.memory_pool().read_params().unpack();
let result = inner.buffer.with_read_source(&self.0, |buf| {
// make sure we've got room
let remaining = buf.remaining_mut();
if remaining < lw {
buf.reserve(hw - remaining);
}
f(buf)
});
// handle buffer changes
match result {
Poll::Ready(Ok(0)) => {
inner.io_stopped(None);
Poll::Ready(())
}
Poll::Ready(Ok(nbytes)) => {
let filter = self.0.filter();
let _ = filter
.process_read_buf(&self.0, &inner.buffer, 0, nbytes)
.and_then(|status| {
if status.nbytes > 0 {
// dest buffer has new data, wake up dispatcher
if inner.buffer.read_destination_size() >= hw {
log::trace!(
"{}: Io read buffer is too large {}, enable read back-pressure",
self.0.tag(),
nbytes
);
inner.insert_flags(Flags::BUF_R_READY | Flags::BUF_R_FULL);
} else {
inner.insert_flags(Flags::BUF_R_READY);
if nbytes >= hw {
// read task is paused because of read back-pressure
// but there is no new data in top most read buffer
// so we need to wake up read task to read more data
// otherwise read task would sleep forever
inner.read_task.wake();
}
}
log::trace!(
"{}: New {} bytes available, wakeup dispatcher",
self.0.tag(),
nbytes
);
if !inner.dispatch_task.wake_checked() {
log::error!("Dispatcher waker is not registered");
}
} else {
if nbytes >= hw {
// read task is paused because of read back-pressure
// but there is no new data in top most read buffer
// so we need to wake up read task to read more data
// otherwise read task would sleep forever
inner.read_task.wake();
}
if inner.flags.get().is_waiting_for_read() {
// in case of "notify" we must wake up dispatch task
// if we read any data from source
inner.dispatch_task.wake();
}
}
// while reading, filter wrote some data
// in that case filters need to process write buffers
// and potentially wake write task
if status.need_write {
filter.process_write_buf(&self.0, &inner.buffer, 0)
} else {
Ok(())
}
})
.map_err(|err| {
inner.dispatch_task.wake();
inner.io_stopped(Some(err));
inner.insert_flags(Flags::BUF_R_READY);
});
Poll::Pending
}
Poll::Ready(Err(e)) => {
inner.io_stopped(Some(e));
Poll::Ready(())
}
Poll::Pending => {
self.shutdown_filters();
Poll::Pending
}
}
}
/// Get write buffer
pub fn with_write_buf<F>(&self, f: F) -> Poll<()>
where
F: FnOnce(&BytesVec) -> Poll<io::Result<usize>>,
{
let inner = &self.0 .0;
let result = inner.buffer.with_write_destination(&self.0, |buf| {
let Some(buf) =
buf.and_then(|buf| if buf.is_empty() { None } else { Some(buf) })
else {
return Poll::Ready(Ok(0));
};
match ready!(f(buf)) {
Ok(0) => {
log::trace!("{}: Disconnected during flush", self.tag());
Poll::Ready(Err(io::Error::new(
io::ErrorKind::WriteZero,
"failed to write frame to transport",
)))
}
Ok(n) => {
if n == buf.len() {
buf.clear();
Poll::Ready(Ok(0))
} else {
buf.advance(n);
Poll::Ready(Ok(buf.len()))
}
}
Err(e) => Poll::Ready(Err(e)),
}
});
let mut flags = inner.flags.get();
let result = match result {
Poll::Pending => {
flags.remove(Flags::WR_PAUSED);
Poll::Pending
}
Poll::Ready(Ok(0)) => {
// all data has been written
flags.insert(Flags::WR_PAUSED);
if flags.is_task_waiting_for_write() {
flags.task_waiting_for_write_is_done();
inner.write_task.wake();
}
if flags.is_waiting_for_write() {
flags.waiting_for_write_is_done();
inner.dispatch_task.wake();
}
Poll::Ready(())
}
Poll::Ready(Ok(len)) => {
// if write buffer is smaller than high watermark value, turn off back-pressure
if flags.contains(Flags::BUF_W_BACKPRESSURE)
&& len < inner.pool.get().write_params_high() << 1
{
flags.remove(Flags::BUF_W_BACKPRESSURE);
inner.dispatch_task.wake();
}
Poll::Pending
}
Poll::Ready(Err(e)) => {
self.0 .0.io_stopped(Some(e));
Poll::Ready(())
}
};
inner.flags.set(flags);
result
}
fn shutdown_filters(&self) {
let io = &self.0;
let st = &self.0 .0;
if st.flags.get().contains(Flags::IO_STOPPING_FILTERS) {
let flags = st.flags.get();
if !flags.intersects(Flags::IO_STOPPED | Flags::IO_STOPPING) {
let filter = io.filter();
match filter.shutdown(io, &st.buffer, 0) {
Ok(Poll::Ready(())) => {
st.dispatch_task.wake();
st.insert_flags(Flags::IO_STOPPING);
}
Ok(Poll::Pending) => {
// check read buffer, if buffer is not consumed it is unlikely
// that filter will properly complete shutdown
if flags.contains(Flags::RD_PAUSED)
|| flags.contains(Flags::BUF_R_FULL | Flags::BUF_R_READY)
{
st.dispatch_task.wake();
st.insert_flags(Flags::IO_STOPPING);
}
}
Err(err) => {
st.io_stopped(Some(err));
}
}
if let Err(err) = filter.process_write_buf(io, &st.buffer, 0) {
st.io_stopped(Some(err));
}
}
}
}
}
impl Clone for IoContext {
fn clone(&self) -> Self {
Self(self.0.clone())
}
}


@@ -27,7 +27,7 @@ where
S: ServiceFactory<IoBoxed, C>,
C: Clone,
{
-chain_factory(fn_service(|io: Io<F>| Ready::Ok(IoBoxed::from(io))))
+chain_factory(fn_service(|io: Io<F>| Ready::Ok(io.boxed())))
.map_init_err(|_| panic!())
.and_then(srv)
}


@@ -1,5 +1,9 @@
# Changes
## [0.1.4] - 2025-03-14
* Enable env_logger for test macro
## [0.1.2] - 2021-02-25
* Export runtime from ntex crate


@@ -1,6 +1,6 @@
[package]
name = "ntex-macros"
-version = "0.1.3"
+version = "0.1.4"
description = "ntex proc macros"
readme = "README.md"
authors = ["ntex contributors <team@ntex.rs>"]
@@ -18,4 +18,3 @@ proc-macro2 = "^1"
[dev-dependencies]
ntex = "2"
futures = "0.3"
-env_logger = "0.11"


@@ -262,6 +262,7 @@ pub fn rt_test(_: TokenStream, item: TokenStream) -> TokenStream {
quote! {
#(#attrs)*
fn #name() #ret {
+ntex::util::enable_test_logging();
ntex::rt::System::new("test")
.block_on(async { #body })
}
@@ -271,6 +272,7 @@ pub fn rt_test(_: TokenStream, item: TokenStream) -> TokenStream {
#[test]
#(#attrs)*
fn #name() #ret {
+ntex::util::enable_test_logging();
ntex::rt::System::new("test")
.block_on(async { #body })
}
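For orientation, a minimal sketch of a test that goes through this macro path (the test name and body are hypothetical, and `log` is assumed to be available as a dev-dependency); with the change above, the generated wrapper calls `ntex::util::enable_test_logging()` before starting the `System`, so `RUST_LOG`-controlled output shows up in test runs:

```rust
// Hypothetical test; the attribute expands to the wrapper shown in the diff,
// which now enables env_logger-based test logging before block_on runs the body.
#[ntex::test]
async fn connects_and_logs() {
    log::debug!("visible when RUST_LOG=debug is set");
    assert_eq!(2 + 2, 4);
}
```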


@@ -1,5 +1,55 @@
# Changes
## [2.5.10] - 2025-03-28
* Better closed sockets handling
## [2.5.9] - 2025-03-27
* Handle closed sockets
## [2.5.8] - 2025-03-25
* Update neon runtime
## [2.5.7] - 2025-03-21
* Simplify neon poll impl
## [2.5.6] - 2025-03-20
* Redesign neon poll support
## [2.5.5] - 2025-03-17
* Add check for required io-uring opcodes
* Handle io-uring cancelation
## [2.5.4] - 2025-03-15
* Close FD in various case for poll driver
## [2.5.3] - 2025-03-14
* Fix operation cancelation handling for poll driver
## [2.5.2] - 2025-03-14
* Fix operation cancelation handling for io-uring driver
## [2.5.1] - 2025-03-14
* Fix socket connect for io-uring driver
## [2.5.0] - 2025-03-12
* Add neon runtime support
* Drop glommio support
* Drop async-std support
## [2.4.0] - 2024-09-25
* Update to glommio v0.9


@@ -1,6 +1,6 @@
[package]
name = "ntex-net"
-version = "2.4.0"
+version = "2.5.10"
authors = ["ntex contributors <team@ntex.rs>"]
description = "ntexwork utils for ntex framework"
keywords = ["network", "framework", "async", "futures"]
@@ -24,28 +24,36 @@ tokio = ["ntex-rt/tokio", "ntex-tokio"]
# compio runtime
compio = ["ntex-rt/compio", "ntex-compio"]
-# glommio runtime
-glommio = ["ntex-rt/glommio", "ntex-glommio"]
-# async-std runtime
-async-std = ["ntex-rt/async-std", "ntex-async-std"]
+# neon runtime
+neon = ["ntex-rt/neon", "ntex-neon", "slab", "socket2"]
+polling = ["ntex-neon/polling", "dep:polling", "socket2"]
+io-uring = ["ntex-neon/io-uring", "dep:io-uring", "socket2"]
[dependencies]
ntex-service = "3.3"
ntex-bytes = "0.1"
ntex-http = "0.1"
-ntex-io = "2.8"
+ntex-io = "2.11.1"
-ntex-rt = "0.4.21"
+ntex-rt = "0.4.25"
ntex-util = "2.5"
ntex-tokio = { version = "0.5.3", optional = true }
ntex-compio = { version = "0.2.4", optional = true }
-ntex-glommio = { version = "0.5.2", optional = true }
-ntex-async-std = { version = "0.5.1", optional = true }
+ntex-neon = { version = "0.1.15", optional = true }
-log = "0.4"
-thiserror = "1"
+bitflags = { workspace = true }
+cfg-if = { workspace = true }
+log = { workspace = true }
+libc = { workspace = true }
+slab = { workspace = true, optional = true }
+socket2 = { workspace = true, optional = true, features = ["all"] }
+thiserror = { workspace = true }
+# Linux specific dependencies
+[target.'cfg(target_os = "linux")'.dependencies]
+io-uring = { workspace = true, optional = true }
+polling = { workspace = true, optional = true }
[dev-dependencies]
ntex = "2"
+env_logger = "0.11"


@@ -6,63 +6,18 @@ pub use ntex_tokio::{from_tcp_stream, tcp_connect, tcp_connect_in};
#[cfg(all(unix, feature = "tokio"))]
pub use ntex_tokio::{from_unix_stream, unix_connect, unix_connect_in};
-#[cfg(all(
-feature = "compio",
-not(feature = "tokio"),
-not(feature = "async-std"),
-not(feature = "glommio")
-))]
+#[cfg(all(feature = "compio", not(feature = "tokio"), not(feature = "neon")))]
pub use ntex_compio::{from_tcp_stream, tcp_connect, tcp_connect_in};
#[cfg(all(
unix,
feature = "compio",
not(feature = "tokio"),
-not(feature = "async-std"),
-not(feature = "glommio")
+not(feature = "neon")
))]
pub use ntex_compio::{from_unix_stream, unix_connect, unix_connect_in};
-#[cfg(all(
-feature = "async-std",
-not(feature = "tokio"),
-not(feature = "compio"),
-not(feature = "glommio")
-))]
-pub use ntex_async_std::{from_tcp_stream, tcp_connect, tcp_connect_in};
-#[cfg(all(
-unix,
-feature = "async-std",
-not(feature = "tokio"),
-not(feature = "compio"),
-not(feature = "glommio")
-))]
-pub use ntex_async_std::{from_unix_stream, unix_connect, unix_connect_in};
-#[cfg(all(
-feature = "glommio",
-not(feature = "tokio"),
-not(feature = "compio"),
-not(feature = "async-std")
-))]
-pub use ntex_glommio::{from_tcp_stream, tcp_connect, tcp_connect_in};
-#[cfg(all(
-unix,
-feature = "glommio",
-not(feature = "tokio"),
-not(feature = "compio"),
-not(feature = "async-std")
-))]
-pub use ntex_glommio::{from_unix_stream, unix_connect, unix_connect_in};
-#[cfg(all(
-not(feature = "tokio"),
-not(feature = "compio"),
-not(feature = "async-std"),
-not(feature = "glommio")
-))]
+#[cfg(all(not(feature = "tokio"), not(feature = "compio"), not(feature = "neon")))]
mod no_rt {
use ntex_io::Io;
@@ -127,10 +82,5 @@ mod no_rt {
}
}
-#[cfg(all(
-not(feature = "tokio"),
-not(feature = "compio"),
-not(feature = "async-std"),
-not(feature = "glommio")
-))]
+#[cfg(all(not(feature = "tokio"), not(feature = "compio"), not(feature = "neon")))]
pub use no_rt::*;


@@ -197,7 +197,7 @@ impl<T: Address> Future for TcpConnectorResponse<T> {
Poll::Ready(Ok(sock)) => {
let req = this.req.take().unwrap();
log::trace!(
-"{}: TCP connector - successfully connected to connecting to {:?} - {:?}",
+"{}: TCP connector - successfully connected to {:?} - {:?}",
this.tag,
req.host(),
sock.query::<types::PeerAddr>().get()

ntex-net/src/helpers.rs (new file)

@@ -0,0 +1,86 @@
use std::{io, net::SocketAddr, os::fd::FromRawFd, path::Path};
use ntex_neon::syscall;
use ntex_util::channel::oneshot::channel;
use socket2::{Protocol, SockAddr, Socket, Type};
pub(crate) fn pool_io_err<T, E>(result: std::result::Result<T, E>) -> io::Result<T> {
result.map_err(|_| io::Error::new(io::ErrorKind::Other, "Thread pool panic"))
}
pub(crate) async fn connect(addr: SocketAddr) -> io::Result<Socket> {
let addr = SockAddr::from(addr);
let domain = addr.domain().into();
connect_inner(addr, domain, Type::STREAM.into(), Protocol::TCP.into()).await
}
pub(crate) async fn connect_unix(path: impl AsRef<Path>) -> io::Result<Socket> {
let addr = SockAddr::unix(path)?;
connect_inner(addr, socket2::Domain::UNIX.into(), Type::STREAM.into(), 0).await
}
async fn connect_inner(
addr: SockAddr,
domain: i32,
socket_type: i32,
protocol: i32,
) -> io::Result<Socket> {
#[allow(unused_mut)]
let mut ty = socket_type;
#[cfg(any(
target_os = "android",
target_os = "dragonfly",
target_os = "freebsd",
target_os = "fuchsia",
target_os = "hurd",
target_os = "illumos",
target_os = "linux",
target_os = "netbsd",
target_os = "openbsd",
))]
{
ty |= libc::SOCK_CLOEXEC;
}
let fd = ntex_rt::spawn_blocking(move || syscall!(libc::socket(domain, ty, protocol)))
.await
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))
.and_then(pool_io_err)?;
let (sender, rx) = channel();
crate::rt_impl::connect::ConnectOps::current().connect(fd, addr, sender)?;
rx.await
.map_err(|_| io::Error::new(io::ErrorKind::Other, "IO Driver is gone"))
.and_then(|item| item)?;
Ok(unsafe { Socket::from_raw_fd(fd) })
}
pub(crate) fn prep_socket(sock: Socket) -> io::Result<Socket> {
#[cfg(not(any(
target_os = "android",
target_os = "dragonfly",
target_os = "freebsd",
target_os = "fuchsia",
target_os = "hurd",
target_os = "illumos",
target_os = "linux",
target_os = "netbsd",
target_os = "openbsd",
target_os = "espidf",
target_os = "vita",
)))]
sock.set_cloexec(true)?;
#[cfg(any(
target_os = "ios",
target_os = "macos",
target_os = "tvos",
target_os = "watchos",
))]
sock.set_nosigpipe(true)?;
sock.set_nonblocking(true)?;
Ok(sock)
}


@@ -1,5 +1,6 @@
//! Utility for async runtime abstraction
#![deny(rust_2018_idioms, unreachable_pub, missing_debug_implementations)]
+#![allow(unused_variables, dead_code)]
mod compat;
pub mod connect;
@@ -7,4 +8,25 @@ pub mod connect;
pub use ntex_io::Io;
pub use ntex_rt::{spawn, spawn_blocking};
-pub use self::compat::*;
+cfg_if::cfg_if! {
if #[cfg(all(feature = "neon", target_os = "linux", feature = "io-uring"))] {
#[path = "rt_uring/mod.rs"]
mod rt_impl;
pub use self::rt_impl::{
from_tcp_stream, from_unix_stream, tcp_connect, tcp_connect_in, unix_connect,
unix_connect_in,
};
} else if #[cfg(all(unix, feature = "neon"))] {
#[path = "rt_polling/mod.rs"]
mod rt_impl;
pub use self::rt_impl::{
from_tcp_stream, from_unix_stream, tcp_connect, tcp_connect_in, unix_connect,
unix_connect_in,
};
} else {
pub use self::compat::*;
}
}
#[cfg(all(unix, feature = "neon"))]
mod helpers;
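As a rough usage sketch (not part of the diff; the address and function name below are placeholders), downstream code keeps calling the same re-exported connector API, and the cfg_if block above decides whether it is served by the io-uring driver, the polling driver, or the compat backends:

```rust
use std::net::SocketAddr;

// Hypothetical caller: tcp_connect resolves to rt_uring::tcp_connect,
// rt_polling::tcp_connect, or a compat backend depending on enabled features.
async fn open_connection() -> std::io::Result<ntex_net::Io> {
    let addr: SocketAddr = "127.0.0.1:8443".parse().expect("valid socket address");
    ntex_net::tcp_connect(addr).await
}
```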


@@ -0,0 +1,111 @@
use std::os::fd::{AsRawFd, RawFd};
use std::{cell::RefCell, io, rc::Rc, task::Poll};
use ntex_neon::driver::{DriverApi, Event, Handler};
use ntex_neon::{syscall, Runtime};
use ntex_util::channel::oneshot::Sender;
use slab::Slab;
use socket2::SockAddr;
#[derive(Clone)]
pub(crate) struct ConnectOps(Rc<ConnectOpsInner>);
#[derive(Debug)]
enum Change {
Event(Event),
Error(io::Error),
}
struct ConnectOpsBatcher {
inner: Rc<ConnectOpsInner>,
}
struct Item {
fd: RawFd,
sender: Sender<io::Result<()>>,
}
struct ConnectOpsInner {
api: DriverApi,
connects: RefCell<Slab<Item>>,
}
impl ConnectOps {
pub(crate) fn current() -> Self {
Runtime::value(|rt| {
let mut inner = None;
rt.driver().register(|api| {
let ops = Rc::new(ConnectOpsInner {
api,
connects: RefCell::new(Slab::new()),
});
inner = Some(ops.clone());
Box::new(ConnectOpsBatcher { inner: ops })
});
ConnectOps(inner.unwrap())
})
}
pub(crate) fn connect(
&self,
fd: RawFd,
addr: SockAddr,
sender: Sender<io::Result<()>>,
) -> io::Result<usize> {
let result = syscall!(break libc::connect(fd, addr.as_ptr(), addr.len()));
if let Poll::Ready(res) = result {
res?;
}
let item = Item { fd, sender };
let id = self.0.connects.borrow_mut().insert(item);
self.0.api.attach(fd, id as u32, Some(Event::writable(0)));
Ok(id)
}
}
impl Handler for ConnectOpsBatcher {
fn event(&mut self, id: usize, event: Event) {
log::debug!("connect-fd is readable {:?}", id);
let mut connects = self.inner.connects.borrow_mut();
if connects.contains(id) {
let item = connects.remove(id);
if event.writable {
let mut err: libc::c_int = 0;
let mut err_len = std::mem::size_of::<libc::c_int>() as libc::socklen_t;
let res = syscall!(libc::getsockopt(
item.fd.as_raw_fd(),
libc::SOL_SOCKET,
libc::SO_ERROR,
&mut err as *mut _ as *mut _,
&mut err_len
));
let res = if err == 0 {
res.map(|_| ())
} else {
Err(io::Error::from_raw_os_error(err))
};
self.inner.api.detach(item.fd, id as u32);
let _ = item.sender.send(res);
}
}
}
fn error(&mut self, id: usize, err: io::Error) {
let mut connects = self.inner.connects.borrow_mut();
if connects.contains(id) {
let item = connects.remove(id);
let _ = item.sender.send(Err(err));
self.inner.api.detach(item.fd, id as u32);
}
}
}


@@ -0,0 +1,368 @@
use std::os::fd::{AsRawFd, RawFd};
use std::{cell::Cell, cell::RefCell, future::Future, io, mem, rc::Rc, task, task::Poll};
use ntex_neon::driver::{DriverApi, Event, Handler};
use ntex_neon::{syscall, Runtime};
use slab::Slab;
use ntex_bytes::BufMut;
use ntex_io::IoContext;
pub(crate) struct StreamCtl<T> {
id: u32,
inner: Rc<StreamOpsInner<T>>,
}
bitflags::bitflags! {
#[derive(Copy, Clone, Debug)]
struct Flags: u8 {
const RD = 0b0000_0001;
const WR = 0b0000_0010;
}
}
struct StreamItem<T> {
io: Option<T>,
fd: RawFd,
flags: Flags,
ref_count: u16,
context: IoContext,
}
pub(crate) struct StreamOps<T>(Rc<StreamOpsInner<T>>);
struct StreamOpsHandler<T> {
inner: Rc<StreamOpsInner<T>>,
}
struct StreamOpsInner<T> {
api: DriverApi,
delayd_drop: Cell<bool>,
feed: RefCell<Vec<u32>>,
streams: Cell<Option<Box<Slab<StreamItem<T>>>>>,
}
impl<T> StreamItem<T> {
fn tag(&self) -> &'static str {
self.context.tag()
}
}
impl<T: AsRawFd + 'static> StreamOps<T> {
pub(crate) fn current() -> Self {
Runtime::value(|rt| {
let mut inner = None;
rt.driver().register(|api| {
let ops = Rc::new(StreamOpsInner {
api,
feed: RefCell::new(Vec::new()),
delayd_drop: Cell::new(false),
streams: Cell::new(Some(Box::new(Slab::new()))),
});
inner = Some(ops.clone());
Box::new(StreamOpsHandler { inner: ops })
});
StreamOps(inner.unwrap())
})
}
pub(crate) fn register(&self, io: T, context: IoContext) -> StreamCtl<T> {
let fd = io.as_raw_fd();
let stream = self.0.with(move |streams| {
let item = StreamItem {
fd,
context,
io: Some(io),
ref_count: 1,
flags: Flags::empty(),
};
StreamCtl {
id: streams.insert(item) as u32,
inner: self.0.clone(),
}
});
self.0.api.attach(
fd,
stream.id,
Some(Event::new(0, false, false).with_interrupt()),
);
stream
}
}
impl<T> Clone for StreamOps<T> {
fn clone(&self) -> Self {
Self(self.0.clone())
}
}
impl<T> Handler for StreamOpsHandler<T> {
fn event(&mut self, id: usize, ev: Event) {
self.inner.with(|streams| {
if !streams.contains(id) {
return;
}
let item = &mut streams[id];
if item.io.is_none() {
return;
}
log::debug!("{}: FD event {:?} event: {:?}", item.tag(), id, ev);
// handle HUP
if ev.is_interrupt() {
item.context.stopped(None);
close(id as u32, item, &self.inner.api, None, true);
return;
}
let mut renew_ev = Event::new(0, false, false).with_interrupt();
if ev.readable {
let res = item.context.with_read_buf(|buf| {
let chunk = buf.chunk_mut();
let result = task::ready!(syscall!(
break libc::read(item.fd, chunk.as_mut_ptr() as _, chunk.len())
));
if let Ok(size) = result {
log::debug!("{}: data {:?}, s: {:?}", item.tag(), item.fd, size);
unsafe { buf.advance_mut(size) };
}
Poll::Ready(result)
});
if res.is_pending() && item.context.is_read_ready() {
renew_ev.readable = true;
item.flags.insert(Flags::RD);
} else {
item.flags.remove(Flags::RD);
}
} else if item.flags.contains(Flags::RD) {
renew_ev.readable = true;
}
if ev.writable {
let result = item.context.with_write_buf(|buf| {
log::debug!("{}: write {:?} s: {:?}", item.tag(), item.fd, buf.len());
syscall!(break libc::write(item.fd, buf[..].as_ptr() as _, buf.len()))
});
if result.is_pending() {
renew_ev.writable = true;
item.flags.insert(Flags::WR);
} else {
item.flags.remove(Flags::WR);
}
} else if item.flags.contains(Flags::WR) {
renew_ev.writable = true;
}
self.inner.api.modify(item.fd, id as u32, renew_ev);
// delayed drops
if self.inner.delayd_drop.get() {
for id in self.inner.feed.borrow_mut().drain(..) {
let item = &mut streams[id as usize];
item.ref_count -= 1;
if item.ref_count == 0 {
let mut item = streams.remove(id as usize);
log::debug!(
"{}: Drop ({}), {:?}, has-io: {}",
item.tag(),
id,
item.fd,
item.io.is_some()
);
close(id, &mut item, &self.inner.api, None, true);
}
}
self.inner.delayd_drop.set(false);
}
});
}
fn error(&mut self, id: usize, err: io::Error) {
self.inner.with(|streams| {
if let Some(item) = streams.get_mut(id) {
log::debug!(
"{}: FD is failed ({}) {:?}, err: {:?}",
item.tag(),
id,
item.fd,
err
);
close(id as u32, item, &self.inner.api, Some(err), false);
}
})
}
}
impl<T> StreamOpsInner<T> {
fn with<F, R>(&self, f: F) -> R
where
F: FnOnce(&mut Slab<StreamItem<T>>) -> R,
{
let mut streams = self.streams.take().unwrap();
let result = f(&mut streams);
self.streams.set(Some(streams));
result
}
}
fn close<T>(
id: u32,
item: &mut StreamItem<T>,
api: &DriverApi,
error: Option<io::Error>,
shutdown: bool,
) -> Option<ntex_rt::JoinHandle<io::Result<i32>>> {
if let Some(io) = item.io.take() {
log::debug!("{}: Closing ({}), {:?}", item.tag(), id, item.fd);
mem::forget(io);
if let Some(err) = error {
item.context.stopped(Some(err));
}
let fd = item.fd;
api.detach(fd, id);
Some(ntex_rt::spawn_blocking(move || {
if shutdown {
let _ = syscall!(libc::shutdown(fd, libc::SHUT_RDWR));
}
syscall!(libc::close(fd))
}))
} else {
None
}
}
impl<T> StreamCtl<T> {
pub(crate) fn close(self) -> impl Future<Output = io::Result<()>> {
let id = self.id as usize;
let fut = self.inner.with(|streams| {
let item = &mut streams[id];
close(self.id, item, &self.inner.api, None, false)
});
async move {
if let Some(fut) = fut {
fut.await
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))
.and_then(crate::helpers::pool_io_err)?;
}
Ok(())
}
}
pub(crate) fn with_io<F, R>(&self, f: F) -> R
where
F: FnOnce(Option<&T>) -> R,
{
self.inner
.with(|streams| f(streams[self.id as usize].io.as_ref()))
}
pub(crate) fn modify(&self, rd: bool, wr: bool) {
self.inner.with(|streams| {
let item = &mut streams[self.id as usize];
log::debug!(
"{}: Modify interest ({}), {:?} rd: {:?}, wr: {:?}",
item.tag(),
self.id,
item.fd,
rd,
wr
);
let mut event = Event::new(0, false, false).with_interrupt();
if rd {
if item.flags.contains(Flags::RD) {
event.readable = true;
} else {
let res = item.context.with_read_buf(|buf| {
let chunk = buf.chunk_mut();
let result = task::ready!(syscall!(
break libc::read(item.fd, chunk.as_mut_ptr() as _, chunk.len())
));
if let Ok(size) = result {
log::debug!(
"{}: read {:?}, s: {:?}",
item.tag(),
item.fd,
size
);
unsafe { buf.advance_mut(size) };
}
Poll::Ready(result)
});
if res.is_pending() && item.context.is_read_ready() {
event.readable = true;
item.flags.insert(Flags::RD);
}
}
}
if wr {
if item.flags.contains(Flags::WR) {
event.writable = true;
} else {
let result = item.context.with_write_buf(|buf| {
log::debug!(
"{}: Writing ({}), buf: {:?}",
item.tag(),
self.id,
buf.len()
);
syscall!(
break libc::write(item.fd, buf[..].as_ptr() as _, buf.len())
)
});
if result.is_pending() {
event.writable = true;
item.flags.insert(Flags::WR);
}
}
}
self.inner.api.modify(item.fd, self.id, event);
})
}
}
impl<T> Clone for StreamCtl<T> {
fn clone(&self) -> Self {
self.inner.with(|streams| {
streams[self.id as usize].ref_count += 1;
Self {
id: self.id,
inner: self.inner.clone(),
}
})
}
}
impl<T> Drop for StreamCtl<T> {
fn drop(&mut self) {
if let Some(mut streams) = self.inner.streams.take() {
let id = self.id as usize;
streams[id].ref_count -= 1;
if streams[id].ref_count == 0 {
let mut item = streams.remove(id);
log::debug!(
"{}: Drop io ({}), {:?}, has-io: {}",
item.tag(),
self.id,
item.fd,
item.io.is_some()
);
close(self.id, &mut item, &self.inner.api, None, true);
}
self.inner.streams.set(Some(streams));
} else {
self.inner.delayd_drop.set(true);
self.inner.feed.borrow_mut().push(self.id);
}
}
}


@@ -0,0 +1,101 @@
use std::{any, future::poll_fn, task::Poll};
use ntex_io::{
types, Handle, IoContext, IoStream, ReadContext, ReadStatus, WriteContext, WriteStatus,
};
use ntex_rt::spawn;
use socket2::Socket;
use super::driver::{StreamCtl, StreamOps};
impl IoStream for super::TcpStream {
fn start(self, read: ReadContext, _: WriteContext) -> Option<Box<dyn Handle>> {
let io = self.0;
let context = read.context();
let ctl = StreamOps::current().register(io, context.clone());
let ctl2 = ctl.clone();
spawn(async move { run(ctl, context).await });
Some(Box::new(HandleWrapper(ctl2)))
}
}
impl IoStream for super::UnixStream {
fn start(self, read: ReadContext, _: WriteContext) -> Option<Box<dyn Handle>> {
let io = self.0;
let context = read.context();
let ctl = StreamOps::current().register(io, context.clone());
spawn(async move { run(ctl, context).await });
None
}
}
struct HandleWrapper(StreamCtl<Socket>);
impl Handle for HandleWrapper {
fn query(&self, id: any::TypeId) -> Option<Box<dyn any::Any>> {
if id == any::TypeId::of::<types::PeerAddr>() {
let addr = self.0.with_io(|io| io.and_then(|io| io.peer_addr().ok()));
if let Some(addr) = addr.and_then(|addr| addr.as_socket()) {
return Some(Box::new(types::PeerAddr(addr)));
}
}
None
}
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum Status {
Shutdown,
Terminate,
}
async fn run<T>(ctl: StreamCtl<T>, context: IoContext) {
// Handle io read readiness
let st = poll_fn(|cx| {
let mut modify = false;
let mut readable = false;
let mut writable = false;
let read = match context.poll_read_ready(cx) {
Poll::Ready(ReadStatus::Ready) => {
modify = true;
readable = true;
Poll::Pending
}
Poll::Ready(ReadStatus::Terminate) => Poll::Ready(()),
Poll::Pending => {
modify = true;
Poll::Pending
}
};
let write = match context.poll_write_ready(cx) {
Poll::Ready(WriteStatus::Ready) => {
modify = true;
writable = true;
Poll::Pending
}
Poll::Ready(WriteStatus::Shutdown) => Poll::Ready(Status::Shutdown),
Poll::Ready(WriteStatus::Terminate) => Poll::Ready(Status::Terminate),
Poll::Pending => Poll::Pending,
};
if modify {
ctl.modify(readable, writable);
}
if read.is_pending() && write.is_pending() {
Poll::Pending
} else if write.is_ready() {
write
} else {
Poll::Ready(Status::Terminate)
}
})
.await;
ctl.modify(false, true);
context.shutdown(st == Status::Shutdown).await;
context.stopped(ctl.close().await.err());
}


@@ -0,0 +1,69 @@
use std::{io::Result, net, net::SocketAddr};
use ntex_bytes::PoolRef;
use ntex_io::Io;
use socket2::Socket;
pub(crate) mod connect;
mod driver;
mod io;
#[cfg(not(target_pointer_width = "64"))]
compile_error!("Only 64bit platforms are supported");
/// Tcp stream wrapper for neon TcpStream
struct TcpStream(socket2::Socket);
/// Tcp stream wrapper for neon UnixStream
struct UnixStream(socket2::Socket);
/// Opens a TCP connection to a remote host.
pub async fn tcp_connect(addr: SocketAddr) -> Result<Io> {
let sock = crate::helpers::connect(addr).await?;
Ok(Io::new(TcpStream(crate::helpers::prep_socket(sock)?)))
}
/// Opens a TCP connection to a remote host and use specified memory pool.
pub async fn tcp_connect_in(addr: SocketAddr, pool: PoolRef) -> Result<Io> {
let sock = crate::helpers::connect(addr).await?;
Ok(Io::with_memory_pool(
TcpStream(crate::helpers::prep_socket(sock)?),
pool,
))
}
/// Opens a unix stream connection.
pub async fn unix_connect<'a, P>(addr: P) -> Result<Io>
where
P: AsRef<std::path::Path> + 'a,
{
let sock = crate::helpers::connect_unix(addr).await?;
Ok(Io::new(UnixStream(crate::helpers::prep_socket(sock)?)))
}
/// Opens a unix stream connection and use specified memory pool.
pub async fn unix_connect_in<'a, P>(addr: P, pool: PoolRef) -> Result<Io>
where
P: AsRef<std::path::Path> + 'a,
{
let sock = crate::helpers::connect_unix(addr).await?;
Ok(Io::with_memory_pool(
UnixStream(crate::helpers::prep_socket(sock)?),
pool,
))
}
/// Convert std TcpStream to TcpStream
pub fn from_tcp_stream(stream: net::TcpStream) -> Result<Io> {
stream.set_nodelay(true)?;
Ok(Io::new(TcpStream(crate::helpers::prep_socket(
Socket::from(stream),
)?)))
}
/// Convert std UnixStream to UnixStream
pub fn from_unix_stream(stream: std::os::unix::net::UnixStream) -> Result<Io> {
Ok(Io::new(UnixStream(crate::helpers::prep_socket(
Socket::from(stream),
)?)))
}


@@ -0,0 +1,91 @@
use std::{cell::RefCell, io, os::fd::RawFd, rc::Rc};
use io_uring::{opcode, types::Fd};
use ntex_neon::{driver::DriverApi, driver::Handler, Runtime};
use ntex_util::channel::oneshot::Sender;
use slab::Slab;
use socket2::SockAddr;
#[derive(Clone)]
pub(crate) struct ConnectOps(Rc<ConnectOpsInner>);
#[derive(Debug)]
enum Change {
Readable,
Writable,
Error(io::Error),
}
struct ConnectOpsHandler {
inner: Rc<ConnectOpsInner>,
}
type Operations = RefCell<Slab<(Box<SockAddr>, Sender<io::Result<()>>)>>;
struct ConnectOpsInner {
api: DriverApi,
ops: Operations,
}
impl ConnectOps {
pub(crate) fn current() -> Self {
Runtime::value(|rt| {
let mut inner = None;
rt.driver().register(|api| {
if !api.is_supported(opcode::Connect::CODE) {
panic!("opcode::Connect is required for io-uring support");
}
let ops = Rc::new(ConnectOpsInner {
api,
ops: RefCell::new(Slab::new()),
});
inner = Some(ops.clone());
Box::new(ConnectOpsHandler { inner: ops })
});
ConnectOps(inner.unwrap())
})
}
pub(crate) fn connect(
&self,
fd: RawFd,
addr: SockAddr,
sender: Sender<io::Result<()>>,
) -> io::Result<()> {
let addr2 = addr.clone();
let mut ops = self.0.ops.borrow_mut();
// addr must be stable, neon submits ops at the end of rt turn
let addr = Box::new(addr);
let (addr_ptr, addr_len) = (addr.as_ref().as_ptr(), addr.len());
let id = ops.insert((addr, sender));
self.0.api.submit(
id as u32,
opcode::Connect::new(Fd(fd), addr_ptr, addr_len).build(),
);
Ok(())
}
}
impl Handler for ConnectOpsHandler {
fn canceled(&mut self, user_data: usize) {
log::debug!("connect-op is canceled {:?}", user_data);
self.inner.ops.borrow_mut().remove(user_data);
}
fn completed(&mut self, user_data: usize, flags: u32, result: io::Result<i32>) {
let (addr, tx) = self.inner.ops.borrow_mut().remove(user_data);
log::debug!(
"connect-op is completed {:?} result: {:?}, addr: {:?}",
user_data,
result,
addr.as_socket()
);
let _ = tx.send(result.map(|_| ()));
}
}


@@ -0,0 +1,444 @@
use std::{cell::RefCell, io, mem, num::NonZeroU32, os, rc::Rc, task::Poll};
use io_uring::{opcode, squeue::Entry, types::Fd};
use ntex_neon::{driver::DriverApi, driver::Handler, Runtime};
use ntex_util::channel::oneshot;
use slab::Slab;
use ntex_bytes::{Buf, BufMut, BytesVec};
use ntex_io::IoContext;
pub(crate) struct StreamCtl<T> {
id: usize,
inner: Rc<StreamOpsInner<T>>,
}
bitflags::bitflags! {
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
struct Flags: u8 {
const RD_CANCELING = 0b0000_0001;
const RD_REISSUE = 0b0000_0010;
const WR_CANCELING = 0b0001_0000;
const WR_REISSUE = 0b0010_0000;
}
}
struct StreamItem<T> {
io: Option<T>,
fd: Fd,
context: IoContext,
ref_count: usize,
flags: Flags,
rd_op: Option<NonZeroU32>,
wr_op: Option<NonZeroU32>,
}
impl<T> StreamItem<T> {
fn tag(&self) -> &'static str {
self.context.tag()
}
}
enum Operation {
Recv {
id: usize,
buf: BytesVec,
context: IoContext,
},
Send {
id: usize,
buf: BytesVec,
context: IoContext,
},
Close {
tx: Option<oneshot::Sender<io::Result<i32>>>,
},
Nop,
}
pub(crate) struct StreamOps<T>(Rc<StreamOpsInner<T>>);
struct StreamOpsHandler<T> {
inner: Rc<StreamOpsInner<T>>,
}
struct StreamOpsInner<T> {
api: DriverApi,
feed: RefCell<Vec<usize>>,
storage: RefCell<StreamOpsStorage<T>>,
}
struct StreamOpsStorage<T> {
ops: Slab<Operation>,
streams: Slab<StreamItem<T>>,
}
impl<T: os::fd::AsRawFd + 'static> StreamOps<T> {
pub(crate) fn current() -> Self {
Runtime::value(|rt| {
let mut inner = None;
rt.driver().register(|api| {
if !api.is_supported(opcode::Recv::CODE) {
panic!("opcode::Recv is required for io-uring support");
}
if !api.is_supported(opcode::Send::CODE) {
panic!("opcode::Send is required for io-uring support");
}
if !api.is_supported(opcode::Close::CODE) {
panic!("opcode::Close is required for io-uring support");
}
let mut ops = Slab::new();
ops.insert(Operation::Nop);
let ops = Rc::new(StreamOpsInner {
api,
feed: RefCell::new(Vec::new()),
storage: RefCell::new(StreamOpsStorage {
ops,
streams: Slab::new(),
}),
});
inner = Some(ops.clone());
Box::new(StreamOpsHandler { inner: ops })
});
StreamOps(inner.unwrap())
})
}
pub(crate) fn register(&self, io: T, context: IoContext) -> StreamCtl<T> {
let item = StreamItem {
context,
fd: Fd(io.as_raw_fd()),
io: Some(io),
ref_count: 1,
rd_op: None,
wr_op: None,
flags: Flags::empty(),
};
let id = self.0.storage.borrow_mut().streams.insert(item);
StreamCtl {
id,
inner: self.0.clone(),
}
}
fn with<F, R>(&self, f: F) -> R
where
F: FnOnce(&mut StreamOpsStorage<T>) -> R,
{
f(&mut *self.0.storage.borrow_mut())
}
}
impl<T> Clone for StreamOps<T> {
fn clone(&self) -> Self {
Self(self.0.clone())
}
}
impl<T> Handler for StreamOpsHandler<T> {
fn canceled(&mut self, user_data: usize) {
let mut storage = self.inner.storage.borrow_mut();
match storage.ops.remove(user_data) {
Operation::Recv { id, buf, context } => {
log::debug!("{}: Recv canceled {:?}", context.tag(), id);
context.release_read_buf(buf);
if let Some(item) = storage.streams.get_mut(id) {
item.rd_op.take();
item.flags.remove(Flags::RD_CANCELING);
if item.flags.contains(Flags::RD_REISSUE) {
item.flags.remove(Flags::RD_REISSUE);
let result = storage.recv(id, Some(context));
if let Some((id, op)) = result {
self.inner.api.submit(id, op);
}
}
}
}
Operation::Send { id, buf, context } => {
log::debug!("{}: Send canceled: {:?}", context.tag(), id);
context.release_write_buf(buf);
if let Some(item) = storage.streams.get_mut(id) {
item.wr_op.take();
item.flags.remove(Flags::WR_CANCELING);
if item.flags.contains(Flags::WR_REISSUE) {
item.flags.remove(Flags::WR_REISSUE);
let result = storage.send(id, Some(context));
if let Some((id, op)) = result {
self.inner.api.submit(id, op);
}
}
}
}
Operation::Nop | Operation::Close { .. } => {}
}
}
fn completed(&mut self, user_data: usize, flags: u32, result: io::Result<i32>) {
let mut storage = self.inner.storage.borrow_mut();
let op = storage.ops.remove(user_data);
match op {
Operation::Recv {
id,
mut buf,
context,
} => {
let result = result.map(|size| {
unsafe { buf.advance_mut(size as usize) };
size as usize
});
// reset op reference
if let Some(item) = storage.streams.get_mut(id) {
log::debug!(
"{}: Recv completed {:?}, res: {:?}, buf({})",
context.tag(),
item.fd,
result,
buf.remaining_mut()
);
item.rd_op.take();
}
// set read buf
let tag = context.tag();
if context.set_read_buf(result, buf).is_pending() {
if let Some((id, op)) = storage.recv(id, Some(context)) {
self.inner.api.submit(id, op);
}
} else {
log::debug!("{}: Recv to pause", tag);
}
}
Operation::Send { id, buf, context } => {
// reset op reference
let fd = if let Some(item) = storage.streams.get_mut(id) {
log::debug!(
"{}: Send completed: {:?}, res: {:?}, buf({})",
context.tag(),
item.fd,
result,
buf.len()
);
item.wr_op.take();
Some(item.fd)
} else {
None
};
// set write buf
let result = context.set_write_buf(result.map(|size| size as usize), buf);
if result.is_pending() {
log::debug!("{}: Need to send more: {:?}", context.tag(), fd);
if let Some((id, op)) = storage.send(id, Some(context)) {
self.inner.api.submit(id, op);
}
}
}
Operation::Close { tx } => {
if let Some(tx) = tx {
let _ = tx.send(result);
}
}
Operation::Nop => {}
}
// extra
for id in self.inner.feed.borrow_mut().drain(..) {
storage.streams[id].ref_count -= 1;
if storage.streams[id].ref_count == 0 {
let mut item = storage.streams.remove(id);
log::debug!("{}: Drop io ({}), {:?}", item.tag(), id, item.fd);
if let Some(io) = item.io.take() {
mem::forget(io);
let id = storage.ops.insert(Operation::Close { tx: None });
assert!(id < u32::MAX as usize);
self.inner
.api
.submit(id as u32, opcode::Close::new(item.fd).build());
}
}
}
}
}
impl<T> StreamOpsStorage<T> {
fn recv(&mut self, id: usize, context: Option<IoContext>) -> Option<(u32, Entry)> {
let item = &mut self.streams[id];
if item.rd_op.is_none() {
if let Poll::Ready(mut buf) = item.context.get_read_buf() {
log::debug!(
"{}: Recv resume ({}), {:?} rem: {:?}",
item.tag(),
id,
item.fd,
buf.remaining_mut()
);
let slice = buf.chunk_mut();
let op = opcode::Recv::new(item.fd, slice.as_mut_ptr(), slice.len() as u32)
.build();
let op_id = self.ops.insert(Operation::Recv {
id,
buf,
context: context.unwrap_or_else(|| item.context.clone()),
});
assert!(op_id < u32::MAX as usize);
item.rd_op = NonZeroU32::new(op_id as u32);
return Some((op_id as u32, op));
}
} else if item.flags.contains(Flags::RD_CANCELING) {
item.flags.insert(Flags::RD_REISSUE);
}
None
}
fn send(&mut self, id: usize, context: Option<IoContext>) -> Option<(u32, Entry)> {
let item = &mut self.streams[id];
if item.wr_op.is_none() {
if let Poll::Ready(buf) = item.context.get_write_buf() {
log::debug!(
"{}: Send resume ({}), {:?} len: {:?}",
item.tag(),
id,
item.fd,
buf.len()
);
let slice = buf.chunk();
let op =
opcode::Send::new(item.fd, slice.as_ptr(), slice.len() as u32).build();
let op_id = self.ops.insert(Operation::Send {
id,
buf,
context: context.unwrap_or_else(|| item.context.clone()),
});
assert!(op_id < u32::MAX as usize);
item.wr_op = NonZeroU32::new(op_id as u32);
return Some((op_id as u32, op));
}
} else if item.flags.contains(Flags::WR_CANCELING) {
item.flags.insert(Flags::WR_REISSUE);
}
None
}
}
impl<T> StreamCtl<T> {
pub(crate) async fn close(self) -> io::Result<()> {
let result = {
let mut storage = self.inner.storage.borrow_mut();
let (io, fd) = {
let item = &mut storage.streams[self.id];
(item.io.take(), item.fd)
};
if let Some(io) = io {
mem::forget(io);
let (tx, rx) = oneshot::channel();
let id = storage.ops.insert(Operation::Close { tx: Some(tx) });
assert!(id < u32::MAX as usize);
drop(storage);
self.inner
.api
.submit(id as u32, opcode::Close::new(fd).build());
Some(rx)
} else {
None
}
};
if let Some(rx) = result {
rx.await
.map_err(|_| io::Error::new(io::ErrorKind::Other, "gone"))
.and_then(|item| item)
.map(|_| ())
} else {
Ok(())
}
}
pub(crate) fn with_io<F, R>(&self, f: F) -> R
where
F: FnOnce(Option<&T>) -> R,
{
f(self.inner.storage.borrow().streams[self.id].io.as_ref())
}
pub(crate) fn resume_read(&self) {
let result = self.inner.storage.borrow_mut().recv(self.id, None);
if let Some((id, op)) = result {
self.inner.api.submit(id, op);
}
}
pub(crate) fn resume_write(&self) {
let result = self.inner.storage.borrow_mut().send(self.id, None);
if let Some((id, op)) = result {
self.inner.api.submit(id, op);
}
}
pub(crate) fn pause_read(&self) {
let mut storage = self.inner.storage.borrow_mut();
let item = &mut storage.streams[self.id];
if let Some(rd_op) = item.rd_op {
if !item.flags.contains(Flags::RD_CANCELING) {
log::debug!("{}: Recv to pause ({}), {:?}", item.tag(), self.id, item.fd);
item.flags.insert(Flags::RD_CANCELING);
self.inner.api.cancel(rd_op.get());
}
}
}
}
impl<T> Clone for StreamCtl<T> {
fn clone(&self) -> Self {
self.inner.storage.borrow_mut().streams[self.id].ref_count += 1;
Self {
id: self.id,
inner: self.inner.clone(),
}
}
}
impl<T> Drop for StreamCtl<T> {
fn drop(&mut self) {
if let Ok(mut storage) = self.inner.storage.try_borrow_mut() {
storage.streams[self.id].ref_count -= 1;
if storage.streams[self.id].ref_count == 0 {
let mut item = storage.streams.remove(self.id);
if let Some(io) = item.io.take() {
log::debug!("{}: Close io ({}), {:?}", item.tag(), self.id, item.fd);
mem::forget(io);
let id = storage.ops.insert(Operation::Close { tx: None });
assert!(id < u32::MAX as usize);
self.inner
.api
.submit(id as u32, opcode::Close::new(item.fd).build());
}
}
} else {
self.inner.feed.borrow_mut().push(self.id);
}
}
}


@@ -0,0 +1,95 @@
use std::{any, future::poll_fn, task::Poll};
use ntex_io::{
types, Handle, IoContext, IoStream, ReadContext, ReadStatus, WriteContext, WriteStatus,
};
use ntex_rt::spawn;
use socket2::Socket;
use super::driver::{StreamCtl, StreamOps};
impl IoStream for super::TcpStream {
fn start(self, read: ReadContext, _: WriteContext) -> Option<Box<dyn Handle>> {
let io = self.0;
let context = read.context();
let ctl = StreamOps::current().register(io, context.clone());
let ctl2 = ctl.clone();
spawn(async move { run(ctl, context).await });
Some(Box::new(HandleWrapper(ctl2)))
}
}
impl IoStream for super::UnixStream {
fn start(self, read: ReadContext, _: WriteContext) -> Option<Box<dyn Handle>> {
let io = self.0;
let context = read.context();
let ctl = StreamOps::current().register(io, context.clone());
spawn(async move { run(ctl, context).await });
None
}
}
struct HandleWrapper(StreamCtl<Socket>);
impl Handle for HandleWrapper {
fn query(&self, id: any::TypeId) -> Option<Box<dyn any::Any>> {
if id == any::TypeId::of::<types::PeerAddr>() {
let addr = self.0.with_io(|io| io.and_then(|io| io.peer_addr().ok()));
if let Some(addr) = addr.and_then(|addr| addr.as_socket()) {
return Some(Box::new(types::PeerAddr(addr)));
}
}
None
}
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum Status {
Shutdown,
Terminate,
}
async fn run<T>(ctl: StreamCtl<T>, context: IoContext) {
// Handle io readiness
let st = poll_fn(|cx| {
let read = match context.poll_read_ready(cx) {
Poll::Ready(ReadStatus::Ready) => {
ctl.resume_read();
Poll::Pending
}
Poll::Ready(ReadStatus::Terminate) => Poll::Ready(()),
Poll::Pending => {
ctl.pause_read();
Poll::Pending
}
};
let write = match context.poll_write_ready(cx) {
Poll::Ready(WriteStatus::Ready) => {
ctl.resume_write();
Poll::Pending
}
Poll::Ready(WriteStatus::Shutdown) => Poll::Ready(Status::Shutdown),
Poll::Ready(WriteStatus::Terminate) => Poll::Ready(Status::Terminate),
Poll::Pending => Poll::Pending,
};
if read.is_pending() && write.is_pending() {
Poll::Pending
} else if write.is_ready() {
write
} else {
Poll::Ready(Status::Terminate)
}
})
.await;
ctl.pause_read();
ctl.resume_write();
context.shutdown(st == Status::Shutdown).await;
let result = ctl.close().await;
context.stopped(result.err());
}

View file

@ -0,0 +1,66 @@
use std::{io::Result, net, net::SocketAddr};
use ntex_bytes::PoolRef;
use ntex_io::Io;
use socket2::Socket;
pub(crate) mod connect;
mod driver;
mod io;
/// Tcp stream wrapper for neon TcpStream
struct TcpStream(Socket);
/// Unix stream wrapper for neon UnixStream

struct UnixStream(Socket);
/// Opens a TCP connection to a remote host.
pub async fn tcp_connect(addr: SocketAddr) -> Result<Io> {
let sock = crate::helpers::connect(addr).await?;
Ok(Io::new(TcpStream(crate::helpers::prep_socket(sock)?)))
}
/// Opens a TCP connection to a remote host using the specified memory pool.
pub async fn tcp_connect_in(addr: SocketAddr, pool: PoolRef) -> Result<Io> {
let sock = crate::helpers::connect(addr).await?;
Ok(Io::with_memory_pool(
TcpStream(crate::helpers::prep_socket(sock)?),
pool,
))
}
/// Opens a unix stream connection.
pub async fn unix_connect<'a, P>(addr: P) -> Result<Io>
where
P: AsRef<std::path::Path> + 'a,
{
let sock = crate::helpers::connect_unix(addr).await?;
Ok(Io::new(UnixStream(crate::helpers::prep_socket(sock)?)))
}
/// Opens a unix stream connection using the specified memory pool.
pub async fn unix_connect_in<'a, P>(addr: P, pool: PoolRef) -> Result<Io>
where
P: AsRef<std::path::Path> + 'a,
{
let sock = crate::helpers::connect_unix(addr).await?;
Ok(Io::with_memory_pool(
UnixStream(crate::helpers::prep_socket(sock)?),
pool,
))
}
/// Convert std TcpStream to ntex Io object
pub fn from_tcp_stream(stream: net::TcpStream) -> Result<Io> {
stream.set_nodelay(true)?;
Ok(Io::new(TcpStream(crate::helpers::prep_socket(
Socket::from(stream),
)?)))
}
/// Convert std UnixStream to ntex Io object
pub fn from_unix_stream(stream: std::os::unix::net::UnixStream) -> Result<Io> {
Ok(Io::new(UnixStream(crate::helpers::prep_socket(
Socket::from(stream),
)?)))
}
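A hedged usage sketch for the connectors above (not part of the diff): only tcp_connect comes from this module, the function name `open` and the comment about the driver are illustrative.

use std::net::SocketAddr;
use ntex_io::Io;

async fn open(addr: SocketAddr) -> std::io::Result<Io> {
    // Calls the tcp_connect defined above; the returned Io is driven by the
    // neon StreamOps driver shown in the driver/io modules earlier.
    tcp_connect(addr).await
}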

View file

@ -1,9 +1,4 @@
#![deny( #![deny(warnings, unreachable_pub, missing_debug_implementations)]
rust_2018_idioms,
warnings,
unreachable_pub,
missing_debug_implementations
)]
#![warn(nonstandard_style, future_incompatible)] #![warn(nonstandard_style, future_incompatible)]
//! Resource path matching library. //! Resource path matching library.
@ -42,7 +37,7 @@ impl ResourcePath for String {
} }
} }
impl<'a> ResourcePath for &'a str { impl ResourcePath for &str {
fn path(&self) -> &str { fn path(&self) -> &str {
self self
} }
@ -54,7 +49,7 @@ impl ResourcePath for ntex_bytes::ByteString {
} }
} }
impl<'a, T: ResourcePath> ResourcePath for &'a T { impl<T: ResourcePath> ResourcePath for &T {
fn path(&self) -> &str { fn path(&self) -> &str {
(*self).path() (*self).path()
} }
@ -71,13 +66,13 @@ impl IntoPattern for String {
} }
} }
impl<'a> IntoPattern for &'a String { impl IntoPattern for &String {
fn patterns(&self) -> Vec<String> { fn patterns(&self) -> Vec<String> {
vec![self.as_str().to_string()] vec![self.as_str().to_string()]
} }
} }
impl<'a> IntoPattern for &'a str { impl IntoPattern for &str {
fn patterns(&self) -> Vec<String> { fn patterns(&self) -> Vec<String> {
vec![(*self).to_string()] vec![(*self).to_string()]
} }

View file

@ -63,5 +63,5 @@ fn from_hex(v: u8) -> Option<u8> {
#[inline] #[inline]
fn restore_ch(d1: u8, d2: u8) -> Option<u8> { fn restore_ch(d1: u8, d2: u8) -> Option<u8> {
from_hex(d1).and_then(|d1| from_hex(d2).map(move |d2| d1 << 4 | d2)) from_hex(d1).and_then(|d1| from_hex(d2).map(move |d2| (d1 << 4) | d2))
} }
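Worked example of the nibble math above: from_hex(b'4') is Some(4) and from_hex(b'1') is Some(1), so restore_ch(b'4', b'1') yields Some((4 << 4) | 1) = Some(0x41), i.e. the byte b'A'.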

View file

@ -1,5 +1,37 @@
# Changes # Changes
## [0.4.29] - 2025-03-26
* Add Arbiter::get_value() helper method
## [0.4.27] - 2025-03-14
* Add arbiter pings TTL
* Add ability to retrieve a list of all arbiters in the system
* Add "neon" runtime support
* Drop glommio support
* Drop async-std support
## [0.4.26] - 2025-03-12
* Add Arbiter::spawn_with()
## [0.4.25] - 2025-03-11
* Adds Send bound to arbiter exec (#514)
## [0.4.24] - 2025-01-03
* Relax runtime requirements
## [0.4.23] - 2024-12-10
* Remove Unpin requirements for Arbiter::spawn()
## [0.4.22] - 2024-12-01 ## [0.4.22] - 2024-12-01
* Depend on individual compio packages * Depend on individual compio packages

View file

@ -1,6 +1,6 @@
[package] [package]
name = "ntex-rt" name = "ntex-rt"
version = "0.4.22" version = "0.4.29"
authors = ["ntex contributors <team@ntex.rs>"] authors = ["ntex contributors <team@ntex.rs>"]
description = "ntex runtime" description = "ntex runtime"
keywords = ["network", "framework", "async", "futures"] keywords = ["network", "framework", "async", "futures"]
@ -20,25 +20,21 @@ path = "src/lib.rs"
[features] [features]
default = [] default = []
# glommio support
glommio = ["glomm-io", "futures-channel"]
# tokio support # tokio support
tokio = ["tok-io"] tokio = ["tok-io"]
# compio support # compio support
compio = ["compio-driver", "compio-runtime"] compio = ["compio-driver", "compio-runtime"]
# async-std support # neon runtime
async-std = ["async_std/unstable"] neon = ["ntex-neon"]
[dependencies] [dependencies]
async-channel = "2" async-channel = "2"
futures-core = "0.3" futures-timer = "3.0"
log = "0.4"
oneshot = "0.1" oneshot = "0.1"
log = "0.4"
async_std = { version = "1", package = "async-std", optional = true }
compio-driver = { version = "0.6", optional = true } compio-driver = { version = "0.6", optional = true }
compio-runtime = { version = "0.6", optional = true } compio-runtime = { version = "0.6", optional = true }
tok-io = { version = "1", package = "tokio", default-features = false, features = [ tok-io = { version = "1", package = "tokio", default-features = false, features = [
@ -46,6 +42,4 @@ tok-io = { version = "1", package = "tokio", default-features = false, features
"net", "net",
], optional = true } ], optional = true }
[target.'cfg(target_os = "linux")'.dependencies] ntex-neon = { version = "0.1.14", optional = true }
glomm-io = { version = "0.9", package = "glommio", optional = true }
futures-channel = { version = "0.3", optional = true }

View file

@ -1,33 +1,21 @@
use std::{collections::HashSet, env}; use std::{collections::HashSet, env};
fn main() { fn main() {
let mut clippy = false;
let mut features = HashSet::<&'static str>::default(); let mut features = HashSet::<&'static str>::default();
for (key, val) in env::vars() { for (key, _) in env::vars() {
let _ = match key.as_ref() { let _ = match key.as_ref() {
"CARGO_FEATURE_COMPIO" => features.insert("compio"), "CARGO_FEATURE_COMPIO" => features.insert("compio"),
"CARGO_FEATURE_TOKIO" => features.insert("tokio"), "CARGO_FEATURE_TOKIO" => features.insert("tokio"),
"CARGO_FEATURE_GLOMMIO" => features.insert("glommio"), "CARGO_FEATURE_NEON" => features.insert("neon"),
"CARGO_FEATURE_ASYNC_STD" => features.insert("async-std"),
"CARGO_CFG_FEATURE" => {
if val.contains("cargo-clippy") {
clippy = true;
}
false
}
_ => false, _ => false,
}; };
} }
if !clippy { if features.len() > 1 {
if features.is_empty() { panic!(
panic!("Runtime must be selected '--feature=ntex/$runtime', available options are \"compio\", \"tokio\", \"async-std\", \"glommio\""); "Only one runtime feature could be selected, current selection {:?}",
} else if features.len() > 1 { features
panic!( );
"Only one runtime feature could be selected, current selection {:?}",
features
);
}
} }
} }

View file

@ -1,27 +1,22 @@
#![allow(clippy::let_underscore_future)] #![allow(clippy::let_underscore_future)]
use std::any::{Any, TypeId}; use std::any::{Any, TypeId};
use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::{atomic::AtomicUsize, atomic::Ordering, Arc};
use std::task::{ready, Context, Poll};
use std::{cell::RefCell, collections::HashMap, fmt, future::Future, pin::Pin, thread}; use std::{cell::RefCell, collections::HashMap, fmt, future::Future, pin::Pin, thread};
use async_channel::{unbounded, Receiver, Sender}; use async_channel::{unbounded, Receiver, Sender};
use futures_core::stream::Stream;
use crate::system::System; use crate::system::{FnExec, Id, System, SystemCommand};
thread_local!( thread_local!(
static ADDR: RefCell<Option<Arbiter>> = const { RefCell::new(None) }; static ADDR: RefCell<Option<Arbiter>> = const { RefCell::new(None) };
static STORAGE: RefCell<HashMap<TypeId, Box<dyn Any>>> = RefCell::new(HashMap::new()); static STORAGE: RefCell<HashMap<TypeId, Box<dyn Any>>> = RefCell::new(HashMap::new());
); );
type ServerCommandRx = Pin<Box<dyn Stream<Item = SystemCommand>>>;
type ArbiterCommandRx = Pin<Box<dyn Stream<Item = ArbiterCommand>>>;
pub(super) static COUNT: AtomicUsize = AtomicUsize::new(0); pub(super) static COUNT: AtomicUsize = AtomicUsize::new(0);
pub(super) enum ArbiterCommand { pub(super) enum ArbiterCommand {
Stop, Stop,
Execute(Box<dyn Future<Output = ()> + Unpin + Send>), Execute(Pin<Box<dyn Future<Output = ()> + Send>>),
ExecuteFn(Box<dyn FnExec>), ExecuteFn(Box<dyn FnExec>),
} }
@ -31,13 +26,16 @@ pub(super) enum ArbiterCommand {
/// When an Arbiter is created, it spawns a new OS thread, and /// When an Arbiter is created, it spawns a new OS thread, and
/// hosts an event loop. Some Arbiter functions execute on the current thread. /// hosts an event loop. Some Arbiter functions execute on the current thread.
pub struct Arbiter { pub struct Arbiter {
id: usize,
pub(crate) sys_id: usize,
name: Arc<String>,
sender: Sender<ArbiterCommand>, sender: Sender<ArbiterCommand>,
thread_handle: Option<thread::JoinHandle<()>>, thread_handle: Option<thread::JoinHandle<()>>,
} }
impl fmt::Debug for Arbiter { impl fmt::Debug for Arbiter {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "Arbiter") write!(f, "Arbiter({:?})", self.name.as_ref())
} }
} }
@ -49,26 +47,20 @@ impl Default for Arbiter {
impl Clone for Arbiter { impl Clone for Arbiter {
fn clone(&self) -> Self { fn clone(&self) -> Self {
Self::with_sender(self.sender.clone()) Self::with_sender(self.sys_id, self.id, self.name.clone(), self.sender.clone())
} }
} }
impl Arbiter { impl Arbiter {
#[allow(clippy::borrowed_box)] #[allow(clippy::borrowed_box)]
pub(super) fn new_system() -> (Self, ArbiterController) { pub(super) fn new_system(name: String) -> (Self, ArbiterController) {
let (tx, rx) = unbounded(); let (tx, rx) = unbounded();
let arb = Arbiter::with_sender(tx); let arb = Arbiter::with_sender(0, 0, Arc::new(name), tx);
ADDR.with(|cell| *cell.borrow_mut() = Some(arb.clone())); ADDR.with(|cell| *cell.borrow_mut() = Some(arb.clone()));
STORAGE.with(|cell| cell.borrow_mut().clear()); STORAGE.with(|cell| cell.borrow_mut().clear());
( (arb, ArbiterController { rx, stop: None })
arb,
ArbiterController {
stop: None,
rx: Box::pin(rx),
},
)
} }
/// Returns the current thread's arbiter's address. If no Arbiter is present, then this /// Returns the current thread's arbiter's address. If no Arbiter is present, then this
@ -85,27 +77,37 @@ impl Arbiter {
let _ = self.sender.try_send(ArbiterCommand::Stop); let _ = self.sender.try_send(ArbiterCommand::Stop);
} }
/// Spawn new thread and run event loop in spawned thread. /// Spawn new thread and run runtime in spawned thread.
/// Returns address of newly created arbiter. /// Returns address of newly created arbiter.
pub fn new() -> Arbiter { pub fn new() -> Arbiter {
let name = format!("ntex-rt:worker:{}", COUNT.load(Ordering::Relaxed) + 1);
Arbiter::with_name(name)
}
/// Spawn new thread and run runtime in spawned thread.
/// Returns address of newly created arbiter.
pub fn with_name(name: String) -> Arbiter {
let id = COUNT.fetch_add(1, Ordering::Relaxed); let id = COUNT.fetch_add(1, Ordering::Relaxed);
let name = format!("ntex-rt:worker:{}", id);
let sys = System::current(); let sys = System::current();
let name2 = Arc::new(name.clone());
let config = sys.config(); let config = sys.config();
let (arb_tx, arb_rx) = unbounded(); let (arb_tx, arb_rx) = unbounded();
let arb_tx2 = arb_tx.clone(); let arb_tx2 = arb_tx.clone();
let builder = if sys.config().stack_size > 0 { let builder = if sys.config().stack_size > 0 {
thread::Builder::new() thread::Builder::new()
.name(name.clone()) .name(name)
.stack_size(sys.config().stack_size) .stack_size(sys.config().stack_size)
} else { } else {
thread::Builder::new().name(name.clone()) thread::Builder::new().name(name)
}; };
let name = name2.clone();
let sys_id = sys.id();
let handle = builder let handle = builder
.spawn(move || { .spawn(move || {
let arb = Arbiter::with_sender(arb_tx); let arb = Arbiter::with_sender(sys_id.0, id, name2, arb_tx);
let (stop, stop_rx) = oneshot::channel(); let (stop, stop_rx) = oneshot::channel();
STORAGE.with(|cell| cell.borrow_mut().clear()); STORAGE.with(|cell| cell.borrow_mut().clear());
@ -114,16 +116,19 @@ impl Arbiter {
config.block_on(async move { config.block_on(async move {
// start arbiter controller // start arbiter controller
let _ = crate::spawn(ArbiterController { let _ = crate::spawn(
stop: Some(stop), ArbiterController {
rx: Box::pin(arb_rx), stop: Some(stop),
}); rx: arb_rx,
}
.run(),
);
ADDR.with(|cell| *cell.borrow_mut() = Some(arb.clone())); ADDR.with(|cell| *cell.borrow_mut() = Some(arb.clone()));
// register arbiter // register arbiter
let _ = System::current() let _ = System::current()
.sys() .sys()
.try_send(SystemCommand::RegisterArbiter(id, arb)); .try_send(SystemCommand::RegisterArbiter(Id(id), arb));
// run loop // run loop
let _ = stop_rx.await; let _ = stop_rx.await;
@ -132,32 +137,84 @@ impl Arbiter {
// unregister arbiter // unregister arbiter
let _ = System::current() let _ = System::current()
.sys() .sys()
.try_send(SystemCommand::UnregisterArbiter(id)); .try_send(SystemCommand::UnregisterArbiter(Id(id)));
}) })
.unwrap_or_else(|err| { .unwrap_or_else(|err| {
panic!("Cannot spawn an arbiter's thread {:?}: {:?}", &name, err) panic!("Cannot spawn an arbiter's thread {:?}: {:?}", &name, err)
}); });
Arbiter { Arbiter {
id,
name,
sys_id: sys_id.0,
sender: arb_tx2, sender: arb_tx2,
thread_handle: Some(handle), thread_handle: Some(handle),
} }
} }
fn with_sender(
sys_id: usize,
id: usize,
name: Arc<String>,
sender: Sender<ArbiterCommand>,
) -> Self {
Self {
id,
sys_id,
name,
sender,
thread_handle: None,
}
}
/// Id of the arbiter
pub fn id(&self) -> Id {
Id(self.id)
}
/// Name of the arbiter
pub fn name(&self) -> &str {
self.name.as_ref()
}
/// Send a future to the Arbiter's thread, and spawn it. /// Send a future to the Arbiter's thread, and spawn it.
pub fn spawn<F>(&self, future: F) pub fn spawn<F>(&self, future: F)
where where
F: Future<Output = ()> + Send + Unpin + 'static, F: Future<Output = ()> + Send + 'static,
{ {
let _ = self let _ = self
.sender .sender
.try_send(ArbiterCommand::Execute(Box::new(future))); .try_send(ArbiterCommand::Execute(Box::pin(future)));
} }
#[rustfmt::skip]
/// Send a function to the Arbiter's thread and spawn its resulting future.
/// This can be used to spawn non-send futures on the arbiter thread.
pub fn spawn_with<F, R, O>(
&self,
f: F
) -> impl Future<Output = Result<O, oneshot::RecvError>> + Send + 'static
where
F: FnOnce() -> R + Send + 'static,
R: Future<Output = O> + 'static,
O: Send + 'static,
{
let (tx, rx) = oneshot::channel();
let _ = self
.sender
.try_send(ArbiterCommand::ExecuteFn(Box::new(move || {
crate::spawn(async move {
let _ = tx.send(f().await);
});
})));
rx
}
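A hedged usage sketch for spawn_with (assumes a running ntex system on both threads; the Rc is only there to show that the future built inside the closure does not need to be Send):

async fn double_on(arb: &Arbiter) -> u32 {
    arb.spawn_with(|| async {
        // Built on the target arbiter thread, so !Send state is fine here.
        let local = std::rc::Rc::new(21u32);
        *local * 2
    })
    .await
    .expect("arbiter stopped before replying")
}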
#[rustfmt::skip]
/// Send a function to the Arbiter's thread. This function will be executed asynchronously. /// Send a function to the Arbiter's thread. This function will be executed asynchronously.
/// A future is created, and when resolved will contain the result of the function sent /// A future is created, and when resolved will contain the result of the function sent
/// to the Arbiter's thread. /// to the Arbiter's thread.
pub fn exec<F, R>(&self, f: F) -> impl Future<Output = Result<R, oneshot::RecvError>> pub fn exec<F, R>(&self, f: F) -> impl Future<Output = Result<R, oneshot::RecvError>> + Send + 'static
where where
F: FnOnce() -> R + Send + 'static, F: FnOnce() -> R + Send + 'static,
R: Send + 'static, R: Send + 'static,
@ -229,11 +286,23 @@ impl Arbiter {
}) })
} }
fn with_sender(sender: Sender<ArbiterCommand>) -> Self { /// Get a type previously inserted to this runtime or create a new one.
Self { pub fn get_value<T, F>(f: F) -> T
sender, where
thread_handle: None, T: Clone + 'static,
} F: FnOnce() -> T,
{
STORAGE.with(move |cell| {
let mut st = cell.borrow_mut();
if let Some(boxed) = st.get(&TypeId::of::<T>()) {
if let Some(val) = (&**boxed as &(dyn Any + 'static)).downcast_ref::<T>() {
return val.clone();
}
}
let val = f();
st.insert(TypeId::of::<T>(), Box::new(val.clone()));
val
})
} }
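A hedged sketch of how get_value is meant to be used: the value is cached per arbiter thread, keyed by TypeId, and the closure only runs on the first call for a given type (the Config type here is purely illustrative):

#[derive(Clone)]
struct Config {
    name: String,
}

fn thread_config() -> Config {
    // First call on this arbiter thread stores the value; later calls clone it.
    Arbiter::get_value(|| Config { name: "default".into() })
}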
/// Wait for the event loop to stop by joining the underlying thread (if present). /// Wait for the event loop to stop by joining the underlying thread (if present).
@ -246,9 +315,17 @@ impl Arbiter {
} }
} }
impl Eq for Arbiter {}
impl PartialEq for Arbiter {
fn eq(&self, other: &Self) -> bool {
self.id == other.id && self.sys_id == other.sys_id
}
}
pub(crate) struct ArbiterController { pub(crate) struct ArbiterController {
stop: Option<oneshot::Sender<i32>>, stop: Option<oneshot::Sender<i32>>,
rx: ArbiterCommandRx, rx: Receiver<ArbiterCommand>,
} }
impl Drop for ArbiterController { impl Drop for ArbiterController {
@ -264,118 +341,28 @@ impl Drop for ArbiterController {
} }
} }
impl Future for ArbiterController { impl ArbiterController {
type Output = (); pub(super) async fn run(mut self) {
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
loop { loop {
match Pin::new(&mut self.rx).poll_next(cx) { match self.rx.recv().await {
Poll::Ready(None) => return Poll::Ready(()), Ok(ArbiterCommand::Stop) => {
Poll::Ready(Some(item)) => match item { if let Some(stop) = self.stop.take() {
ArbiterCommand::Stop => { let _ = stop.send(0);
if let Some(stop) = self.stop.take() { };
let _ = stop.send(0); break;
};
return Poll::Ready(());
}
ArbiterCommand::Execute(fut) => {
let _ = crate::spawn(fut);
}
ArbiterCommand::ExecuteFn(f) => {
f.call_box();
}
},
Poll::Pending => return Poll::Pending,
}
}
}
}
#[derive(Debug)]
pub(super) enum SystemCommand {
Exit(i32),
RegisterArbiter(usize, Arbiter),
UnregisterArbiter(usize),
}
pub(super) struct SystemArbiter {
stop: Option<oneshot::Sender<i32>>,
commands: ServerCommandRx,
arbiters: HashMap<usize, Arbiter>,
}
impl SystemArbiter {
pub(super) fn new(
stop: oneshot::Sender<i32>,
commands: Receiver<SystemCommand>,
) -> Self {
SystemArbiter {
commands: Box::pin(commands),
stop: Some(stop),
arbiters: HashMap::new(),
}
}
}
impl fmt::Debug for SystemArbiter {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("SystemArbiter")
.field("arbiters", &self.arbiters)
.finish()
}
}
impl Future for SystemArbiter {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
loop {
let cmd = ready!(Pin::new(&mut self.commands).poll_next(cx));
log::debug!("Received system command: {:?}", cmd);
match cmd {
None => {
log::debug!("System stopped");
return Poll::Ready(());
} }
Some(cmd) => match cmd { Ok(ArbiterCommand::Execute(fut)) => {
SystemCommand::Exit(code) => { let _ = crate::spawn(fut);
log::debug!("Stopping system with {} code", code); }
Ok(ArbiterCommand::ExecuteFn(f)) => {
// stop arbiters f.call_box();
for arb in self.arbiters.values() { }
arb.stop(); Err(_) => break,
}
// stop event loop
if let Some(stop) = self.stop.take() {
let _ = stop.send(code);
}
}
SystemCommand::RegisterArbiter(name, hnd) => {
self.arbiters.insert(name, hnd);
}
SystemCommand::UnregisterArbiter(name) => {
self.arbiters.remove(&name);
}
},
} }
} }
} }
} }
pub(super) trait FnExec: Send + 'static {
fn call_box(self: Box<Self>);
}
impl<F> FnExec for F
where
F: FnOnce() + Send + 'static,
{
#[allow(clippy::boxed_local)]
fn call_box(self: Box<Self>) {
(*self)()
}
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
@ -387,6 +374,7 @@ mod tests {
assert!(Arbiter::get_item::<&'static str, _, _>(|s| *s == "test")); assert!(Arbiter::get_item::<&'static str, _, _>(|s| *s == "test"));
assert!(Arbiter::get_mut_item::<&'static str, _, _>(|s| *s == "test")); assert!(Arbiter::get_mut_item::<&'static str, _, _>(|s| *s == "test"));
assert!(Arbiter::contains_item::<&'static str>()); assert!(Arbiter::contains_item::<&'static str>());
assert!(Arbiter::get_value(|| 64u64) == 64);
assert!(format!("{:?}", Arbiter::current()).contains("Arbiter")); assert!(format!("{:?}", Arbiter::current()).contains("Arbiter"));
} }
} }

View file

@ -1,9 +1,9 @@
use std::{future::Future, io, pin::Pin, sync::Arc}; use std::{future::Future, io, marker::PhantomData, pin::Pin, rc::Rc, sync::Arc};
use async_channel::unbounded; use async_channel::unbounded;
use crate::arbiter::{Arbiter, ArbiterController, SystemArbiter}; use crate::arbiter::{Arbiter, ArbiterController};
use crate::{system::SystemConfig, System}; use crate::system::{System, SystemCommand, SystemConfig, SystemSupport};
/// Builder struct for a ntex runtime. /// Builder struct for a ntex runtime.
/// ///
@ -17,6 +17,8 @@ pub struct Builder {
stop_on_panic: bool, stop_on_panic: bool,
/// New thread stack size /// New thread stack size
stack_size: usize, stack_size: usize,
/// Arbiters ping interval
ping_interval: usize,
/// Block on fn /// Block on fn
block_on: Option<Arc<dyn Fn(Pin<Box<dyn Future<Output = ()>>>) + Sync + Send>>, block_on: Option<Arc<dyn Fn(Pin<Box<dyn Future<Output = ()>>>) + Sync + Send>>,
} }
@ -28,6 +30,7 @@ impl Builder {
stop_on_panic: false, stop_on_panic: false,
stack_size: 0, stack_size: 0,
block_on: None, block_on: None,
ping_interval: 1000,
} }
} }
@ -52,6 +55,15 @@ impl Builder {
self self
} }
/// Sets ping interval for spawned arbiters.
///
/// Interval is in milliseconds. By default 1000 milliseconds is set.
/// To disable pings set value to zero.
pub fn ping_interval(mut self, interval: usize) -> Self {
self.ping_interval = interval;
self
}
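A hedged sketch of wiring this up when building a system; Builder::finish() is assumed from the rest of this file and is not shown in the hunk above.

fn build_system() -> SystemRunner {
    System::build()
        .ping_interval(250) // milliseconds; 0 disables arbiter pings
        .finish()
}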
/// Use custom block_on function /// Use custom block_on function
pub fn block_on<F>(mut self, block_on: F) -> Self pub fn block_on<F>(mut self, block_on: F) -> Self
where where
@ -74,18 +86,20 @@ impl Builder {
stop_on_panic: self.stop_on_panic, stop_on_panic: self.stop_on_panic,
}; };
let (arb, arb_controller) = Arbiter::new_system(); let (arb, controller) = Arbiter::new_system(self.name.clone());
let system = System::construct(sys_sender, arb, config); let _ = sys_sender.try_send(SystemCommand::RegisterArbiter(arb.id(), arb.clone()));
let system = System::construct(sys_sender, arb.clone(), config);
// system arbiter // system arbiter
let arb = SystemArbiter::new(stop_tx, sys_receiver); let support = SystemSupport::new(stop_tx, sys_receiver, self.ping_interval);
// init system arbiter and run configuration method // init system arbiter and run configuration method
SystemRunner { SystemRunner {
stop, stop,
arb, support,
arb_controller, controller,
system, system,
_t: PhantomData,
} }
} }
} }
@ -94,9 +108,10 @@ impl Builder {
#[must_use = "SystemRunner must be run"] #[must_use = "SystemRunner must be run"]
pub struct SystemRunner { pub struct SystemRunner {
stop: oneshot::Receiver<i32>, stop: oneshot::Receiver<i32>,
arb: SystemArbiter, support: SystemSupport,
arb_controller: ArbiterController, controller: ArbiterController,
system: System, system: System,
_t: PhantomData<Rc<()>>,
} }
impl SystemRunner { impl SystemRunner {
@ -113,15 +128,14 @@ impl SystemRunner {
/// This function will start event loop and will finish once the /// This function will start event loop and will finish once the
/// `System::stop()` function is called. /// `System::stop()` function is called.
#[inline]
pub fn run<F>(self, f: F) -> io::Result<()> pub fn run<F>(self, f: F) -> io::Result<()>
where where
F: FnOnce() -> io::Result<()> + 'static, F: FnOnce() -> io::Result<()> + 'static,
{ {
let SystemRunner { let SystemRunner {
controller,
stop, stop,
arb, support,
arb_controller,
system, system,
.. ..
} = self; } = self;
@ -130,8 +144,8 @@ impl SystemRunner {
system.config().block_on(async move { system.config().block_on(async move {
f()?; f()?;
let _ = crate::spawn(arb); let _ = crate::spawn(support.run());
let _ = crate::spawn(arb_controller); let _ = crate::spawn(controller.run());
match stop.await { match stop.await {
Ok(code) => { Ok(code) => {
if code != 0 { if code != 0 {
@ -149,22 +163,21 @@ impl SystemRunner {
} }
/// Execute a future and wait for result. /// Execute a future and wait for result.
#[inline]
pub fn block_on<F, R>(self, fut: F) -> R pub fn block_on<F, R>(self, fut: F) -> R
where where
F: Future<Output = R> + 'static, F: Future<Output = R> + 'static,
R: 'static, R: 'static,
{ {
let SystemRunner { let SystemRunner {
arb, controller,
arb_controller, support,
system, system,
.. ..
} = self; } = self;
system.config().block_on(async move { system.config().block_on(async move {
let _ = crate::spawn(arb); let _ = crate::spawn(support.run());
let _ = crate::spawn(arb_controller); let _ = crate::spawn(controller.run());
fut.await fut.await
}) })
} }
@ -177,16 +190,16 @@ impl SystemRunner {
R: 'static, R: 'static,
{ {
let SystemRunner { let SystemRunner {
arb, controller,
arb_controller, support,
.. ..
} = self; } = self;
// run loop // run loop
tok_io::task::LocalSet::new() tok_io::task::LocalSet::new()
.run_until(async move { .run_until(async move {
let _ = crate::spawn(arb); let _ = crate::spawn(support.run());
let _ = crate::spawn(arb_controller); let _ = crate::spawn(controller.run());
fut.await fut.await
}) })
.await .await
@ -242,6 +255,7 @@ mod tests {
thread::spawn(move || { thread::spawn(move || {
let runner = crate::System::build() let runner = crate::System::build()
.stop_on_panic(true) .stop_on_panic(true)
.ping_interval(25)
.block_on(|fut| { .block_on(|fut| {
let rt = tok_io::runtime::Builder::new_current_thread() let rt = tok_io::runtime::Builder::new_current_thread()
.enable_all() .enable_all()
@ -270,6 +284,18 @@ mod tests {
.unwrap(); .unwrap();
assert_eq!(id, id2); assert_eq!(id, id2);
let (tx, rx) = mpsc::channel();
sys.arbiter().spawn(async move {
futures_timer::Delay::new(std::time::Duration::from_millis(100)).await;
let recs = System::list_arbiter_pings(Arbiter::current().id(), |recs| {
recs.unwrap().clone()
});
let _ = tx.send(recs);
});
let recs = rx.recv().unwrap();
assert!(!recs.is_empty());
sys.stop(); sys.stop();
} }
} }

View file

@ -8,7 +8,7 @@ mod system;
pub use self::arbiter::Arbiter; pub use self::arbiter::Arbiter;
pub use self::builder::{Builder, SystemRunner}; pub use self::builder::{Builder, SystemRunner};
pub use self::system::System; pub use self::system::{Id, PingRecord, System};
thread_local! { thread_local! {
static CB: RefCell<(TBefore, TEnter, TExit, TAfter)> = RefCell::new(( static CB: RefCell<(TBefore, TEnter, TExit, TAfter)> = RefCell::new((
@ -112,6 +112,8 @@ mod tokio {
/// ///
/// This function panics if ntex system is not running. /// This function panics if ntex system is not running.
#[inline] #[inline]
#[doc(hidden)]
#[deprecated]
pub fn spawn_fn<F, R>(f: F) -> tok_io::task::JoinHandle<R::Output> pub fn spawn_fn<F, R>(f: F) -> tok_io::task::JoinHandle<R::Output>
where where
F: FnOnce() -> R + 'static, F: FnOnce() -> R + 'static,
@ -196,6 +198,8 @@ mod compio {
/// ///
/// This function panics if ntex system is not running. /// This function panics if ntex system is not running.
#[inline] #[inline]
#[doc(hidden)]
#[deprecated]
pub fn spawn_fn<F, R>(f: F) -> JoinHandle<R::Output> pub fn spawn_fn<F, R>(f: F) -> JoinHandle<R::Output>
where where
F: FnOnce() -> R + 'static, F: FnOnce() -> R + 'static,
@ -248,15 +252,38 @@ mod compio {
} }
#[allow(dead_code)] #[allow(dead_code)]
#[cfg(feature = "async-std")] #[cfg(feature = "neon")]
mod asyncstd { mod neon {
use std::future::{poll_fn, Future}; use std::task::{ready, Context, Poll};
use std::{fmt, pin::Pin, task::ready, task::Context, task::Poll}; use std::{fmt, future::poll_fn, future::Future, pin::Pin};
use ntex_neon::Runtime;
/// Runs the provided future, blocking the current thread until the future /// Runs the provided future, blocking the current thread until the future
/// completes. /// completes.
pub fn block_on<F: Future<Output = ()>>(fut: F) { pub fn block_on<F: Future<Output = ()>>(fut: F) {
async_std::task::block_on(fut); let rt = Runtime::new().unwrap();
log::info!(
"Starting neon runtime, driver {:?}",
rt.driver().tp().name()
);
rt.block_on(fut);
}
/// Spawns a blocking task.
///
/// The task will be spawned onto a thread pool specifically dedicated
/// to blocking tasks. This is useful to prevent long-running synchronous
/// operations from blocking the main futures executor.
pub fn spawn_blocking<F, T>(f: F) -> JoinHandle<T>
where
F: FnOnce() -> T + Send + Sync + 'static,
T: Send + 'static,
{
JoinHandle {
fut: Some(ntex_neon::spawn_blocking(f)),
}
} }
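A hedged usage sketch for the neon spawn_blocking above; the file-system call and error mapping are illustrative only.

use std::io;

async fn file_len(path: std::path::PathBuf) -> io::Result<u64> {
    let joined = spawn_blocking(move || std::fs::metadata(path).map(|m| m.len())).await;
    // The outer error means the blocking task itself failed; the inner one is the fs error.
    joined.map_err(|_| io::Error::new(io::ErrorKind::Other, "blocking task failed"))?
}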
/// Spawn a future on the current thread. This does not create a new Arbiter /// Spawn a future on the current thread. This does not create a new Arbiter
@ -267,29 +294,29 @@ mod asyncstd {
/// ///
/// This function panics if ntex system is not running. /// This function panics if ntex system is not running.
#[inline] #[inline]
pub fn spawn<F>(mut f: F) -> JoinHandle<F::Output> pub fn spawn<F>(f: F) -> Task<F::Output>
where where
F: Future + 'static, F: Future + 'static,
{ {
let ptr = crate::CB.with(|cb| (cb.borrow().0)()); let ptr = crate::CB.with(|cb| (cb.borrow().0)());
JoinHandle { let task = ntex_neon::spawn(async move {
fut: async_std::task::spawn_local(async move { if let Some(ptr) = ptr {
if let Some(ptr) = ptr { let mut f = std::pin::pin!(f);
let mut f = unsafe { Pin::new_unchecked(&mut f) }; let result = poll_fn(|ctx| {
let result = poll_fn(|ctx| { let new_ptr = crate::CB.with(|cb| (cb.borrow().1)(ptr));
let new_ptr = crate::CB.with(|cb| (cb.borrow().1)(ptr)); let result = f.as_mut().poll(ctx);
let result = f.as_mut().poll(ctx); crate::CB.with(|cb| (cb.borrow().2)(new_ptr));
crate::CB.with(|cb| (cb.borrow().2)(new_ptr));
result
})
.await;
crate::CB.with(|cb| (cb.borrow().3)(ptr));
result result
} else { })
f.await .await;
} crate::CB.with(|cb| (cb.borrow().3)(ptr));
}), result
} } else {
f.await
}
});
Task { task: Some(task) }
} }
/// Executes a future on the current thread. This does not create a new Arbiter /// Executes a future on the current thread. This does not create a new Arbiter
@ -300,7 +327,9 @@ mod asyncstd {
/// ///
/// This function panics if ntex system is not running. /// This function panics if ntex system is not running.
#[inline] #[inline]
pub fn spawn_fn<F, R>(f: F) -> JoinHandle<R::Output> #[doc(hidden)]
#[deprecated]
pub fn spawn_fn<F, R>(f: F) -> Task<R::Output>
where where
F: FnOnce() -> R + 'static, F: FnOnce() -> R + 'static,
R: Future + 'static, R: Future + 'static,
@ -308,18 +337,32 @@ mod asyncstd {
spawn(async move { f().await }) spawn(async move { f().await })
} }
/// Spawns a blocking task. /// A spawned task.
/// pub struct Task<T> {
/// The task will be spawned onto a thread pool specifically dedicated task: Option<ntex_neon::Task<T>>,
/// to blocking tasks. This is useful to prevent long-running synchronous }
/// operations from blocking the main futures executor.
pub fn spawn_blocking<F, T>(f: F) -> JoinHandle<T> impl<T> Task<T> {
where pub fn is_finished(&self) -> bool {
F: FnOnce() -> T + Send + 'static, if let Some(hnd) = &self.task {
T: Send + 'static, hnd.is_finished()
{ } else {
JoinHandle { true
fut: async_std::task::spawn_blocking(f), }
}
}
impl<T> Drop for Task<T> {
fn drop(&mut self) {
self.task.take().unwrap().detach();
}
}
impl<T> Future for Task<T> {
type Output = Result<T, JoinError>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
Poll::Ready(Ok(ready!(Pin::new(self.task.as_mut().unwrap()).poll(cx))))
} }
} }
@ -335,128 +378,24 @@ mod asyncstd {
impl std::error::Error for JoinError {} impl std::error::Error for JoinError {}
pub struct JoinHandle<T> { pub struct JoinHandle<T> {
fut: async_std::task::JoinHandle<T>, fut: Option<ntex_neon::JoinHandle<T>>,
}
impl<T> JoinHandle<T> {
pub fn is_finished(&self) -> bool {
self.fut.is_none()
}
} }
impl<T> Future for JoinHandle<T> { impl<T> Future for JoinHandle<T> {
type Output = Result<T, JoinError>; type Output = Result<T, JoinError>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
Poll::Ready(Ok(ready!(Pin::new(&mut self.fut).poll(cx)))) Poll::Ready(
} ready!(Pin::new(self.fut.as_mut().unwrap()).poll(cx))
} .map_err(|_| JoinError)
} .and_then(|result| result.map_err(|_| JoinError)),
)
#[allow(dead_code)]
#[cfg(all(feature = "glommio", target_os = "linux"))]
mod glommio {
use std::future::{poll_fn, Future};
use std::{pin::Pin, task::Context, task::Poll};
use futures_channel::oneshot::Canceled;
use glomm_io::task;
pub type JoinError = Canceled;
/// Runs the provided future, blocking the current thread until the future
/// completes.
pub fn block_on<F: Future<Output = ()>>(fut: F) {
let ex = glomm_io::LocalExecutor::default();
ex.run(async move {
let _ = fut.await;
})
}
/// Spawn a future on the current thread. This does not create a new Arbiter
/// or Arbiter address, it is simply a helper for spawning futures on the current
/// thread.
///
/// # Panics
///
/// This function panics if ntex system is not running.
#[inline]
pub fn spawn<F>(mut f: F) -> JoinHandle<F::Output>
where
F: Future + 'static,
F::Output: 'static,
{
let ptr = crate::CB.with(|cb| (cb.borrow().0)());
JoinHandle {
fut: Either::Left(
glomm_io::spawn_local(async move {
if let Some(ptr) = ptr {
glomm_io::executor().yield_now().await;
let mut f = unsafe { Pin::new_unchecked(&mut f) };
let result = poll_fn(|ctx| {
let new_ptr = crate::CB.with(|cb| (cb.borrow().1)(ptr));
let result = f.as_mut().poll(ctx);
crate::CB.with(|cb| (cb.borrow().2)(new_ptr));
result
})
.await;
crate::CB.with(|cb| (cb.borrow().3)(ptr));
result
} else {
glomm_io::executor().yield_now().await;
f.await
}
})
.detach(),
),
}
}
/// Executes a future on the current thread. This does not create a new Arbiter
/// or Arbiter address, it is simply a helper for executing futures on the current
/// thread.
///
/// # Panics
///
/// This function panics if ntex system is not running.
#[inline]
pub fn spawn_fn<F, R>(f: F) -> JoinHandle<R::Output>
where
F: FnOnce() -> R + 'static,
R: Future + 'static,
{
spawn(async move { f().await })
}
pub fn spawn_blocking<F, R>(f: F) -> JoinHandle<R>
where
F: FnOnce() -> R + Send + 'static,
R: Send + 'static,
{
let fut = glomm_io::executor().spawn_blocking(f);
JoinHandle {
fut: Either::Right(Box::pin(async move { Ok(fut.await) })),
}
}
enum Either<T1, T2> {
Left(T1),
Right(T2),
}
/// Blocking operation completion future. It resolves with results
/// of blocking function execution.
#[allow(clippy::type_complexity)]
pub struct JoinHandle<T> {
fut:
Either<task::JoinHandle<T>, Pin<Box<dyn Future<Output = Result<T, Canceled>>>>>,
}
impl<T> Future for JoinHandle<T> {
type Output = Result<T, Canceled>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
match self.fut {
Either::Left(ref mut f) => match Pin::new(f).poll(cx) {
Poll::Pending => Poll::Pending,
Poll::Ready(res) => Poll::Ready(res.ok_or(Canceled)),
},
Either::Right(ref mut f) => Pin::new(f).poll(cx),
}
} }
} }
} }
@ -464,22 +403,14 @@ mod glommio {
#[cfg(feature = "tokio")] #[cfg(feature = "tokio")]
pub use self::tokio::*; pub use self::tokio::*;
#[cfg(feature = "async-std")]
pub use self::asyncstd::*;
#[cfg(feature = "glommio")]
pub use self::glommio::*;
#[cfg(feature = "compio")] #[cfg(feature = "compio")]
pub use self::compio::*; pub use self::compio::*;
#[cfg(feature = "neon")]
pub use self::neon::*;
#[allow(dead_code)] #[allow(dead_code)]
#[cfg(all( #[cfg(all(not(feature = "tokio"), not(feature = "compio"), not(feature = "neon")))]
not(feature = "tokio"),
not(feature = "async-std"),
not(feature = "compio"),
not(feature = "glommio")
))]
mod no_rt { mod no_rt {
use std::task::{Context, Poll}; use std::task::{Context, Poll};
use std::{fmt, future::Future, marker::PhantomData, pin::Pin}; use std::{fmt, future::Future, marker::PhantomData, pin::Pin};
@ -538,10 +469,5 @@ mod no_rt {
impl std::error::Error for JoinError {} impl std::error::Error for JoinError {}
} }
#[cfg(all( #[cfg(all(not(feature = "tokio"), not(feature = "compio"), not(feature = "neon")))]
not(feature = "tokio"),
not(feature = "async-std"),
not(feature = "compio"),
not(feature = "glommio")
))]
pub use self::no_rt::*; pub use self::no_rt::*;

View file

@ -1,13 +1,31 @@
use std::collections::{HashMap, VecDeque};
use std::sync::{atomic::AtomicUsize, atomic::Ordering, Arc}; use std::sync::{atomic::AtomicUsize, atomic::Ordering, Arc};
use std::time::{Duration, Instant};
use std::{cell::RefCell, fmt, future::Future, pin::Pin, rc::Rc}; use std::{cell::RefCell, fmt, future::Future, pin::Pin, rc::Rc};
use async_channel::Sender; use async_channel::{Receiver, Sender};
use futures_timer::Delay;
use super::arbiter::{Arbiter, SystemCommand}; use super::arbiter::Arbiter;
use super::builder::{Builder, SystemRunner}; use super::builder::{Builder, SystemRunner};
static SYSTEM_COUNT: AtomicUsize = AtomicUsize::new(0); static SYSTEM_COUNT: AtomicUsize = AtomicUsize::new(0);
thread_local!(
static ARBITERS: RefCell<Arbiters> = RefCell::new(Arbiters::default());
static PINGS: RefCell<HashMap<Id, VecDeque<PingRecord>>> =
RefCell::new(HashMap::default());
);
#[derive(Default)]
struct Arbiters {
all: HashMap<Id, Arbiter>,
list: Vec<Arbiter>,
}
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct Id(pub(crate) usize);
/// System is a runtime manager. /// System is a runtime manager.
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct System { pub struct System {
@ -33,14 +51,17 @@ impl System {
/// Constructs new system and sets it as current /// Constructs new system and sets it as current
pub(super) fn construct( pub(super) fn construct(
sys: Sender<SystemCommand>, sys: Sender<SystemCommand>,
arbiter: Arbiter, mut arbiter: Arbiter,
config: SystemConfig, config: SystemConfig,
) -> Self { ) -> Self {
let id = SYSTEM_COUNT.fetch_add(1, Ordering::SeqCst);
arbiter.sys_id = id;
let sys = System { let sys = System {
id,
sys, sys,
config, config,
arbiter, arbiter,
id: SYSTEM_COUNT.fetch_add(1, Ordering::SeqCst),
}; };
System::set_current(sys.clone()); System::set_current(sys.clone());
sys sys
@ -79,8 +100,8 @@ impl System {
} }
/// System id /// System id
pub fn id(&self) -> usize { pub fn id(&self) -> Id {
self.id Id(self.id)
} }
/// Stop the system /// Stop the system
@ -104,6 +125,34 @@ impl System {
&self.arbiter &self.arbiter
} }
/// Retrieves a list of all arbiters in the system.
///
/// This method should be called from the thread where the system has been initialized,
/// typically the "main" thread.
pub fn list_arbiters<F, R>(f: F) -> R
where
F: FnOnce(&[Arbiter]) -> R,
{
ARBITERS.with(|arbs| f(arbs.borrow().list.as_ref()))
}
/// Retrieves a list of the last ping records for the specified arbiter.
///
/// This method should be called from the thread where the system has been initialized,
/// typically the "main" thread.
pub fn list_arbiter_pings<F, R>(id: Id, f: F) -> R
where
F: FnOnce(Option<&VecDeque<PingRecord>>) -> R,
{
PINGS.with(|pings| {
if let Some(recs) = pings.borrow().get(&id) {
f(Some(recs))
} else {
f(None)
}
})
}
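A hedged sketch of inspecting the registry from the main thread using the accessors above (the function name and log line are illustrative):

fn dump_arbiters() {
    System::list_arbiters(|arbs| {
        for arb in arbs {
            log::info!("arbiter {:?} name={}", arb.id(), arb.name());
        }
    });
}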
pub(super) fn sys(&self) -> &Sender<SystemCommand> { pub(super) fn sys(&self) -> &Sender<SystemCommand> {
&self.sys &self.sys
} }
@ -150,3 +199,173 @@ impl fmt::Debug for SystemConfig {
.finish() .finish()
} }
} }
#[derive(Debug)]
pub(super) enum SystemCommand {
Exit(i32),
RegisterArbiter(Id, Arbiter),
UnregisterArbiter(Id),
}
pub(super) struct SystemSupport {
stop: Option<oneshot::Sender<i32>>,
commands: Receiver<SystemCommand>,
ping_interval: Duration,
}
impl SystemSupport {
pub(super) fn new(
stop: oneshot::Sender<i32>,
commands: Receiver<SystemCommand>,
ping_interval: usize,
) -> Self {
Self {
commands,
stop: Some(stop),
ping_interval: Duration::from_millis(ping_interval as u64),
}
}
pub(super) async fn run(mut self) {
ARBITERS.with(move |arbs| {
let mut arbiters = arbs.borrow_mut();
arbiters.all.clear();
arbiters.list.clear();
});
loop {
match self.commands.recv().await {
Ok(SystemCommand::Exit(code)) => {
log::debug!("Stopping system with {} code", code);
// stop arbiters
ARBITERS.with(move |arbs| {
let mut arbiters = arbs.borrow_mut();
for arb in arbiters.list.drain(..) {
arb.stop();
}
arbiters.all.clear();
});
// stop event loop
if let Some(stop) = self.stop.take() {
let _ = stop.send(code);
}
}
Ok(SystemCommand::RegisterArbiter(id, hnd)) => {
crate::spawn(ping_arbiter(hnd.clone(), self.ping_interval));
ARBITERS.with(move |arbs| {
let mut arbiters = arbs.borrow_mut();
arbiters.all.insert(id, hnd.clone());
arbiters.list.push(hnd);
});
}
Ok(SystemCommand::UnregisterArbiter(id)) => {
ARBITERS.with(move |arbs| {
let mut arbiters = arbs.borrow_mut();
if let Some(hnd) = arbiters.all.remove(&id) {
for (idx, arb) in arbiters.list.iter().enumerate() {
if &hnd == arb {
arbiters.list.remove(idx);
break;
}
}
}
});
}
Err(_) => {
log::debug!("System stopped");
return;
}
}
}
}
}
#[derive(Copy, Clone, Debug)]
pub struct PingRecord {
/// Ping start time
pub start: Instant,
/// Round-trip time; if the value is not set then the ping is still in progress
pub rtt: Option<Duration>,
}
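A hedged sketch of reading the newest record for one arbiter; records are pushed to the front and truncated to ten, so front() is the most recent ping:

fn last_rtt(id: Id) -> Option<std::time::Duration> {
    System::list_arbiter_pings(id, |recs| {
        recs.and_then(|r| r.front()).and_then(|rec| rec.rtt)
    })
}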
async fn ping_arbiter(arb: Arbiter, interval: Duration) {
loop {
Delay::new(interval).await;
// check if arbiter is still active
let is_alive = ARBITERS.with(|arbs| arbs.borrow().all.contains_key(&arb.id()));
if !is_alive {
PINGS.with(|pings| pings.borrow_mut().remove(&arb.id()));
break;
}
// calc ttl
let start = Instant::now();
PINGS.with(|pings| {
let mut p = pings.borrow_mut();
let recs = p.entry(arb.id()).or_default();
recs.push_front(PingRecord { start, rtt: None });
recs.truncate(10);
});
let result = arb
.spawn_with(|| async {
yield_to().await;
})
.await;
if result.is_err() {
break;
}
PINGS.with(|pings| {
pings
.borrow_mut()
.get_mut(&arb.id())
.unwrap()
.front_mut()
.unwrap()
.rtt = Some(Instant::now() - start);
});
}
}
async fn yield_to() {
use std::task::{Context, Poll};
struct Yield {
completed: bool,
}
impl Future for Yield {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
if self.completed {
return Poll::Ready(());
}
self.completed = true;
cx.waker().wake_by_ref();
Poll::Pending
}
}
Yield { completed: false }.await;
}
pub(super) trait FnExec: Send + 'static {
fn call_box(self: Box<Self>);
}
impl<F> FnExec for F
where
F: FnOnce() + Send + 'static,
{
#[allow(clippy::boxed_local)]
fn call_box(self: Box<Self>) {
(*self)()
}
}

View file

@ -1,5 +1,29 @@
# Changes # Changes
## [2.7.3] - 2025-03-28
* Better worker availability handling
## [2.7.2] - 2025-03-27
* Handle paused state
## [2.7.1] - 2025-02-28
* Fix setting core affinity outside of worker start #508
## [2.7.0] - 2025-01-31
* Cpu affinity support for workers
## [2.6.2] - 2024-12-30
* Fix error log
## [2.6.1] - 2024-12-26
* Tune shutdown logging
## [2.6.0] - 2024-12-04 ## [2.6.0] - 2024-12-04
* Use updated Service trait * Use updated Service trait

View file

@ -1,6 +1,6 @@
[package] [package]
name = "ntex-server" name = "ntex-server"
version = "2.6.0" version = "2.7.4"
authors = ["ntex contributors <team@ntex.rs>"] authors = ["ntex contributors <team@ntex.rs>"]
description = "Server for ntex framework" description = "Server for ntex framework"
keywords = ["network", "framework", "async", "futures"] keywords = ["network", "framework", "async", "futures"]
@ -22,12 +22,13 @@ ntex-service = "3.4"
ntex-rt = "0.4" ntex-rt = "0.4"
ntex-util = "2.8" ntex-util = "2.8"
async-channel = "2" async-channel = { workspace = true }
async-broadcast = "0.7" atomic-waker = { workspace = true }
polling = "3.3" core_affinity = { workspace = true }
log = "0.4" oneshot = { workspace = true }
socket2 = "0.5" polling = { workspace = true }
oneshot = { version = "0.1", default-features = false, features = ["async"] } log = { workspace = true }
socket2 = { workspace = true }
[dev-dependencies] [dev-dependencies]
ntex = "2" ntex = "2"

View file

@ -2,6 +2,7 @@ use std::sync::atomic::{AtomicBool, Ordering};
use std::{cell::Cell, cell::RefCell, collections::VecDeque, rc::Rc, sync::Arc}; use std::{cell::Cell, cell::RefCell, collections::VecDeque, rc::Rc, sync::Arc};
use async_channel::{unbounded, Receiver, Sender}; use async_channel::{unbounded, Receiver, Sender};
use core_affinity::CoreId;
use ntex_rt::System; use ntex_rt::System;
use ntex_util::future::join_all; use ntex_util::future::join_all;
use ntex_util::time::{sleep, timeout, Millis}; use ntex_util::time::{sleep, timeout, Millis};
@ -69,9 +70,16 @@ impl<F: ServerConfiguration> ServerManager<F> {
// handle cmd // handle cmd
let _ = ntex_rt::spawn(handle_cmd(mgr.clone(), rx)); let _ = ntex_rt::spawn(handle_cmd(mgr.clone(), rx));
// Retrieve the IDs of all active CPU cores.
let mut cores = if cfg.affinity {
core_affinity::get_core_ids().unwrap_or_default()
} else {
Vec::new()
};
// start workers // start workers
for _ in 0..mgr.0.cfg.num { for _ in 0..mgr.0.cfg.num {
start_worker(mgr.clone()); start_worker(mgr.clone(), cores.pop());
} }
let srv = Server::new(tx, shared); let srv = Server::new(tx, shared);
@ -128,10 +136,10 @@ impl<F: ServerConfiguration> ServerManager<F> {
} }
} }
fn start_worker<F: ServerConfiguration>(mgr: ServerManager<F>) { fn start_worker<F: ServerConfiguration>(mgr: ServerManager<F>, cid: Option<CoreId>) {
let _ = ntex_rt::spawn(async move { let _ = ntex_rt::spawn(async move {
let id = mgr.next_id(); let id = mgr.next_id();
let mut wrk = Worker::start(id, mgr.factory()); let mut wrk = Worker::start(id, mgr.factory(), cid);
loop { loop {
match wrk.status() { match wrk.status() {
@ -141,7 +149,7 @@ fn start_worker<F: ServerConfiguration>(mgr: ServerManager<F>) {
mgr.unavailable(wrk); mgr.unavailable(wrk);
sleep(RESTART_DELAY).await; sleep(RESTART_DELAY).await;
if !mgr.stopping() { if !mgr.stopping() {
wrk = Worker::start(id, mgr.factory()); wrk = Worker::start(id, mgr.factory(), cid);
} else { } else {
return; return;
} }
@ -172,7 +180,7 @@ impl<F: ServerConfiguration> HandleCmdState<F> {
fn process(&mut self, mut item: F::Item) { fn process(&mut self, mut item: F::Item) {
loop { loop {
if !self.workers.is_empty() { if !self.workers.is_empty() {
if self.next > self.workers.len() { if self.next >= self.workers.len() {
self.next = self.workers.len() - 1; self.next = self.workers.len() - 1;
} }
match self.workers[self.next].send(item) { match self.workers[self.next].send(item) {
@ -203,10 +211,9 @@ impl<F: ServerConfiguration> HandleCmdState<F> {
match upd { match upd {
Update::Available(worker) => { Update::Available(worker) => {
self.workers.push(worker); self.workers.push(worker);
self.workers.sort();
if self.workers.len() == 1 { if self.workers.len() == 1 {
self.mgr.resume(); self.mgr.resume();
} else {
self.workers.sort();
} }
} }
Update::Unavailable(worker) => { Update::Unavailable(worker) => {
@ -225,6 +232,9 @@ impl<F: ServerConfiguration> HandleCmdState<F> {
if let Err(item) = self.workers[0].send(item) { if let Err(item) = self.workers[0].send(item) {
self.backlog.push_back(item); self.backlog.push_back(item);
self.workers.remove(0); self.workers.remove(0);
if self.workers.is_empty() {
self.mgr.pause();
}
break; break;
} }
} }
@ -262,10 +272,10 @@ impl<F: ServerConfiguration> HandleCmdState<F> {
for tx in notify { for tx in notify {
let _ = tx.send(()); let _ = tx.send(());
} }
sleep(STOP_DELAY).await;
// stop system if server was spawned // stop system if server was spawned
if self.mgr.0.cfg.stop_runtime { if self.mgr.0.cfg.stop_runtime {
sleep(STOP_DELAY).await;
System::current().stop(); System::current().stop();
} }
} }

View file

@ -92,12 +92,14 @@ impl AcceptLoop {
/// Start accept loop /// Start accept loop
pub fn start(mut self, socks: Vec<(Token, Listener)>, srv: Server) { pub fn start(mut self, socks: Vec<(Token, Listener)>, srv: Server) {
let (tx, rx_start) = oneshot::channel();
let (rx, poll) = self let (rx, poll) = self
.inner .inner
.take() .take()
.expect("AcceptLoop cannot be used multiple times"); .expect("AcceptLoop cannot be used multiple times");
Accept::start( Accept::start(
tx,
rx, rx,
poll, poll,
socks, socks,
@ -105,6 +107,8 @@ impl AcceptLoop {
self.notify.clone(), self.notify.clone(),
self.status_handler.take(), self.status_handler.take(),
); );
let _ = rx_start.recv();
} }
} }
@ -121,6 +125,7 @@ impl fmt::Debug for AcceptLoop {
struct Accept { struct Accept {
poller: Arc<Poller>, poller: Arc<Poller>,
rx: mpsc::Receiver<AcceptorCommand>, rx: mpsc::Receiver<AcceptorCommand>,
tx: Option<oneshot::Sender<()>>,
sockets: Vec<ServerSocketInfo>, sockets: Vec<ServerSocketInfo>,
srv: Server, srv: Server,
notify: AcceptNotify, notify: AcceptNotify,
@ -131,6 +136,7 @@ struct Accept {
impl Accept { impl Accept {
fn start( fn start(
tx: oneshot::Sender<()>,
rx: mpsc::Receiver<AcceptorCommand>, rx: mpsc::Receiver<AcceptorCommand>,
poller: Arc<Poller>, poller: Arc<Poller>,
socks: Vec<(Token, Listener)>, socks: Vec<(Token, Listener)>,
@ -145,11 +151,12 @@ impl Accept {
.name("ntex-server accept loop".to_owned()) .name("ntex-server accept loop".to_owned())
.spawn(move || { .spawn(move || {
System::set_current(sys); System::set_current(sys);
Accept::new(rx, poller, socks, srv, notify, status_handler).poll() Accept::new(tx, rx, poller, socks, srv, notify, status_handler).poll()
}); });
} }
fn new( fn new(
tx: oneshot::Sender<()>,
rx: mpsc::Receiver<AcceptorCommand>, rx: mpsc::Receiver<AcceptorCommand>,
poller: Arc<Poller>, poller: Arc<Poller>,
socks: Vec<(Token, Listener)>, socks: Vec<(Token, Listener)>,
@ -175,6 +182,7 @@ impl Accept {
notify, notify,
srv, srv,
status_handler, status_handler,
tx: Some(tx),
backpressure: true, backpressure: true,
backlog: VecDeque::new(), backlog: VecDeque::new(),
} }
@ -192,19 +200,23 @@ impl Accept {
// Create storage for events // Create storage for events
let mut events = Events::with_capacity(NonZeroUsize::new(512).unwrap()); let mut events = Events::with_capacity(NonZeroUsize::new(512).unwrap());
let mut timeout = Some(Duration::ZERO);
loop { loop {
if let Err(e) = self.poller.wait(&mut events, None) { if let Err(e) = self.poller.wait(&mut events, timeout) {
if e.kind() == io::ErrorKind::Interrupted { if e.kind() != io::ErrorKind::Interrupted {
continue;
} else {
panic!("Cannot wait for events in poller: {}", e) panic!("Cannot wait for events in poller: {}", e)
} }
} else if timeout.is_some() {
timeout = None;
let _ = self.tx.take().unwrap().send(());
} }
for event in events.iter() { for idx in 0..self.sockets.len() {
let readd = self.accept(event.key); if self.sockets[idx].registered.get() {
if readd { let readd = self.accept(idx);
self.add_source(event.key); if readd {
self.add_source(idx);
}
} }
} }
@ -215,13 +227,13 @@ impl Accept {
for info in self.sockets.drain(..) { for info in self.sockets.drain(..) {
info.sock.remove_source() info.sock.remove_source()
} }
log::info!("Accept loop has been stopped");
if let Some(rx) = rx { if let Some(rx) = rx {
thread::sleep(EXIT_TIMEOUT); thread::sleep(EXIT_TIMEOUT);
let _ = rx.send(()); let _ = rx.send(());
} }
log::trace!("Accept loop has been stopped");
break; break;
} }
} }
@ -295,25 +307,25 @@ impl Accept {
Ok(cmd) => match cmd { Ok(cmd) => match cmd {
AcceptorCommand::Stop(rx) => { AcceptorCommand::Stop(rx) => {
if !self.backpressure { if !self.backpressure {
log::trace!("Stopping accept loop"); log::info!("Stopping accept loop");
self.backpressure(true); self.backpressure(true);
} }
break Either::Right(Some(rx)); break Either::Right(Some(rx));
} }
AcceptorCommand::Terminate => { AcceptorCommand::Terminate => {
log::trace!("Stopping accept loop"); log::info!("Stopping accept loop");
self.backpressure(true); self.backpressure(true);
break Either::Right(None); break Either::Right(None);
} }
AcceptorCommand::Pause => { AcceptorCommand::Pause => {
if !self.backpressure { if !self.backpressure {
log::trace!("Pausing accept loop"); log::info!("Pausing accept loop");
self.backpressure(true); self.backpressure(true);
} }
} }
AcceptorCommand::Resume => { AcceptorCommand::Resume => {
if self.backpressure { if self.backpressure {
log::trace!("Resuming accept loop"); log::info!("Resuming accept loop");
self.backpressure(false); self.backpressure(false);
} }
} }
@ -325,10 +337,11 @@ impl Accept {
break match err { break match err {
mpsc::TryRecvError::Empty => Either::Left(()), mpsc::TryRecvError::Empty => Either::Left(()),
mpsc::TryRecvError::Disconnected => { mpsc::TryRecvError::Disconnected => {
log::error!("Dropping accept loop");
self.backpressure(true); self.backpressure(true);
Either::Right(None) Either::Right(None)
} }
} };
} }
} }
} }

View file

@ -110,6 +110,14 @@ impl ServerBuilder {
self self
} }
/// Enable cpu affinity
///
/// By default affinity is disabled.
pub fn enable_affinity(mut self) -> Self {
self.pool = self.pool.enable_affinity();
self
}
/// Timeout for graceful workers shutdown. /// Timeout for graceful workers shutdown.
/// ///
/// After receiving a stop signal, workers have this much time to finish /// After receiving a stop signal, workers have this much time to finish
@ -360,7 +368,7 @@ pub fn bind_addr<S: net::ToSocketAddrs>(
Err(e) Err(e)
} else { } else {
Err(io::Error::new( Err(io::Error::new(
io::ErrorKind::Other, io::ErrorKind::InvalidInput,
"Cannot bind to address.", "Cannot bind to address.",
)) ))
} }

View file

@ -59,8 +59,13 @@ where
.workers(1) .workers(1)
.disable_signals() .disable_signals()
.run(); .run();
tx.send((system, local_addr, server))
.expect("Failed to send Server to TestServer"); ntex_rt::spawn(async move {
ntex_util::time::sleep(ntex_util::time::Millis(75)).await;
tx.send((system, local_addr, server))
.expect("Failed to send Server to TestServer");
});
Ok(()) Ok(())
}) })
}); });

View file

@ -11,6 +11,7 @@ pub struct WorkerPool {
pub(crate) no_signals: bool, pub(crate) no_signals: bool,
pub(crate) stop_runtime: bool, pub(crate) stop_runtime: bool,
pub(crate) shutdown_timeout: Millis, pub(crate) shutdown_timeout: Millis,
pub(crate) affinity: bool,
} }
impl Default for WorkerPool { impl Default for WorkerPool {
@ -22,12 +23,18 @@ impl Default for WorkerPool {
impl WorkerPool { impl WorkerPool {
/// Create new Server builder instance /// Create new Server builder instance
pub fn new() -> Self { pub fn new() -> Self {
let num = core_affinity::get_core_ids()
.map(|v| v.len())
.unwrap_or_else(|| {
std::thread::available_parallelism().map_or(2, std::num::NonZeroUsize::get)
});
WorkerPool { WorkerPool {
num: std::thread::available_parallelism() num,
.map_or(2, std::num::NonZeroUsize::get),
no_signals: false, no_signals: false,
stop_runtime: false, stop_runtime: false,
shutdown_timeout: DEFAULT_SHUTDOWN_TIMEOUT, shutdown_timeout: DEFAULT_SHUTDOWN_TIMEOUT,
affinity: false,
} }
} }
@ -68,6 +75,14 @@ impl WorkerPool {
self self
} }
/// Enable core affinity
///
/// By default affinity is disabled.
pub fn enable_affinity(mut self) -> Self {
self.affinity = true;
self
}
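A hedged usage sketch: with affinity enabled the manager pops one CoreId per worker (see the start_worker change above), so pinning degrades gracefully when there are more workers than detected cores.

fn pinned_pool() -> WorkerPool {
    // Worker count defaults to the number of detected cores (see new() above).
    WorkerPool::new().enable_affinity()
}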
/// Starts processing incoming items and return server controller. /// Starts processing incoming items and return server controller.
pub fn run<F: ServerConfiguration>(self, factory: F) -> Server<F::Item> { pub fn run<F: ServerConfiguration>(self, factory: F) -> Server<F::Item> {
crate::manager::ServerManager::start(self, factory) crate::manager::ServerManager::start(self, factory)

View file

@ -2,8 +2,9 @@ use std::sync::atomic::{AtomicBool, Ordering};
use std::task::{ready, Context, Poll}; use std::task::{ready, Context, Poll};
use std::{cmp, future::poll_fn, future::Future, hash, pin::Pin, sync::Arc}; use std::{cmp, future::poll_fn, future::Future, hash, pin::Pin, sync::Arc};
use async_broadcast::{self as bus, broadcast};
use async_channel::{unbounded, Receiver, Sender}; use async_channel::{unbounded, Receiver, Sender};
use atomic_waker::AtomicWaker;
use core_affinity::CoreId;
use ntex_rt::{spawn, Arbiter}; use ntex_rt::{spawn, Arbiter};
use ntex_service::{Pipeline, PipelineBinding, Service, ServiceFactory}; use ntex_service::{Pipeline, PipelineBinding, Service, ServiceFactory};
@ -77,7 +78,7 @@ pub struct WorkerStop(oneshot::Receiver<bool>);
impl<T> Worker<T> { impl<T> Worker<T> {
/// Start worker. /// Start worker.
pub fn start<F>(id: WorkerId, cfg: F) -> Worker<T> pub fn start<F>(id: WorkerId, cfg: F, cid: Option<CoreId>) -> Worker<T>
where where
T: Send + 'static, T: Send + 'static,
F: ServerConfiguration<Item = T>, F: ServerConfiguration<Item = T>,
@ -87,15 +88,21 @@ impl<T> Worker<T> {
let (avail, avail_tx) = WorkerAvailability::create(); let (avail, avail_tx) = WorkerAvailability::create();
Arbiter::default().exec_fn(move || { Arbiter::default().exec_fn(move || {
if let Some(cid) = cid {
if core_affinity::set_for_current(cid) {
log::info!("Set affinity to {:?} for worker {:?}", cid, id);
}
}
let _ = spawn(async move { let _ = spawn(async move {
log::info!("Starting worker {:?}", id); log::info!("Starting worker {:?}", id);
log::debug!("Creating server instance in {:?}", id); log::debug!("Creating server instance in {:?}", id);
let factory = cfg.create().await; let factory = cfg.create().await;
log::debug!("Server instance has been created in {:?}", id);
match create(id, rx1, rx2, factory, avail_tx).await { match create(id, rx1, rx2, factory, avail_tx).await {
Ok((svc, wrk)) => { Ok((svc, wrk)) => {
log::debug!("Server instance has been created in {:?}", id);
run_worker(svc, wrk).await; run_worker(svc, wrk).await;
} }
Err(e) => { Err(e) => {
@ -144,10 +151,8 @@ impl<T> Worker<T> {
if self.failed.load(Ordering::Acquire) { if self.failed.load(Ordering::Acquire) {
WorkerStatus::Failed WorkerStatus::Failed
} else { } else {
// cleanup updates self.avail.wait_for_update().await;
while self.avail.notify.try_recv().is_ok() {} if self.avail.failed() {
if self.avail.notify.recv_direct().await.is_err() {
self.failed.store(true, Ordering::Release); self.failed.store(true, Ordering::Release);
} }
self.status() self.status()
@ -189,52 +194,85 @@ impl Future for WorkerStop {
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
struct WorkerAvailability { struct WorkerAvailability {
notify: bus::Receiver<()>, inner: Arc<Inner>,
available: Arc<AtomicBool>,
} }
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
struct WorkerAvailabilityTx { struct WorkerAvailabilityTx {
notify: bus::Sender<()>, inner: Arc<Inner>,
available: Arc<AtomicBool>, }
#[derive(Debug)]
struct Inner {
waker: AtomicWaker,
updated: AtomicBool,
available: AtomicBool,
failed: AtomicBool,
} }
impl WorkerAvailability { impl WorkerAvailability {
fn create() -> (Self, WorkerAvailabilityTx) { fn create() -> (Self, WorkerAvailabilityTx) {
let (mut tx, rx) = broadcast(16); let inner = Arc::new(Inner {
tx.set_overflow(true); waker: AtomicWaker::new(),
updated: AtomicBool::new(false),
available: AtomicBool::new(false),
failed: AtomicBool::new(false),
});
let avail = WorkerAvailability { let avail = WorkerAvailability {
notify: rx, inner: inner.clone(),
available: Arc::new(AtomicBool::new(false)),
};
let avail_tx = WorkerAvailabilityTx {
notify: tx,
available: avail.available.clone(),
}; };
let avail_tx = WorkerAvailabilityTx { inner };
(avail, avail_tx) (avail, avail_tx)
} }
fn failed(&self) -> bool {
self.inner.failed.load(Ordering::Acquire)
}
fn available(&self) -> bool { fn available(&self) -> bool {
self.available.load(Ordering::Acquire) self.inner.available.load(Ordering::Acquire)
}
async fn wait_for_update(&self) {
poll_fn(|cx| {
if self.inner.updated.load(Ordering::Acquire) {
self.inner.updated.store(false, Ordering::Release);
Poll::Ready(())
} else {
self.inner.waker.register(cx.waker());
Poll::Pending
}
})
.await;
} }
} }
impl WorkerAvailabilityTx { impl WorkerAvailabilityTx {
fn set(&self, val: bool) { fn set(&self, val: bool) {
let old = self.available.swap(val, Ordering::Release); let old = self.inner.available.swap(val, Ordering::Release);
if !old && val { if old != val {
let _ = self.notify.try_broadcast(()); self.inner.updated.store(true, Ordering::Release);
self.inner.waker.wake();
} }
} }
} }
impl Drop for WorkerAvailabilityTx {
fn drop(&mut self) {
self.inner.failed.store(true, Ordering::Release);
self.inner.updated.store(true, Ordering::Release);
self.inner.available.store(false, Ordering::Release);
self.inner.waker.wake();
}
}
/// Service worker /// Service worker
/// ///
/// Worker accepts message via unbounded channel and starts processing. /// Worker accepts message via unbounded channel and starts processing.
struct WorkerSt<T, F: ServiceFactory<T>> { struct WorkerSt<T, F: ServiceFactory<T>> {
id: WorkerId, id: WorkerId,
rx: Pin<Box<dyn Stream<Item = T>>>, rx: Receiver<T>,
stop: Pin<Box<dyn Stream<Item = Shutdown>>>, stop: Pin<Box<dyn Stream<Item = Shutdown>>>,
factory: F, factory: F,
availability: WorkerAvailabilityTx, availability: WorkerAvailabilityTx,
@ -246,25 +284,43 @@ where
F: ServiceFactory<T> + 'static, F: ServiceFactory<T> + 'static,
{ {
loop { loop {
let mut recv = std::pin::pin!(wrk.rx.recv());
let fut = poll_fn(|cx| { let fut = poll_fn(|cx| {
ready!(svc.poll_ready(cx)?); match svc.poll_ready(cx) {
Poll::Ready(Ok(())) => {
if let Some(item) = ready!(Pin::new(&mut wrk.rx).poll_next(cx)) { wrk.availability.set(true);
let fut = svc.call(item); }
let _ = spawn(async move { Poll::Ready(Err(err)) => {
let _ = fut.await; wrk.availability.set(false);
}); return Poll::Ready(Err(err));
}
Poll::Pending => {
wrk.availability.set(false);
return Poll::Pending;
}
}
match ready!(recv.as_mut().poll(cx)) {
Ok(item) => {
let fut = svc.call(item);
let _ = spawn(async move {
let _ = fut.await;
});
Poll::Ready(Ok::<_, F::Error>(true))
}
Err(_) => {
log::error!("Server is gone");
Poll::Ready(Ok(false))
}
} }
Poll::Ready(Ok::<(), F::Error>(()))
}); });
match select(fut, stream_recv(&mut wrk.stop)).await { match select(fut, stream_recv(&mut wrk.stop)).await {
Either::Left(Ok(())) => continue, Either::Left(Ok(true)) => continue,
Either::Left(Err(_)) => { Either::Left(Err(_)) => {
let _ = ntex_rt::spawn(async move { let _ = ntex_rt::spawn(async move {
svc.shutdown().await; svc.shutdown().await;
}); });
wrk.availability.set(false);
} }
Either::Right(Some(Shutdown { timeout, result })) => { Either::Right(Some(Shutdown { timeout, result })) => {
wrk.availability.set(false); wrk.availability.set(false);
@ -278,7 +334,8 @@ where
stop_svc(wrk.id, svc, timeout, Some(result)).await; stop_svc(wrk.id, svc, timeout, Some(result)).await;
return; return;
} }
Either::Right(None) => { Either::Left(Ok(false)) | Either::Right(None) => {
wrk.availability.set(false);
stop_svc(wrk.id, svc, STOP_TIMEOUT, None).await; stop_svc(wrk.id, svc, STOP_TIMEOUT, None).await;
return; return;
} }
@ -288,7 +345,6 @@ where
loop { loop {
match select(wrk.factory.create(()), stream_recv(&mut wrk.stop)).await { match select(wrk.factory.create(()), stream_recv(&mut wrk.stop)).await {
Either::Left(Ok(service)) => { Either::Left(Ok(service)) => {
wrk.availability.set(true);
svc = Pipeline::new(service).bind(); svc = Pipeline::new(service).bind();
break; break;
} }
@ -329,8 +385,6 @@ where
{ {
availability.set(false); availability.set(false);
let factory = factory?; let factory = factory?;
let rx = Box::pin(rx);
let mut stop = Box::pin(stop); let mut stop = Box::pin(stop);
let svc = match select(factory.create(()), stream_recv(&mut stop)).await { let svc = match select(factory.create(()), stream_recv(&mut stop)).await {
@ -349,9 +403,9 @@ where
svc, svc,
WorkerSt { WorkerSt {
id, id,
rx,
factory, factory,
availability, availability,
rx: Box::pin(rx),
stop: Box::pin(stop), stop: Box::pin(stop),
}, },
)) ))
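The availability rework above replaces the async-broadcast bus with a shared Inner of atomics plus an atomic_waker::AtomicWaker: the sender swaps the value, marks it updated and wakes, while the receiver parks in poll_fn until an update is observed (dropping the sender marks the worker failed). A stripped-down sketch of that notification primitive; the type names here are illustrative, not ntex-server's:

use std::future::poll_fn;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::task::Poll;

use atomic_waker::AtomicWaker;

#[derive(Clone)]
struct Flag(Arc<Shared>);

struct Shared {
    waker: AtomicWaker,
    updated: AtomicBool,
    value: AtomicBool,
}

impl Flag {
    fn new() -> Self {
        Flag(Arc::new(Shared {
            waker: AtomicWaker::new(),
            updated: AtomicBool::new(false),
            value: AtomicBool::new(false),
        }))
    }

    /// Sender side: store the value and wake the waiter only on change.
    fn set(&self, val: bool) {
        let old = self.0.value.swap(val, Ordering::Release);
        if old != val {
            self.0.updated.store(true, Ordering::Release);
            self.0.waker.wake();
        }
    }

    /// Receiver side: wait until `set()` reports a change, then read the value.
    async fn wait_for_update(&self) -> bool {
        poll_fn(|cx| {
            if self.0.updated.swap(false, Ordering::AcqRel) {
                return Poll::Ready(());
            }
            self.0.waker.register(cx.waker());
            // Re-check after registering so a wake that raced with the
            // registration is not lost.
            if self.0.updated.swap(false, Ordering::AcqRel) {
                Poll::Ready(())
            } else {
                Poll::Pending
            }
        })
        .await;
        self.0.value.load(Ordering::Acquire)
    }
}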

View file

@ -256,7 +256,7 @@ pub trait ServiceFactory<Req, Cfg = ()> {
} }
} }
impl<'a, S, Req> Service<Req> for &'a S impl<S, Req> Service<Req> for &S
where where
S: Service<Req>, S: Service<Req>,
{ {

View file

@ -1,5 +1,9 @@
# Changes # Changes
## [2.4.0] - 2024-12-30
* Enable rustls/std feature
## [2.3.0] - 2024-11-04 ## [2.3.0] - 2024-11-04
* Use updated Service trait * Use updated Service trait

View file

@ -1,6 +1,6 @@
[package] [package]
name = "ntex-tls" name = "ntex-tls"
version = "2.3.0" version = "2.4.0"
authors = ["ntex contributors <team@ntex.rs>"] authors = ["ntex contributors <team@ntex.rs>"]
description = "An implementation of SSL streams for ntex backed by OpenSSL" description = "An implementation of SSL streams for ntex backed by OpenSSL"
keywords = ["network", "framework", "async", "futures"] keywords = ["network", "framework", "async", "futures"]
@ -22,14 +22,14 @@ default = []
openssl = ["tls_openssl"] openssl = ["tls_openssl"]
# rustls support # rustls support
rustls = ["tls_rust"] rustls = ["tls_rust", "tls_rust/std"]
rustls-ring = ["tls_rust", "tls_rust/ring", "tls_rust/std"] rustls-ring = ["tls_rust", "tls_rust/ring", "tls_rust/std"]
[dependencies] [dependencies]
ntex-bytes = "0.1" ntex-bytes = "0.1"
ntex-io = "2.3" ntex-io = "2.3"
ntex-util = "2.5" ntex-util = "2.5"
ntex-service = "3.3" ntex-service = "3.4"
ntex-net = "2" ntex-net = "2"
log = "0.4" log = "0.4"

View file

@ -13,9 +13,8 @@ async fn main() -> io::Result<()> {
println!("Started openssl echp server: 127.0.0.1:8443"); println!("Started openssl echp server: 127.0.0.1:8443");
// load ssl keys // load ssl keys
let cert_file = let cert_file = &mut BufReader::new(File::open("../examples/cert.pem").unwrap());
&mut BufReader::new(File::open("../ntex-tls/examples/cert.pem").unwrap()); let key_file = &mut BufReader::new(File::open("../examples/key.pem").unwrap());
let key_file = &mut BufReader::new(File::open("../ntex-tls/examples/key.pem").unwrap());
let keys = rustls_pemfile::private_key(key_file).unwrap().unwrap(); let keys = rustls_pemfile::private_key(key_file).unwrap().unwrap();
let cert_chain = rustls_pemfile::certs(cert_file) let cert_chain = rustls_pemfile::certs(cert_file)
.collect::<Result<Vec<_>, _>>() .collect::<Result<Vec<_>, _>>()

View file

@ -8,18 +8,18 @@ use tls_openssl::ssl::{self, SslFiletype, SslMethod};
#[ntex::main] #[ntex::main]
async fn main() -> io::Result<()> { async fn main() -> io::Result<()> {
//std::env::set_var("RUST_LOG", "trace"); std::env::set_var("RUST_LOG", "trace");
//env_logger::init(); let _ = env_logger::try_init();
println!("Started openssl web server: 127.0.0.1:8443"); println!("Started openssl web server: 127.0.0.1:8443");
// load ssl keys // load ssl keys
let mut builder = ssl::SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap(); let mut builder = ssl::SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
builder builder
.set_private_key_file("../tests/key.pem", SslFiletype::PEM) .set_private_key_file("./examples/key.pem", SslFiletype::PEM)
.unwrap(); .unwrap();
builder builder
.set_certificate_chain_file("../tests/cert.pem") .set_certificate_chain_file("./examples/cert.pem")
.unwrap(); .unwrap();
// h2 alpn config // h2 alpn config

View file

@ -51,11 +51,11 @@ impl<T: Address> SslConnector<T> {
log::trace!("{}: SSL Handshake start for: {:?}", io.tag(), host); log::trace!("{}: SSL Handshake start for: {:?}", io.tag(), host);
match openssl.configure() { match openssl.configure() {
Err(e) => Err(io::Error::new(io::ErrorKind::Other, e).into()), Err(e) => Err(io::Error::new(io::ErrorKind::InvalidInput, e).into()),
Ok(config) => { Ok(config) => {
let ssl = config let ssl = config
.into_ssl(&host) .into_ssl(&host)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?; .map_err(|e| io::Error::new(io::ErrorKind::InvalidInput, e))?;
let tag = io.tag(); let tag = io.tag();
match connect_io(io, ssl).await { match connect_io(io, ssl).await {
Ok(io) => { Ok(io) => {
@ -64,7 +64,10 @@ impl<T: Address> SslConnector<T> {
} }
Err(e) => { Err(e) => {
log::trace!("{}: SSL Handshake error: {:?}", tag, e); log::trace!("{}: SSL Handshake error: {:?}", tag, e);
Err(io::Error::new(io::ErrorKind::Other, format!("{}", e)).into()) Err(
io::Error::new(io::ErrorKind::InvalidInput, format!("{}", e))
.into(),
)
} }
} }
} }

View file

@ -250,7 +250,9 @@ async fn handle_result<T, F>(
ssl::ErrorCode::WANT_READ => { ssl::ErrorCode::WANT_READ => {
let res = io.read_notify().await; let res = io.read_notify().await;
match res? { match res? {
None => Err(io::Error::new(io::ErrorKind::Other, "disconnected")), None => {
Err(io::Error::new(io::ErrorKind::NotConnected, "disconnected"))
}
_ => Ok(None), _ => Ok(None),
} }
} }

View file

@ -24,7 +24,7 @@ pub struct PeerCertChain<'a>(pub Vec<CertificateDer<'a>>);
pub(crate) struct Wrapper<'a, 'b>(&'a WriteBuf<'b>); pub(crate) struct Wrapper<'a, 'b>(&'a WriteBuf<'b>);
impl<'a, 'b> io::Read for Wrapper<'a, 'b> { impl io::Read for Wrapper<'_, '_> {
fn read(&mut self, dst: &mut [u8]) -> io::Result<usize> { fn read(&mut self, dst: &mut [u8]) -> io::Result<usize> {
self.0.with_read_buf(|buf| { self.0.with_read_buf(|buf| {
buf.with_src(|buf| { buf.with_src(|buf| {
@ -41,7 +41,7 @@ impl<'a, 'b> io::Read for Wrapper<'a, 'b> {
} }
} }
impl<'a, 'b> io::Write for Wrapper<'a, 'b> { impl io::Write for Wrapper<'_, '_> {
fn write(&mut self, src: &[u8]) -> io::Result<usize> { fn write(&mut self, src: &[u8]) -> io::Result<usize> {
self.0.with_dst(|buf| buf.extend_from_slice(src)); self.0.with_dst(|buf| buf.extend_from_slice(src));
Ok(src.len()) Ok(src.len())

View file

@ -1,5 +1,19 @@
# Changes # Changes
## [2.10.0] - 2025-03-12
* Add "Inplace" channel
* Expose "yield_to" helper
## [2.9.0] - 2025-01-15
* Add EitherService/EitherServiceFactory
* Add retry middleware
* Add on-drop handler for futures
## [2.8.0] - 2024-12-04 ## [2.8.0] - 2024-12-04
* Use updated Service trait * Use updated Service trait

View file

@ -1,6 +1,6 @@
[package] [package]
name = "ntex-util" name = "ntex-util"
version = "2.8.0" version = "2.10.0"
authors = ["ntex contributors <team@ntex.rs>"] authors = ["ntex contributors <team@ntex.rs>"]
description = "Utilities for ntex framework" description = "Utilities for ntex framework"
keywords = ["network", "framework", "async", "futures"] keywords = ["network", "framework", "async", "futures"]

View file

@ -0,0 +1,81 @@
//! A futures-aware bounded(1) channel.
use std::{cell::Cell, fmt, future::poll_fn, task::Context, task::Poll};
use crate::task::LocalWaker;
/// Creates a new futures-aware channel.
pub fn channel<T>() -> Inplace<T> {
Inplace {
value: Cell::new(None),
rx_task: LocalWaker::new(),
}
}
/// A futures-aware bounded(1) channel.
pub struct Inplace<T> {
value: Cell<Option<T>>,
rx_task: LocalWaker,
}
// The channels do not ever project Pin to the inner T
impl<T> Unpin for Inplace<T> {}
impl<T> fmt::Debug for Inplace<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "Inplace<T>")
}
}
impl<T> Inplace<T> {
/// Set a successful result.
///
/// If the value is successfully enqueued for the remote end to receive,
/// then `Ok(())` is returned. If the previous value has not been consumed
/// yet, `Err` is returned with the value provided.
pub fn send(&self, val: T) -> Result<(), T> {
if let Some(v) = self.value.take() {
self.value.set(Some(v));
Err(val)
} else {
self.value.set(Some(val));
self.rx_task.wake();
Ok(())
}
}
/// Wait until a value is available and return it
pub async fn recv(&self) -> T {
poll_fn(|cx| self.poll_recv(cx)).await
}
/// Polls the channel to determine if a value is ready
pub fn poll_recv(&self, cx: &mut Context<'_>) -> Poll<T> {
// If we've got a value, then skip the logic below as we're done.
if let Some(val) = self.value.take() {
return Poll::Ready(val);
}
// No value yet: register the waker so a subsequent `send` can wake us.
self.rx_task.register(cx.waker());
Poll::Pending
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::future::lazy;
#[ntex_macros::rt_test2]
async fn test_inplace() {
let ch = channel();
assert_eq!(lazy(|cx| ch.poll_recv(cx)).await, Poll::Pending);
assert!(ch.send(1).is_ok());
assert!(ch.send(2) == Err(2));
assert_eq!(lazy(|cx| ch.poll_recv(cx)).await, Poll::Ready(1));
assert!(ch.send(1).is_ok());
assert_eq!(ch.recv().await, 1);
}
}

View file

@ -2,6 +2,7 @@
mod cell; mod cell;
pub mod condition; pub mod condition;
pub mod inplace;
pub mod mpsc; pub mod mpsc;
pub mod oneshot; pub mod oneshot;
pub mod pool; pub mod pool;
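With the inplace module registered above, a small usage sketch; the path ntex_util::channel::inplace is assumed from these hunks, and #[ntex::main] assumes a configured runtime. The channel holds at most one value, and send returns the value back if the previous one has not been received yet:

use std::rc::Rc;

use ntex_util::channel::inplace;

#[ntex::main]
async fn main() {
    let ch = Rc::new(inplace::channel::<u32>());

    let tx = ch.clone();
    ntex::rt::spawn(async move {
        // Only one pending value fits; the second send is rejected.
        assert!(tx.send(1).is_ok());
        assert_eq!(tx.send(2), Err(2));
    });

    // Wakes once the spawned task stores a value, which also frees the slot.
    assert_eq!(ch.recv().await, 1);
    assert!(ch.send(3).is_ok());
}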

View file

@ -7,12 +7,14 @@ pub use futures_sink::Sink;
mod either; mod either;
mod join; mod join;
mod lazy; mod lazy;
mod on_drop;
mod ready; mod ready;
mod select; mod select;
pub use self::either::Either; pub use self::either::Either;
pub use self::join::{join, join_all}; pub use self::join::{join, join_all};
pub use self::lazy::{lazy, Lazy}; pub use self::lazy::{lazy, Lazy};
pub use self::on_drop::{OnDropFn, OnDropFuture, OnDropFutureExt};
pub use self::ready::Ready; pub use self::ready::Ready;
pub use self::select::select; pub use self::select::select;

View file

@ -0,0 +1,104 @@
use std::{cell::Cell, fmt, future::Future, pin::Pin, task::Context, task::Poll};
/// Execute fn during drop
pub struct OnDropFn<F: FnOnce()> {
f: Cell<Option<F>>,
}
impl<F: FnOnce()> OnDropFn<F> {
pub fn new(f: F) -> Self {
Self {
f: Cell::new(Some(f)),
}
}
/// Cancel fn execution
pub fn cancel(&self) {
self.f.take();
}
}
impl<F: FnOnce()> fmt::Debug for OnDropFn<F> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("OnDropFn")
.field("f", &std::any::type_name::<F>())
.finish()
}
}
impl<F: FnOnce()> Drop for OnDropFn<F> {
fn drop(&mut self) {
if let Some(f) = self.f.take() {
f()
}
}
}
/// Trait that adds `on_drop` support to futures
pub trait OnDropFutureExt: Future + Sized {
fn on_drop<F: FnOnce()>(self, on_drop: F) -> OnDropFuture<Self, F> {
OnDropFuture::new(self, on_drop)
}
}
impl<F: Future> OnDropFutureExt for F {}
pin_project_lite::pin_project! {
pub struct OnDropFuture<Ft: Future, F: FnOnce()> {
#[pin]
fut: Ft,
on_drop: OnDropFn<F>
}
}
impl<Ft: Future, F: FnOnce()> OnDropFuture<Ft, F> {
pub fn new(fut: Ft, on_drop: F) -> Self {
Self {
fut,
on_drop: OnDropFn::new(on_drop),
}
}
}
impl<Ft: Future, F: FnOnce()> Future for OnDropFuture<Ft, F> {
type Output = Ft::Output;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project();
match this.fut.poll(cx) {
Poll::Ready(r) => {
this.on_drop.cancel();
Poll::Ready(r)
}
Poll::Pending => Poll::Pending,
}
}
}
#[cfg(test)]
mod test {
use std::future::{pending, poll_fn};
use super::*;
#[ntex_macros::rt_test2]
async fn on_drop() {
let f = OnDropFn::new(|| ());
assert!(format!("{:?}", f).contains("OnDropFn"));
f.cancel();
assert!(f.f.get().is_none());
let mut dropped = false;
let mut f = pending::<()>().on_drop(|| {
dropped = true;
});
poll_fn(|cx| {
let _ = Pin::new(&mut f).poll(cx);
Poll::Ready(())
})
.await;
drop(f);
assert!(dropped);
}
}
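A short usage sketch for the new combinator: the closure runs only if the wrapped future is dropped before completion, for example when the owning task is cancelled, and is discarded once the future resolves. The request-handling body and the log message are illustrative:

use ntex_util::future::OnDropFutureExt;

async fn handle_request() {
    let work = async {
        // ... actual request handling would go here ...
    }
    .on_drop(|| {
        // Runs only if `work` is dropped before completing.
        log::warn!("request handling was cancelled");
    });

    // Completing the future cancels the on-drop callback.
    work.await;
}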

View file

@ -0,0 +1,239 @@
//! Either service allows using different services for handling requests
use std::{fmt, task::Context};
use ntex_service::{Service, ServiceCtx, ServiceFactory};
use crate::future::Either;
#[derive(Clone)]
/// Either service
///
/// Either service allows using different services for handling requests
pub struct EitherService<SLeft, SRight> {
svc: Either<SLeft, SRight>,
}
#[derive(Clone)]
/// Either service factory
///
/// Either service allows using different services for handling requests
pub struct EitherServiceFactory<ChooseFn, SFLeft, SFRight> {
left: SFLeft,
right: SFRight,
choose_left_fn: ChooseFn,
}
impl<ChooseFn, SFLeft, SFRight> EitherServiceFactory<ChooseFn, SFLeft, SFRight> {
/// Create `Either` service factory
pub fn new(choose_left_fn: ChooseFn, sf_left: SFLeft, sf_right: SFRight) -> Self {
EitherServiceFactory {
choose_left_fn,
left: sf_left,
right: sf_right,
}
}
}
impl<ChooseFn, SFLeft, SFRight> fmt::Debug
for EitherServiceFactory<ChooseFn, SFLeft, SFRight>
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("EitherServiceFactory")
.field("left", &std::any::type_name::<SFLeft>())
.field("right", &std::any::type_name::<SFRight>())
.field("choose_fn", &std::any::type_name::<ChooseFn>())
.finish()
}
}
impl<R, C, ChooseFn, SFLeft, SFRight> ServiceFactory<R, C>
for EitherServiceFactory<ChooseFn, SFLeft, SFRight>
where
ChooseFn: Fn(&C) -> bool,
SFLeft: ServiceFactory<R, C>,
SFRight: ServiceFactory<
R,
C,
Response = SFLeft::Response,
InitError = SFLeft::InitError,
Error = SFLeft::Error,
>,
{
type Response = SFLeft::Response;
type Error = SFLeft::Error;
type InitError = SFLeft::InitError;
type Service = EitherService<SFLeft::Service, SFRight::Service>;
async fn create(&self, cfg: C) -> Result<Self::Service, Self::InitError> {
let choose_left = (self.choose_left_fn)(&cfg);
if choose_left {
let svc = self.left.create(cfg).await?;
Ok(EitherService {
svc: Either::Left(svc),
})
} else {
let svc = self.right.create(cfg).await?;
Ok(EitherService {
svc: Either::Right(svc),
})
}
}
}
impl<SLeft, SRight> EitherService<SLeft, SRight> {
/// Create `Either` service
pub fn left(svc: SLeft) -> Self {
EitherService {
svc: Either::Left(svc),
}
}
/// Create `Either` service
pub fn right(svc: SRight) -> Self {
EitherService {
svc: Either::Right(svc),
}
}
}
impl<SLeft, SRight> fmt::Debug for EitherService<SLeft, SRight> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("EitherService")
.field("left", &std::any::type_name::<SLeft>())
.field("right", &std::any::type_name::<SRight>())
.finish()
}
}
impl<Req, SLeft, SRight> Service<Req> for EitherService<SLeft, SRight>
where
SLeft: Service<Req>,
SRight: Service<Req, Response = SLeft::Response, Error = SLeft::Error>,
{
type Response = SLeft::Response;
type Error = SLeft::Error;
#[inline]
async fn ready(&self, ctx: ServiceCtx<'_, Self>) -> Result<(), Self::Error> {
match self.svc {
Either::Left(ref svc) => ctx.ready(svc).await,
Either::Right(ref svc) => ctx.ready(svc).await,
}
}
#[inline]
async fn shutdown(&self) {
match self.svc {
Either::Left(ref svc) => svc.shutdown().await,
Either::Right(ref svc) => svc.shutdown().await,
}
}
#[inline]
async fn call(
&self,
req: Req,
ctx: ServiceCtx<'_, Self>,
) -> Result<Self::Response, Self::Error> {
match self.svc {
Either::Left(ref svc) => ctx.call(svc, req).await,
Either::Right(ref svc) => ctx.call(svc, req).await,
}
}
#[inline]
fn poll(&self, cx: &mut Context<'_>) -> Result<(), Self::Error> {
match self.svc {
Either::Left(ref svc) => svc.poll(cx),
Either::Right(ref svc) => svc.poll(cx),
}
}
}
#[cfg(test)]
mod tests {
use ntex_service::{Pipeline, ServiceFactory};
use super::*;
#[derive(Copy, Clone, Debug, PartialEq)]
struct Svc1;
impl Service<()> for Svc1 {
type Response = &'static str;
type Error = ();
async fn call(&self, _: (), _: ServiceCtx<'_, Self>) -> Result<&'static str, ()> {
Ok("svc1")
}
}
#[derive(Clone)]
struct Svc1Factory;
impl ServiceFactory<(), &'static str> for Svc1Factory {
type Response = &'static str;
type Error = ();
type InitError = ();
type Service = Svc1;
async fn create(&self, _: &'static str) -> Result<Self::Service, Self::InitError> {
Ok(Svc1)
}
}
#[derive(Copy, Clone, Debug, PartialEq)]
struct Svc2;
impl Service<()> for Svc2 {
type Response = &'static str;
type Error = ();
async fn call(&self, _: (), _: ServiceCtx<'_, Self>) -> Result<&'static str, ()> {
Ok("svc2")
}
}
#[derive(Clone)]
struct Svc2Factory;
impl ServiceFactory<(), &'static str> for Svc2Factory {
type Response = &'static str;
type Error = ();
type InitError = ();
type Service = Svc2;
async fn create(&self, _: &'static str) -> Result<Self::Service, Self::InitError> {
Ok(Svc2)
}
}
type Either = EitherService<Svc1, Svc2>;
type EitherFactory<F> = EitherServiceFactory<F, Svc1Factory, Svc2Factory>;
#[ntex_macros::rt_test2]
async fn test_success() {
let svc = Pipeline::new(Either::left(Svc1).clone());
assert_eq!(svc.call(()).await, Ok("svc1"));
assert_eq!(svc.ready().await, Ok(()));
svc.shutdown().await;
let svc = Pipeline::new(Either::right(Svc2).clone());
assert_eq!(svc.call(()).await, Ok("svc2"));
assert_eq!(svc.ready().await, Ok(()));
svc.shutdown().await;
assert!(format!("{:?}", svc).contains("EitherService"));
}
#[ntex_macros::rt_test2]
async fn test_factory() {
let factory =
EitherFactory::new(|s: &&'static str| *s == "svc1", Svc1Factory, Svc2Factory)
.clone();
assert!(format!("{:?}", factory).contains("EitherServiceFactory"));
let svc = factory.pipeline("svc1").await.unwrap();
assert_eq!(svc.call(()).await, Ok("svc1"));
let svc = factory.pipeline("other").await.unwrap();
assert_eq!(svc.call(()).await, Ok("svc2"));
}
}

View file

@ -1,8 +1,10 @@
pub mod buffer; pub mod buffer;
pub mod either;
mod extensions; mod extensions;
pub mod inflight; pub mod inflight;
pub mod keepalive; pub mod keepalive;
pub mod onerequest; pub mod onerequest;
pub mod retry;
pub mod timeout; pub mod timeout;
pub mod variant; pub mod variant;

View file

@ -0,0 +1,177 @@
#![allow(async_fn_in_trait)]
use ntex_service::{Middleware, Service, ServiceCtx};
/// Trait defines retry policy
pub trait Policy<Req, S: Service<Req>>: Sized + Clone {
async fn retry(&mut self, req: &Req, res: &Result<S::Response, S::Error>) -> bool;
fn clone_request(&self, req: &Req) -> Option<Req>;
}
#[derive(Clone, Debug)]
/// Retry middleware
///
/// Retry middleware allows retrying a service call
pub struct Retry<P> {
policy: P,
}
#[derive(Clone, Debug)]
/// Retry service
///
/// Retry service allows retrying a service call
pub struct RetryService<P, S> {
policy: P,
service: S,
}
impl<P> Retry<P> {
/// Create retry middleware
pub fn new(policy: P) -> Self {
Retry { policy }
}
}
impl<P: Clone, S> Middleware<S> for Retry<P> {
type Service = RetryService<P, S>;
fn create(&self, service: S) -> Self::Service {
RetryService {
service,
policy: self.policy.clone(),
}
}
}
impl<P, S> RetryService<P, S> {
/// Create retry service
pub fn new(policy: P, service: S) -> Self {
RetryService { policy, service }
}
}
impl<P, S, R> Service<R> for RetryService<P, S>
where
P: Policy<R, S>,
S: Service<R>,
{
type Response = S::Response;
type Error = S::Error;
ntex_service::forward_poll!(service);
ntex_service::forward_ready!(service);
ntex_service::forward_shutdown!(service);
async fn call(
&self,
mut request: R,
ctx: ServiceCtx<'_, Self>,
) -> Result<S::Response, S::Error> {
let mut policy = self.policy.clone();
let mut cloned = policy.clone_request(&request);
loop {
let result = ctx.call(&self.service, request).await;
cloned = if let Some(req) = cloned.take() {
if policy.retry(&req, &result).await {
request = req;
policy.clone_request(&request)
} else {
return result;
}
} else {
return result;
}
}
}
}
#[derive(Copy, Clone, Debug)]
/// Default retry policy
///
/// This policy retries on any error. The default retry count is 3.
pub struct DefaultRetryPolicy(u16);
impl DefaultRetryPolicy {
/// Create default retry policy
pub fn new(retry: u16) -> Self {
DefaultRetryPolicy(retry)
}
}
impl Default for DefaultRetryPolicy {
fn default() -> Self {
DefaultRetryPolicy::new(3)
}
}
impl<R, S> Policy<R, S> for DefaultRetryPolicy
where
R: Clone,
S: Service<R>,
{
async fn retry(&mut self, _: &R, res: &Result<S::Response, S::Error>) -> bool {
if res.is_err() {
if self.0 == 0 {
false
} else {
self.0 -= 1;
true
}
} else {
false
}
}
fn clone_request(&self, req: &R) -> Option<R> {
Some(req.clone())
}
}
#[cfg(test)]
mod tests {
use std::{cell::Cell, rc::Rc};
use ntex_service::{apply, fn_factory, Pipeline, ServiceFactory};
use super::*;
#[derive(Clone, Debug, PartialEq)]
struct TestService(Rc<Cell<usize>>);
impl Service<()> for TestService {
type Response = ();
type Error = ();
async fn call(&self, _: (), _: ServiceCtx<'_, Self>) -> Result<(), ()> {
let cnt = self.0.get();
if cnt == 0 {
Ok(())
} else {
self.0.set(cnt - 1);
Err(())
}
}
}
#[ntex_macros::rt_test2]
async fn test_retry() {
let cnt = Rc::new(Cell::new(5));
let svc = Pipeline::new(
RetryService::new(DefaultRetryPolicy::default(), TestService(cnt.clone()))
.clone(),
);
assert_eq!(svc.call(()).await, Err(()));
assert_eq!(svc.ready().await, Ok(()));
svc.shutdown().await;
assert_eq!(cnt.get(), 1);
let factory = apply(
Retry::new(DefaultRetryPolicy::new(3)).clone(),
fn_factory(|| async { Ok::<_, ()>(TestService(Rc::new(Cell::new(2)))) }),
);
let srv = factory.pipeline(&()).await.unwrap();
assert_eq!(srv.call(()).await, Ok(()));
}
}
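Beyond DefaultRetryPolicy, the Policy trait above admits request-aware strategies. A hedged sketch of a custom policy that retries only a caller-defined temporary error within a bounded budget; the error enum and struct are illustrative, and the ntex_util::services::retry path is assumed from the pub mod retry; hunk above:

use ntex_service::Service;
use ntex_util::services::retry::Policy;

/// Illustrative error type: only `Temporary` failures are worth retrying.
#[derive(Debug)]
enum MyError {
    Temporary,
    Fatal,
}

#[derive(Clone)]
struct RetryTemporary {
    attempts_left: u16,
}

impl<R, S> Policy<R, S> for RetryTemporary
where
    R: Clone,
    S: Service<R, Error = MyError>,
{
    async fn retry(&mut self, _req: &R, res: &Result<S::Response, S::Error>) -> bool {
        match res {
            // Retry transient failures while the budget lasts.
            Err(MyError::Temporary) if self.attempts_left > 0 => {
                self.attempts_left -= 1;
                true
            }
            // Successes and fatal errors go back to the caller unchanged.
            _ => false,
        }
    }

    fn clone_request(&self, req: &R) -> Option<R> {
        Some(req.clone())
    }
}

Such a policy could then be attached directly with RetryService::new(RetryTemporary { attempts_left: 3 }, svc) or as middleware via Retry::new, the same way the tests above wire up DefaultRetryPolicy.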

View file

@ -210,7 +210,7 @@ mod tests {
#[ntex_macros::rt_test2] #[ntex_macros::rt_test2]
#[allow(clippy::redundant_clone)] #[allow(clippy::redundant_clone)]
async fn test_timeout_newservice() { async fn test_timeout_middleware() {
let resolution = Duration::from_millis(100); let resolution = Duration::from_millis(100);
let wait_time = Duration::from_millis(500); let wait_time = Duration::from_millis(500);

View file

@ -91,7 +91,6 @@ impl fmt::Debug for LocalWaker {
} }
} }
#[doc(hidden)]
/// Yields execution back to the current runtime. /// Yields execution back to the current runtime.
pub async fn yield_to() { pub async fn yield_to() {
use std::{future::Future, pin::Pin, task::Context, task::Poll}; use std::{future::Future, pin::Pin, task::Context, task::Poll};
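Since yield_to above loses its #[doc(hidden)] attribute (it is the "yield_to" helper listed in the ntex-util 2.10.0 changelog), a small cooperative-scheduling sketch; the module path ntex_util::task is assumed from the surrounding file, and the batch size is arbitrary:

use ntex_util::task::yield_to;

// Sum a large batch, yielding to the runtime every 64 items so other
// tasks scheduled on the same thread can make progress in between.
async fn process_many(items: Vec<u32>) -> u64 {
    let mut sum = 0u64;
    for (i, item) in items.into_iter().enumerate() {
        sum += u64::from(item);
        if i % 64 == 63 {
            yield_to().await;
        }
    }
    sum
}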

View file

@ -1,5 +1,41 @@
# Changes # Changes
## [2.12.4] - 2025-03-28
* http: Return PayloadError::Incomplete on server disconnect
* web: Expose WebStack for external wrapper support in downstream crates #542
## [2.12.3] - 2025-03-22
* web: Export web::app_service::AppService #534
* http: Add delay for test server availability; its absence could cause a connect race
## [2.12.2] - 2025-03-15
* http: Allow the publish future to run to completion in case of error
* http: Remove brotli support
## [2.12.1] - 2025-03-14
* Allow disabling test logging (no-test-logging feature)
## [2.12.0] - 2025-03-12
* Add neon runtime support
* Check test server availability before using it
* Drop glommio support
* Drop async-std support
## [2.11.0] - 2025-01-31
* CPU affinity support for server
## [2.10.0] - 2024-12-04 ## [2.10.0] - 2024-12-04
* Use updated Service trait * Use updated Service trait

View file

@ -1,6 +1,6 @@
[package] [package]
name = "ntex" name = "ntex"
version = "2.10.0" version = "2.12.4"
authors = ["ntex contributors <team@ntex.rs>"] authors = ["ntex contributors <team@ntex.rs>"]
description = "Framework for composable network services" description = "Framework for composable network services"
readme = "README.md" readme = "README.md"
@ -18,7 +18,7 @@ edition = "2021"
rust-version = "1.75" rust-version = "1.75"
[package.metadata.docs.rs] [package.metadata.docs.rs]
features = ["tokio", "openssl", "rustls", "compress", "cookie", "ws", "brotli", "ntex-tls/rustls-ring"] features = ["tokio", "openssl", "rustls", "compress", "cookie", "ws", "ntex-tls/rustls-ring"]
[lib] [lib]
name = "ntex" name = "ntex"
@ -45,20 +45,20 @@ url = ["url-pkg"]
# tokio runtime # tokio runtime
tokio = ["ntex-net/tokio"] tokio = ["ntex-net/tokio"]
# glommio runtime
glommio = ["ntex-net/glommio"]
# async-std runtime
async-std = ["ntex-net/async-std"]
# compio runtime # compio runtime
compio = ["ntex-net/compio"] compio = ["ntex-net/compio"]
# neon runtime
neon = ["ntex-net/neon"]
# neon runtime
neon-uring = ["ntex-net/neon", "ntex-net/io-uring"]
# websocket support # websocket support
ws = ["dep:sha-1"] ws = ["dep:sha-1"]
# brotli2 support # disable [ntex::test] logging configuration
brotli = ["dep:brotli2"] no-test-logging = []
[dependencies] [dependencies]
ntex-codec = "0.6" ntex-codec = "0.6"
@ -68,11 +68,11 @@ ntex-service = "3.4"
ntex-macros = "0.1" ntex-macros = "0.1"
ntex-util = "2.8" ntex-util = "2.8"
ntex-bytes = "0.1.27" ntex-bytes = "0.1.27"
ntex-server = "2.6" ntex-server = "2.7.4"
ntex-h2 = "1.4" ntex-h2 = "1.8.6"
ntex-rt = "0.4.22" ntex-rt = "0.4.27"
ntex-io = "2.9" ntex-io = "2.11"
ntex-net = "2.4" ntex-net = "2.5.10"
ntex-tls = "2.3" ntex-tls = "2.3"
base64 = "0.22" base64 = "0.22"
@ -83,6 +83,7 @@ pin-project-lite = "0.2"
regex = { version = "1.11", default-features = false, features = ["std"] } regex = { version = "1.11", default-features = false, features = ["std"] }
serde = { version = "1", features = ["derive"] } serde = { version = "1", features = ["derive"] }
sha-1 = { version = "0.10", optional = true } sha-1 = { version = "0.10", optional = true }
env_logger = { version = "0.11", default-features = false }
thiserror = "1" thiserror = "1"
nanorand = { version = "0.7", default-features = false, features = [ nanorand = { version = "0.7", default-features = false, features = [
"std", "std",
@ -108,13 +109,12 @@ tls-rustls = { version = "0.23", package = "rustls", optional = true, default-fe
webpki-roots = { version = "0.26", optional = true } webpki-roots = { version = "0.26", optional = true }
# compression # compression
brotli2 = { version = "0.3.2", optional = true }
flate2 = { version = "1.0", optional = true } flate2 = { version = "1.0", optional = true }
[dev-dependencies] [dev-dependencies]
env_logger = "0.11"
rand = "0.8" rand = "0.8"
time = "0.3" time = "0.3"
oneshot = "0.1"
futures-util = "0.3" futures-util = "0.3"
tls-openssl = { version = "0.10", package = "openssl" } tls-openssl = { version = "0.10", package = "openssl" }
tls-rustls = { version = "0.23", package = "rustls", features = ["ring", "std"], default-features = false } tls-rustls = { version = "0.23", package = "rustls", features = ["ring", "std"], default-features = false }

View file

@ -1,13 +1,11 @@
use std::{ use std::{future::poll_fn, io, io::Write, pin::Pin, task, task::Poll, time::Instant};
future::poll_fn, io, io::Write, pin::Pin, task::Context, task::Poll, time::Instant,
};
use crate::http::body::{BodySize, MessageBody}; use crate::http::body::{BodySize, MessageBody};
use crate::http::error::PayloadError; use crate::http::error::PayloadError;
use crate::http::h1;
use crate::http::header::{HeaderMap, HeaderValue, HOST}; use crate::http::header::{HeaderMap, HeaderValue, HOST};
use crate::http::message::{RequestHeadType, ResponseHead}; use crate::http::message::{RequestHeadType, ResponseHead};
use crate::http::payload::{Payload, PayloadStream}; use crate::http::payload::{Payload, PayloadStream};
use crate::http::{h1, Version};
use crate::io::{IoBoxed, RecvError}; use crate::io::{IoBoxed, RecvError};
use crate::time::{timeout_checked, Millis}; use crate::time::{timeout_checked, Millis};
use crate::util::{ready, BufMut, Bytes, BytesMut, Stream}; use crate::util::{ready, BufMut, Bytes, BytesMut, Stream};
@ -101,7 +99,13 @@ where
Ok((head, Payload::None)) Ok((head, Payload::None))
} }
_ => { _ => {
let pl: PayloadStream = Box::pin(PlStream::new(io, codec, created, pool)); let pl: PayloadStream = Box::pin(PlStream::new(
io,
codec,
created,
pool,
head.version == Version::HTTP_10,
));
Ok((head, pl.into())) Ok((head, pl.into()))
} }
} }
@ -137,6 +141,7 @@ pub(super) struct PlStream {
io: Option<IoBoxed>, io: Option<IoBoxed>,
codec: h1::ClientPayloadCodec, codec: h1::ClientPayloadCodec,
created: Instant, created: Instant,
http_10: bool,
pool: Option<Acquired>, pool: Option<Acquired>,
} }
@ -146,12 +151,14 @@ impl PlStream {
codec: h1::ClientCodec, codec: h1::ClientCodec,
created: Instant, created: Instant,
pool: Option<Acquired>, pool: Option<Acquired>,
http_10: bool,
) -> Self { ) -> Self {
PlStream { PlStream {
io: Some(io), io: Some(io),
codec: codec.into_payload_codec(), codec: codec.into_payload_codec(),
created, created,
pool, pool,
http_10,
} }
} }
} }
@ -161,41 +168,46 @@ impl Stream for PlStream {
fn poll_next( fn poll_next(
mut self: Pin<&mut Self>, mut self: Pin<&mut Self>,
cx: &mut Context<'_>, cx: &mut task::Context<'_>,
) -> Poll<Option<Self::Item>> { ) -> Poll<Option<Self::Item>> {
let mut this = self.as_mut(); let mut this = self.as_mut();
loop { loop {
return Poll::Ready(Some( let item = ready!(this.io.as_ref().unwrap().poll_recv(&this.codec, cx));
match ready!(this.io.as_ref().unwrap().poll_recv(&this.codec, cx)) { return Poll::Ready(Some(match item {
Ok(chunk) => { Ok(chunk) => {
if let Some(chunk) = chunk { if let Some(chunk) = chunk {
Ok(chunk) Ok(chunk)
} else { } else {
release_connection( release_connection(
this.io.take().unwrap(), this.io.take().unwrap(),
!this.codec.keepalive(), !this.codec.keepalive(),
this.created, this.created,
this.pool.take(), this.pool.take(),
); );
return Poll::Ready(None); return Poll::Ready(None);
}
} }
Err(RecvError::KeepAlive) => { }
Err(io::Error::new(io::ErrorKind::TimedOut, "Keep-alive").into()) Err(RecvError::KeepAlive) => {
Err(io::Error::new(io::ErrorKind::TimedOut, "Keep-alive").into())
}
Err(RecvError::Stop) => {
Err(io::Error::new(io::ErrorKind::Other, "Dispatcher stopped").into())
}
Err(RecvError::WriteBackpressure) => {
ready!(this.io.as_ref().unwrap().poll_flush(cx, false))?;
continue;
}
Err(RecvError::Decoder(err)) => Err(err),
Err(RecvError::PeerGone(Some(err))) => {
Err(PayloadError::Incomplete(Some(err)))
}
Err(RecvError::PeerGone(None)) => {
if this.http_10 {
return Poll::Ready(None);
} }
Err(RecvError::Stop) => { Err(PayloadError::Incomplete(None))
Err(io::Error::new(io::ErrorKind::Other, "Dispatcher stopped") }
.into()) }));
}
Err(RecvError::WriteBackpressure) => {
ready!(this.io.as_ref().unwrap().poll_flush(cx, false))?;
continue;
}
Err(RecvError::Decoder(err)) => Err(err),
Err(RecvError::PeerGone(Some(err))) => Err(err.into()),
Err(RecvError::PeerGone(None)) => return Poll::Ready(None),
},
));
} }
} }
} }

View file

@ -187,14 +187,17 @@ async fn get_response(
err err
); );
pl.set_error( pl.set_error(
io::Error::new(io::ErrorKind::Other, err) io::Error::new(
.into(), io::ErrorKind::UnexpectedEof,
err,
)
.into(),
); );
} }
_ => { _ => {
pl.set_error( pl.set_error(
io::Error::new( io::Error::new(
io::ErrorKind::Other, io::ErrorKind::Unsupported,
"unexpected h2 message", "unexpected h2 message",
) )
.into(), .into(),
@ -216,7 +219,7 @@ async fn get_response(
} }
} }
_ => Err(SendRequestError::Error(Box::new(io::Error::new( _ => Err(SendRequestError::Error(Box::new(io::Error::new(
io::ErrorKind::Other, io::ErrorKind::Unsupported,
"unexpected h2 message", "unexpected h2 message",
)))), )))),
} }

View file

@ -387,8 +387,8 @@ impl Future for ReadBody {
let this = self.get_mut(); let this = self.get_mut();
loop { loop {
return match Pin::new(&mut this.stream).poll_next(cx)? { return match Pin::new(&mut this.stream).poll_next(cx) {
Poll::Ready(Some(chunk)) => { Poll::Ready(Some(Ok(chunk))) => {
if this.limit > 0 && (this.buf.len() + chunk.len()) > this.limit { if this.limit > 0 && (this.buf.len() + chunk.len()) > this.limit {
Poll::Ready(Err(PayloadError::Overflow)) Poll::Ready(Err(PayloadError::Overflow))
} else { } else {
@ -397,6 +397,7 @@ impl Future for ReadBody {
} }
} }
Poll::Ready(None) => Poll::Ready(Ok(this.buf.split().freeze())), Poll::Ready(None) => Poll::Ready(Ok(this.buf.split().freeze())),
Poll::Ready(Some(Err(err))) => Poll::Ready(Err(err)),
Poll::Pending => { Poll::Pending => {
if this.timeout.poll_elapsed(cx).is_ready() { if this.timeout.poll_elapsed(cx).is_ready() {
Poll::Ready(Err(PayloadError::Incomplete(Some( Poll::Ready(Err(PayloadError::Incomplete(Some(

View file

@ -1,7 +1,5 @@
use std::{future::Future, io, io::Write, pin::Pin, task::Context, task::Poll}; use std::{future::Future, io, io::Write, pin::Pin, task::Context, task::Poll};
#[cfg(feature = "brotli")]
use brotli2::write::BrotliDecoder;
use flate2::write::{GzDecoder, ZlibDecoder}; use flate2::write::{GzDecoder, ZlibDecoder};
use super::Writer; use super::Writer;
@ -27,10 +25,6 @@ where
#[inline] #[inline]
pub fn new(stream: S, encoding: ContentEncoding) -> Decoder<S> { pub fn new(stream: S, encoding: ContentEncoding) -> Decoder<S> {
let decoder = match encoding { let decoder = match encoding {
#[cfg(feature = "brotli")]
ContentEncoding::Br => Some(ContentDecoder::Br(Box::new(BrotliDecoder::new(
Writer::new(),
)))),
ContentEncoding::Deflate => Some(ContentDecoder::Deflate(Box::new( ContentEncoding::Deflate => Some(ContentDecoder::Deflate(Box::new(
ZlibDecoder::new(Writer::new()), ZlibDecoder::new(Writer::new()),
))), ))),
@ -137,25 +131,11 @@ where
enum ContentDecoder { enum ContentDecoder {
Deflate(Box<ZlibDecoder<Writer>>), Deflate(Box<ZlibDecoder<Writer>>),
Gzip(Box<GzDecoder<Writer>>), Gzip(Box<GzDecoder<Writer>>),
#[cfg(feature = "brotli")]
Br(Box<BrotliDecoder<Writer>>),
} }
impl ContentDecoder { impl ContentDecoder {
fn feed_eof(&mut self) -> io::Result<Option<Bytes>> { fn feed_eof(&mut self) -> io::Result<Option<Bytes>> {
match self { match self {
#[cfg(feature = "brotli")]
ContentDecoder::Br(ref mut decoder) => match decoder.flush() {
Ok(()) => {
let b = decoder.get_mut().take();
if !b.is_empty() {
Ok(Some(b))
} else {
Ok(None)
}
}
Err(e) => Err(e),
},
ContentDecoder::Gzip(ref mut decoder) => match decoder.try_finish() { ContentDecoder::Gzip(ref mut decoder) => match decoder.try_finish() {
Ok(_) => { Ok(_) => {
let b = decoder.get_mut().take(); let b = decoder.get_mut().take();
@ -183,19 +163,6 @@ impl ContentDecoder {
fn feed_data(&mut self, data: Bytes) -> io::Result<Option<Bytes>> { fn feed_data(&mut self, data: Bytes) -> io::Result<Option<Bytes>> {
match self { match self {
#[cfg(feature = "brotli")]
ContentDecoder::Br(ref mut decoder) => match decoder.write_all(&data) {
Ok(_) => {
decoder.flush()?;
let b = decoder.get_mut().take();
if !b.is_empty() {
Ok(Some(b))
} else {
Ok(None)
}
}
Err(e) => Err(e),
},
ContentDecoder::Gzip(ref mut decoder) => match decoder.write_all(&data) { ContentDecoder::Gzip(ref mut decoder) => match decoder.write_all(&data) {
Ok(_) => { Ok(_) => {
decoder.flush()?; decoder.flush()?;

View file

@ -1,8 +1,6 @@
//! Stream encoder //! Stream encoder
use std::{fmt, future::Future, io, io::Write, pin::Pin, task::Context, task::Poll}; use std::{fmt, future::Future, io, io::Write, pin::Pin, task::Context, task::Poll};
#[cfg(feature = "brotli")]
use brotli2::write::BrotliEncoder;
use flate2::write::{GzEncoder, ZlibEncoder}; use flate2::write::{GzEncoder, ZlibEncoder};
use crate::http::body::{Body, BodySize, MessageBody, ResponseBody}; use crate::http::body::{Body, BodySize, MessageBody, ResponseBody};
@ -117,7 +115,7 @@ impl<B: MessageBody> MessageBody for Encoder<B> {
Poll::Ready(Ok(Err(e))) => return Poll::Ready(Some(Err(Box::new(e)))), Poll::Ready(Ok(Err(e))) => return Poll::Ready(Some(Err(Box::new(e)))),
Poll::Ready(Err(_)) => { Poll::Ready(Err(_)) => {
return Poll::Ready(Some(Err(Box::new(io::Error::new( return Poll::Ready(Some(Err(Box::new(io::Error::new(
io::ErrorKind::Other, io::ErrorKind::Interrupted,
"Canceled", "Canceled",
))))); )))));
} }
@ -191,23 +189,11 @@ fn update_head(encoding: ContentEncoding, head: &mut ResponseHead) {
enum ContentEncoder { enum ContentEncoder {
Deflate(ZlibEncoder<Writer>), Deflate(ZlibEncoder<Writer>),
Gzip(GzEncoder<Writer>), Gzip(GzEncoder<Writer>),
#[cfg(feature = "brotli")]
Br(BrotliEncoder<Writer>),
} }
impl ContentEncoder { impl ContentEncoder {
fn can_encode(encoding: ContentEncoding) -> bool { fn can_encode(encoding: ContentEncoding) -> bool {
#[cfg(feature = "brotli")] matches!(encoding, ContentEncoding::Deflate | ContentEncoding::Gzip)
{
matches!(
encoding,
ContentEncoding::Deflate | ContentEncoding::Gzip | ContentEncoding::Br
)
}
#[cfg(not(feature = "brotli"))]
{
matches!(encoding, ContentEncoding::Deflate | ContentEncoding::Gzip)
}
} }
fn encoder(encoding: ContentEncoding) -> Option<Self> { fn encoder(encoding: ContentEncoding) -> Option<Self> {
@ -220,18 +206,12 @@ impl ContentEncoder {
Writer::new(), Writer::new(),
flate2::Compression::fast(), flate2::Compression::fast(),
))), ))),
#[cfg(feature = "brotli")]
ContentEncoding::Br => {
Some(ContentEncoder::Br(BrotliEncoder::new(Writer::new(), 3)))
}
_ => None, _ => None,
} }
} }
fn take(&mut self) -> Bytes { fn take(&mut self) -> Bytes {
match *self { match *self {
#[cfg(feature = "brotli")]
ContentEncoder::Br(ref mut encoder) => encoder.get_mut().take(),
ContentEncoder::Deflate(ref mut encoder) => encoder.get_mut().take(), ContentEncoder::Deflate(ref mut encoder) => encoder.get_mut().take(),
ContentEncoder::Gzip(ref mut encoder) => encoder.get_mut().take(), ContentEncoder::Gzip(ref mut encoder) => encoder.get_mut().take(),
} }
@ -239,11 +219,6 @@ impl ContentEncoder {
fn finish(self) -> Result<Bytes, io::Error> { fn finish(self) -> Result<Bytes, io::Error> {
match self { match self {
#[cfg(feature = "brotli")]
ContentEncoder::Br(encoder) => match encoder.finish() {
Ok(writer) => Ok(writer.buf.freeze()),
Err(err) => Err(err),
},
ContentEncoder::Gzip(encoder) => match encoder.finish() { ContentEncoder::Gzip(encoder) => match encoder.finish() {
Ok(writer) => Ok(writer.buf.freeze()), Ok(writer) => Ok(writer.buf.freeze()),
Err(err) => Err(err), Err(err) => Err(err),
@ -257,14 +232,6 @@ impl ContentEncoder {
fn write(&mut self, data: &[u8]) -> Result<(), io::Error> { fn write(&mut self, data: &[u8]) -> Result<(), io::Error> {
match *self { match *self {
#[cfg(feature = "brotli")]
ContentEncoder::Br(ref mut encoder) => match encoder.write_all(data) {
Ok(_) => Ok(()),
Err(err) => {
log::trace!("Error decoding br encoding: {}", err);
Err(err)
}
},
ContentEncoder::Gzip(ref mut encoder) => match encoder.write_all(data) { ContentEncoder::Gzip(ref mut encoder) => match encoder.write_all(data) {
Ok(_) => Ok(()), Ok(_) => Ok(()),
Err(err) => { Err(err) => {
@ -288,8 +255,6 @@ impl fmt::Debug for ContentEncoder {
match self { match self {
ContentEncoder::Deflate(_) => write!(f, "ContentEncoder::Deflate"), ContentEncoder::Deflate(_) => write!(f, "ContentEncoder::Deflate"),
ContentEncoder::Gzip(_) => write!(f, "ContentEncoder::Gzip"), ContentEncoder::Gzip(_) => write!(f, "ContentEncoder::Gzip"),
#[cfg(feature = "brotli")]
ContentEncoder::Br(_) => write!(f, "ContentEncoder::Br"),
} }
} }
} }

View file

@ -29,7 +29,7 @@ pub trait ResponseError: fmt::Display + fmt::Debug {
} }
} }
impl<'a, T: ResponseError> ResponseError for &'a T { impl<T: ResponseError> ResponseError for &T {
fn error_response(&self) -> Response { fn error_response(&self) -> Response {
(*self).error_response() (*self).error_response()
} }
@ -217,7 +217,7 @@ pub enum BlockingError<E: fmt::Debug> {
impl From<crate::rt::JoinError> for PayloadError { impl From<crate::rt::JoinError> for PayloadError {
fn from(_: crate::rt::JoinError) -> Self { fn from(_: crate::rt::JoinError) -> Self {
PayloadError::Io(io::Error::new( PayloadError::Io(io::Error::new(
io::ErrorKind::Other, io::ErrorKind::Interrupted,
"Operation is canceled", "Operation is canceled",
)) ))
} }
@ -228,7 +228,7 @@ impl From<BlockingError<io::Error>> for PayloadError {
match err { match err {
BlockingError::Error(e) => PayloadError::Io(e), BlockingError::Error(e) => PayloadError::Io(e),
BlockingError::Canceled => PayloadError::Io(io::Error::new( BlockingError::Canceled => PayloadError::Io(io::Error::new(
io::ErrorKind::Other, io::ErrorKind::Interrupted,
"Operation is canceled", "Operation is canceled",
)), )),
} }

View file

@ -1,5 +1,5 @@
//! HTTP/1 protocol dispatcher //! HTTP/1 protocol dispatcher
use std::{error, future, io, marker, pin::Pin, rc::Rc, task::Context, task::Poll}; use std::{error, future, io, marker, mem, pin::Pin, rc::Rc, task::Context, task::Poll};
use crate::io::{Decoded, Filter, Io, IoStatusUpdate, RecvError}; use crate::io::{Decoded, Filter, Io, IoStatusUpdate, RecvError};
use crate::service::{PipelineCall, Service}; use crate::service::{PipelineCall, Service};
@ -144,7 +144,20 @@ where
inner.send_response(res, body) inner.send_response(res, body)
} }
Poll::Ready(Err(err)) => inner.control(Control::err(err)), Poll::Ready(Err(err)) => inner.control(Control::err(err)),
Poll::Pending => ready!(inner.poll_request(cx)), Poll::Pending => {
// State changed because of an error: spawn the current publish
// future onto the runtime so it can complete its error handling.
let st = ready!(inner.poll_request(cx));
if inner.payload.is_some() {
if let State::CallPublish { fut } =
mem::replace(&mut *this.st, State::ReadRequest)
{
crate::rt::spawn(fut);
}
}
st
}
}, },
// handle control service responses // handle control service responses
State::CallControl { fut } => match Pin::new(fut).poll(cx) { State::CallControl { fut } => match Pin::new(fut).poll(cx) {
@ -339,7 +352,7 @@ where
.io .io
.encode(Message::Item((msg, body.size())), &self.codec) .encode(Message::Item((msg, body.size())), &self.codec)
.map_err(|err| { .map_err(|err| {
if let Some(mut payload) = self.payload.take() { if let Some(ref mut payload) = self.payload {
payload.1.set_error(PayloadError::Incomplete(None)); payload.1.set_error(PayloadError::Incomplete(None));
} }
err err
@ -438,7 +451,7 @@ where
} }
fn set_payload_error(&mut self, err: PayloadError) { fn set_payload_error(&mut self, err: PayloadError) {
if let Some(mut payload) = self.payload.take() { if let Some(ref mut payload) = self.payload {
payload.1.set_error(err); payload.1.set_error(err);
} }
} }

View file

@ -3,8 +3,7 @@ use std::rc::{Rc, Weak};
use std::task::{Context, Poll}; use std::task::{Context, Poll};
use std::{cell::RefCell, collections::VecDeque, pin::Pin}; use std::{cell::RefCell, collections::VecDeque, pin::Pin};
use crate::http::error::PayloadError; use crate::{http::error::PayloadError, task::LocalWaker, util::Bytes, util::Stream};
use crate::{task::LocalWaker, util::Bytes, util::Stream};
/// max buffer size 32k /// max buffer size 32k
const MAX_BUFFER_SIZE: usize = 32_768; const MAX_BUFFER_SIZE: usize = 32_768;
@ -119,7 +118,7 @@ impl PayloadSender {
// we check only if Payload (other side) is alive, // we check only if Payload (other side) is alive,
// otherwise always return true (consume payload) // otherwise always return true (consume payload)
if let Some(shared) = self.inner.upgrade() { if let Some(shared) = self.inner.upgrade() {
if shared.borrow().need_read { if shared.borrow().flags.contains(Flags::NEED_READ) {
PayloadStatus::Read PayloadStatus::Read
} else { } else {
shared.borrow_mut().io_task.register(cx.waker()); shared.borrow_mut().io_task.register(cx.waker());
@ -131,12 +130,20 @@ impl PayloadSender {
} }
} }
bitflags::bitflags! {
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
struct Flags: u8 {
const EOF = 0b0000_0001;
const ERROR = 0b0000_0010;
const NEED_READ = 0b0000_0100;
}
}
#[derive(Debug)] #[derive(Debug)]
struct Inner { struct Inner {
len: usize, len: usize,
eof: bool, flags: Flags,
err: Option<PayloadError>, err: Option<PayloadError>,
need_read: bool,
items: VecDeque<Bytes>, items: VecDeque<Bytes>,
task: LocalWaker, task: LocalWaker,
io_task: LocalWaker, io_task: LocalWaker,
@ -144,12 +151,16 @@ struct Inner {
impl Inner { impl Inner {
fn new(eof: bool) -> Self { fn new(eof: bool) -> Self {
let flags = if eof {
Flags::EOF | Flags::NEED_READ
} else {
Flags::NEED_READ
};
Inner { Inner {
eof, flags,
len: 0, len: 0,
err: None, err: None,
items: VecDeque::new(), items: VecDeque::new(),
need_read: true,
task: LocalWaker::new(), task: LocalWaker::new(),
io_task: LocalWaker::new(), io_task: LocalWaker::new(),
} }
@ -157,18 +168,23 @@ impl Inner {
fn set_error(&mut self, err: PayloadError) { fn set_error(&mut self, err: PayloadError) {
self.err = Some(err); self.err = Some(err);
self.flags.insert(Flags::ERROR);
self.task.wake() self.task.wake()
} }
fn feed_eof(&mut self) { fn feed_eof(&mut self) {
self.eof = true; self.flags.insert(Flags::EOF);
self.task.wake() self.task.wake()
} }
fn feed_data(&mut self, data: Bytes) { fn feed_data(&mut self, data: Bytes) {
self.len += data.len(); self.len += data.len();
self.items.push_back(data); self.items.push_back(data);
self.need_read = self.len < MAX_BUFFER_SIZE; if self.len < MAX_BUFFER_SIZE {
self.flags.insert(Flags::NEED_READ);
} else {
self.flags.remove(Flags::NEED_READ);
}
self.task.wake(); self.task.wake();
} }
@ -178,19 +194,25 @@ impl Inner {
) -> Poll<Option<Result<Bytes, PayloadError>>> { ) -> Poll<Option<Result<Bytes, PayloadError>>> {
if let Some(data) = self.items.pop_front() { if let Some(data) = self.items.pop_front() {
self.len -= data.len(); self.len -= data.len();
self.need_read = self.len < MAX_BUFFER_SIZE; if self.len < MAX_BUFFER_SIZE {
self.flags.insert(Flags::NEED_READ);
} else {
self.flags.remove(Flags::NEED_READ);
}
if self.need_read && !self.eof { if self.flags.contains(Flags::NEED_READ)
&& !self.flags.intersects(Flags::EOF | Flags::ERROR)
{
self.task.register(cx.waker()); self.task.register(cx.waker());
} }
self.io_task.wake(); self.io_task.wake();
Poll::Ready(Some(Ok(data))) Poll::Ready(Some(Ok(data)))
} else if let Some(err) = self.err.take() { } else if let Some(err) = self.err.take() {
Poll::Ready(Some(Err(err))) Poll::Ready(Some(Err(err)))
} else if self.eof { } else if self.flags.intersects(Flags::EOF | Flags::ERROR) {
Poll::Ready(None) Poll::Ready(None)
} else { } else {
self.need_read = true; self.flags.insert(Flags::NEED_READ);
self.task.register(cx.waker()); self.task.register(cx.waker());
self.io_task.wake(); self.io_task.wake();
Poll::Pending Poll::Pending

Some files were not shown because too many files have changed in this diff.