Mirror of https://github.com/ntex-rs/ntex.git (synced 2025-04-03 21:07:39 +03:00)

Better worker availability handling (#546)

parent 8f2d5056c9
commit e9a1284151

9 changed files with 86 additions and 48 deletions
@@ -46,7 +46,10 @@ ntex-compio = { path = "ntex-compio" }
 ntex-tokio = { path = "ntex-tokio" }
 
 [workspace.dependencies]
+async-channel = "2"
 async-task = "4.5.0"
+atomic-waker = "1.1"
+core_affinity = "0.8"
 bitflags = "2"
 cfg_aliases = "0.2.1"
 cfg-if = "1.0.0"
@@ -57,7 +60,8 @@ fxhash = "0.2"
 libc = "0.2.164"
 log = "0.4"
 io-uring = "0.7.4"
-polling = "3.3.0"
+oneshot = "0.1"
+polling = "3.7.4"
 nohash-hasher = "0.2.0"
 scoped-tls = "1.0.1"
 slab = "0.4.9"
@@ -40,7 +40,7 @@ ntex-util = "2.5"
 
 ntex-tokio = { version = "0.5.3", optional = true }
 ntex-compio = { version = "0.2.4", optional = true }
-ntex-neon = { version = "0.1.13", optional = true }
+ntex-neon = { version = "0.1.14", optional = true }
 
 bitflags = { workspace = true }
 cfg-if = { workspace = true }
@@ -42,4 +42,4 @@ tok-io = { version = "1", package = "tokio", default-features = false, features
     "net",
 ], optional = true }
 
-ntex-neon = { version = "0.1.11", optional = true }
+ntex-neon = { version = "0.1.14", optional = true }
@@ -1,5 +1,9 @@
 # Changes
 
+## [2.7.3] - 2025-03-28
+
+* Better worker availability handling
+
 ## [2.7.2] - 2025-03-27
 
 * Handle paused state
@@ -1,6 +1,6 @@
 [package]
 name = "ntex-server"
-version = "2.7.2"
+version = "2.7.3"
 authors = ["ntex contributors <team@ntex.rs>"]
 description = "Server for ntex framework"
 keywords = ["network", "framework", "async", "futures"]
@@ -22,13 +22,13 @@ ntex-service = "3.4"
 ntex-rt = "0.4"
 ntex-util = "2.8"
 
-async-channel = "2"
-async-broadcast = "0.7"
-core_affinity = "0.8"
-polling = "3.3"
-log = "0.4"
-socket2 = "0.5"
-oneshot = { version = "0.1", default-features = false, features = ["async"] }
+async-channel = { workspace = true }
+atomic-waker = { workspace = true }
+core_affinity = { workspace = true }
+oneshot = { workspace = true }
+polling = { workspace = true }
+log = { workspace = true }
+socket2 = { workspace = true }
 
 [dev-dependencies]
 ntex = "2"
@@ -55,7 +55,7 @@ impl<F: ServerConfiguration> ServerManager<F> {
 
         let no_signals = cfg.no_signals;
         let shared = Arc::new(ServerShared {
-            paused: AtomicBool::new(false),
+            paused: AtomicBool::new(true),
        });
         let mgr = ServerManager(Rc::new(Inner {
             cfg,
@@ -139,7 +139,6 @@ impl<F: ServerConfiguration> ServerManager<F> {
 fn start_worker<F: ServerConfiguration>(mgr: ServerManager<F>, cid: Option<CoreId>) {
     let _ = ntex_rt::spawn(async move {
         let id = mgr.next_id();
-
         let mut wrk = Worker::start(id, mgr.factory(), cid);
 
         loop {
@@ -212,10 +211,9 @@ impl<F: ServerConfiguration> HandleCmdState<F> {
         match upd {
             Update::Available(worker) => {
                 self.workers.push(worker);
-                if !self.workers.is_empty() {
-                    self.mgr.resume();
-                } else {
-                    self.workers.sort();
+                self.workers.sort();
+                if self.workers.len() == 1 {
+                    self.mgr.resume();
                 }
             }
             Update::Unavailable(worker) => {
@@ -234,6 +232,9 @@ impl<F: ServerConfiguration> HandleCmdState<F> {
             if let Err(item) = self.workers[0].send(item) {
                 self.backlog.push_back(item);
                 self.workers.remove(0);
+                if self.workers.is_empty() {
+                    self.mgr.pause();
+                }
                 break;
             }
         }
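Read together, the two manager hunks above change the pause/resume bookkeeping: the server now starts paused (see the ServerShared hunk earlier), resumes only when the first worker reports itself available, and pauses again once the last usable worker has been dropped. A minimal standalone model of that invariant, assuming simplified stand-in types (`Manager`, plain `u32` worker ids and a `bool` flag are illustrative, not the crate's types):

// Minimal standalone model of the availability bookkeeping above
// (illustrative only; not the crate's actual types).
struct Manager {
    workers: Vec<u32>, // ids of workers currently able to accept connections
    paused: bool,      // mirrors the ServerShared paused flag
}

impl Manager {
    fn available(&mut self, id: u32) {
        self.workers.push(id);
        self.workers.sort();
        // resume only when this is the first available worker
        if self.workers.len() == 1 {
            self.paused = false;
        }
    }

    fn drop_worker(&mut self, idx: usize) {
        self.workers.remove(idx);
        // pause once no worker is left to accept connections
        if self.workers.is_empty() {
            self.paused = true;
        }
    }
}

fn main() {
    let mut m = Manager { workers: Vec::new(), paused: true }; // starts paused
    m.available(1);
    assert!(!m.paused);
    m.drop_worker(0);
    assert!(m.paused);
}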
@@ -203,14 +203,10 @@ impl Accept {
         let mut timeout = Some(Duration::ZERO);
         loop {
             if let Err(e) = self.poller.wait(&mut events, timeout) {
-                if e.kind() == io::ErrorKind::Interrupted {
-                    continue;
-                } else {
+                if e.kind() != io::ErrorKind::Interrupted {
                     panic!("Cannot wait for events in poller: {}", e)
                 }
-            }
-
-            if timeout.is_some() {
+            } else if timeout.is_some() {
                 timeout = None;
                 let _ = self.tx.take().unwrap().send(());
             }
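The restructured loop above treats an `Interrupted` error as benign, panics on any other poller error, and sends the one-shot readiness signal only after the first successful wait. A compilable sketch of that control flow, with a closure standing in for `Poller::wait` (illustrative only, not the crate's code):

use std::io;
use std::time::Duration;

// `wait` stands in for the poller wait call; it returns the number of events.
fn accept_loop(mut wait: impl FnMut(Option<Duration>) -> io::Result<usize>) {
    let mut timeout = Some(Duration::ZERO);
    for _ in 0..4 {
        // bounded here so the example terminates; the real loop runs forever
        if let Err(e) = wait(timeout) {
            // an interrupted wait is benign: just poll again on the next turn
            if e.kind() != io::ErrorKind::Interrupted {
                panic!("Cannot wait for events in poller: {}", e)
            }
        } else if timeout.is_some() {
            // first successful wait: switch to blocking waits and signal readiness
            timeout = None;
        }
        // ... process events here ...
    }
}

fn main() {
    accept_loop(|_timeout| Ok(0));
}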
@@ -2,8 +2,8 @@ use std::sync::atomic::{AtomicBool, Ordering};
 use std::task::{ready, Context, Poll};
 use std::{cmp, future::poll_fn, future::Future, hash, pin::Pin, sync::Arc};
 
-use async_broadcast::{self as bus, broadcast};
 use async_channel::{unbounded, Receiver, Sender};
+use atomic_waker::AtomicWaker;
 use core_affinity::CoreId;
 
 use ntex_rt::{spawn, Arbiter};
@@ -151,10 +151,8 @@ impl<T> Worker<T> {
         if self.failed.load(Ordering::Acquire) {
             WorkerStatus::Failed
         } else {
-            // cleanup updates
-            while self.avail.notify.try_recv().is_ok() {}
-
-            if self.avail.notify.recv_direct().await.is_err() {
+            self.avail.wait_for_update().await;
+            if self.avail.failed() {
                 self.failed.store(true, Ordering::Release);
             }
             self.status()
@@ -196,46 +194,79 @@ impl Future for WorkerStop {
 
 #[derive(Debug, Clone)]
 struct WorkerAvailability {
-    notify: bus::Receiver<()>,
-    available: Arc<AtomicBool>,
+    inner: Arc<Inner>,
 }
 
 #[derive(Debug, Clone)]
 struct WorkerAvailabilityTx {
-    notify: bus::Sender<()>,
-    available: Arc<AtomicBool>,
+    inner: Arc<Inner>,
+}
+
+#[derive(Debug)]
+struct Inner {
+    waker: AtomicWaker,
+    updated: AtomicBool,
+    available: AtomicBool,
+    failed: AtomicBool,
 }
 
 impl WorkerAvailability {
     fn create() -> (Self, WorkerAvailabilityTx) {
-        let (mut tx, rx) = broadcast(16);
-        tx.set_overflow(true);
+        let inner = Arc::new(Inner {
+            waker: AtomicWaker::new(),
+            updated: AtomicBool::new(false),
+            available: AtomicBool::new(false),
+            failed: AtomicBool::new(false),
+        });
 
         let avail = WorkerAvailability {
-            notify: rx,
-            available: Arc::new(AtomicBool::new(false)),
-        };
-        let avail_tx = WorkerAvailabilityTx {
-            notify: tx,
-            available: avail.available.clone(),
+            inner: inner.clone(),
         };
+        let avail_tx = WorkerAvailabilityTx { inner };
         (avail, avail_tx)
     }
 
+    fn failed(&self) -> bool {
+        self.inner.failed.load(Ordering::Acquire)
+    }
+
     fn available(&self) -> bool {
-        self.available.load(Ordering::Acquire)
+        self.inner.available.load(Ordering::Acquire)
+    }
+
+    async fn wait_for_update(&self) {
+        poll_fn(|cx| {
+            if self.inner.updated.load(Ordering::Acquire) {
+                self.inner.updated.store(false, Ordering::Release);
+                Poll::Ready(())
+            } else {
+                self.inner.waker.register(cx.waker());
+                Poll::Pending
+            }
+        })
+        .await;
     }
 }
 
 impl WorkerAvailabilityTx {
     fn set(&self, val: bool) {
-        let old = self.available.swap(val, Ordering::Release);
-        if !old && val {
-            let _ = self.notify.try_broadcast(());
+        let old = self.inner.available.swap(val, Ordering::Release);
+        if old != val {
+            self.inner.updated.store(true, Ordering::Release);
+            self.inner.waker.wake();
         }
     }
 }
 
+impl Drop for WorkerAvailabilityTx {
+    fn drop(&mut self) {
+        self.inner.failed.store(true, Ordering::Release);
+        self.inner.updated.store(true, Ordering::Release);
+        self.inner.available.store(false, Ordering::Release);
+        self.inner.waker.wake();
+    }
+}
+
 /// Service worker
 ///
 /// Worker accepts message via unbounded channel and starts processing.
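The heart of the change is replacing the `async-broadcast` channel with an `atomic-waker` + atomic-flags handshake: the sender swaps `available`, marks `updated` and wakes the registered waker, while the receiver's `wait_for_update` future resolves once it observes `updated`; dropping the sender marks the worker as failed. A self-contained sketch of the same handshake, assuming the `atomic-waker` crate and a no-op waker so it can be polled without an async runtime (names mirror the diff, but this is not the crate's code):

use std::future::{poll_fn, Future};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::task::{Context, Poll, Wake, Waker};

use atomic_waker::AtomicWaker; // assumed dependency: atomic-waker = "1"

// Shared state, mirroring the `Inner` struct introduced in the diff.
struct Inner {
    waker: AtomicWaker,
    updated: AtomicBool,
    available: AtomicBool,
}

// Receiver side: resolves once the sender has flagged an update.
async fn wait_for_update(inner: &Inner) {
    poll_fn(|cx| {
        if inner.updated.load(Ordering::Acquire) {
            inner.updated.store(false, Ordering::Release);
            Poll::Ready(())
        } else {
            inner.waker.register(cx.waker());
            Poll::Pending
        }
    })
    .await;
}

// Sender side: record the new value, mark the update and wake the waiter.
fn set(inner: &Inner, val: bool) {
    let old = inner.available.swap(val, Ordering::Release);
    if old != val {
        inner.updated.store(true, Ordering::Release);
        inner.waker.wake();
    }
}

// No-op waker so the example can poll the future without an executor.
struct Noop;
impl Wake for Noop {
    fn wake(self: Arc<Self>) {}
}

fn main() {
    let inner = Inner {
        waker: AtomicWaker::new(),
        updated: AtomicBool::new(false),
        available: AtomicBool::new(false),
    };

    let waker = Waker::from(Arc::new(Noop));
    let mut cx = Context::from_waker(&waker);

    let mut fut = std::pin::pin!(wait_for_update(&inner));
    assert!(fut.as_mut().poll(&mut cx).is_pending()); // no update yet

    set(&inner, true); // worker reports itself available
    assert!(fut.as_mut().poll(&mut cx).is_ready()); // waiter observes the update
    assert!(inner.available.load(Ordering::Acquire));
}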
@@ -256,10 +287,13 @@ where
         let mut recv = std::pin::pin!(wrk.rx.recv());
         let fut = poll_fn(|cx| {
             match svc.poll_ready(cx) {
-                Poll::Ready(res) => {
-                    res?;
+                Poll::Ready(Ok(())) => {
                     wrk.availability.set(true);
                 }
+                Poll::Ready(Err(err)) => {
+                    wrk.availability.set(false);
+                    return Poll::Ready(Err(err));
+                }
                 Poll::Pending => {
                     wrk.availability.set(false);
                     return Poll::Pending;
@@ -287,7 +321,6 @@ where
                 let _ = ntex_rt::spawn(async move {
                     svc.shutdown().await;
                 });
-                wrk.availability.set(false);
             }
             Either::Right(Some(Shutdown { timeout, result })) => {
                 wrk.availability.set(false);
@@ -302,6 +335,7 @@ where
                 return;
             }
             Either::Left(Ok(false)) | Either::Right(None) => {
+                wrk.availability.set(false);
                 stop_svc(wrk.id, svc, STOP_TIMEOUT, None).await;
                 return;
             }
@@ -311,7 +345,6 @@ where
         loop {
             match select(wrk.factory.create(()), stream_recv(&mut wrk.stop)).await {
                 Either::Left(Ok(service)) => {
-                    wrk.availability.set(true);
                     svc = Pipeline::new(service).bind();
                     break;
                 }
@@ -68,7 +68,7 @@ ntex-service = "3.4"
 ntex-macros = "0.1"
 ntex-util = "2.8"
 ntex-bytes = "0.1.27"
-ntex-server = "2.7"
+ntex-server = "2.7.3"
 ntex-h2 = "1.8.6"
 ntex-rt = "0.4.27"
 ntex-io = "2.11"