Better worker availability handling

Nikolay Kim 2025-03-28 03:13:43 +01:00
parent 8f2d5056c9
commit 965e825662
6 changed files with 77 additions and 41 deletions

View file

@ -46,7 +46,10 @@ ntex-compio = { path = "ntex-compio" }
 ntex-tokio = { path = "ntex-tokio" }
 [workspace.dependencies]
 async-channel = "2"
+async-task = "4.5.0"
+atomic-waker = "1.1"
+core_affinity = "0.8"
 bitflags = "2"
 cfg_aliases = "0.2.1"
 cfg-if = "1.0.0"
@ -57,7 +60,8 @@ fxhash = "0.2"
 libc = "0.2.164"
 log = "0.4"
 io-uring = "0.7.4"
-polling = "3.3.0"
+oneshot = "0.1"
+polling = "3.7.4"
 nohash-hasher = "0.2.0"
 scoped-tls = "1.0.1"
 slab = "0.4.9"

View file

@ -1,5 +1,9 @@
 # Changes
+## [2.7.3] - 2025-03-28
+* Better worker availability handling
 ## [2.7.2] - 2025-03-27
 * Handle paused state

View file

@ -1,6 +1,6 @@
 [package]
 name = "ntex-server"
-version = "2.7.2"
+version = "2.7.3"
 authors = ["ntex contributors <team@ntex.rs>"]
 description = "Server for ntex framework"
 keywords = ["network", "framework", "async", "futures"]
@ -22,13 +22,13 @@ ntex-service = "3.4"
 ntex-rt = "0.4"
 ntex-util = "2.8"
-async-channel = "2"
-async-broadcast = "0.7"
-core_affinity = "0.8"
-polling = "3.3"
-log = "0.4"
-socket2 = "0.5"
-oneshot = { version = "0.1", default-features = false, features = ["async"] }
+async-channel = { workspace = true }
+atomic-waker = { workspace = true }
+core_affinity = { workspace = true }
+oneshot = { workspace = true }
+polling = { workspace = true }
+log = { workspace = true }
+socket2 = { workspace = true }
 [dev-dependencies]
 ntex = "2"

View file

@ -55,7 +55,7 @@ impl<F: ServerConfiguration> ServerManager<F> {
         let no_signals = cfg.no_signals;
         let shared = Arc::new(ServerShared {
-            paused: AtomicBool::new(false),
+            paused: AtomicBool::new(true),
         });
         let mgr = ServerManager(Rc::new(Inner {
             cfg,
@ -139,7 +139,6 @@ impl<F: ServerConfiguration> ServerManager<F> {
 fn start_worker<F: ServerConfiguration>(mgr: ServerManager<F>, cid: Option<CoreId>) {
     let _ = ntex_rt::spawn(async move {
         let id = mgr.next_id();
         let mut wrk = Worker::start(id, mgr.factory(), cid);
         loop {
@ -212,10 +211,9 @@ impl<F: ServerConfiguration> HandleCmdState<F> {
         match upd {
             Update::Available(worker) => {
                 self.workers.push(worker);
-                if !self.workers.is_empty() {
+                self.workers.sort();
+                if self.workers.len() == 1 {
                     self.mgr.resume();
-                } else {
-                    self.workers.sort();
                 }
             }
             Update::Unavailable(worker) => {
@ -234,6 +232,9 @@ impl<F: ServerConfiguration> HandleCmdState<F> {
             if let Err(item) = self.workers[0].send(item) {
                 self.backlog.push_back(item);
                 self.workers.remove(0);
+                if self.workers.is_empty() {
+                    self.mgr.pause();
+                }
                 break;
             }
         }
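
Taken together, the manager-side changes above mean the server now starts paused, resumes accepting connections only when the first worker reports itself available, and pauses again once the last available worker is dropped from the list (the dispatch code always sends to self.workers[0], so the list is kept sorted). A minimal sketch of that bookkeeping, with invented names (AvailabilityTracker and its methods) standing in for the real ServerManager and Worker types:

    // Hypothetical model of the availability bookkeeping; the real code tracks
    // Worker handles and calls ServerManager::{pause, resume} instead of a flag.
    #[derive(Debug)]
    struct AvailabilityTracker {
        available: Vec<usize>, // ids of workers currently able to accept work
        paused: bool,
    }

    impl AvailabilityTracker {
        fn new() -> Self {
            // The server now starts in the paused state.
            AvailabilityTracker { available: Vec::new(), paused: true }
        }

        fn worker_available(&mut self, id: usize) {
            self.available.push(id);
            self.available.sort();
            // Resume accepting only on the 0 -> 1 transition.
            if self.available.len() == 1 {
                self.paused = false;
            }
        }

        fn worker_unavailable(&mut self, id: usize) {
            self.available.retain(|&w| w != id);
            // Pause again once no worker can take new work.
            if self.available.is_empty() {
                self.paused = true;
            }
        }
    }

    fn main() {
        let mut t = AvailabilityTracker::new();
        assert!(t.paused);
        t.worker_available(1);
        assert!(!t.paused);
        t.worker_unavailable(1);
        assert!(t.paused);
    }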

View file

@ -203,14 +203,10 @@ impl Accept {
         let mut timeout = Some(Duration::ZERO);
         loop {
             if let Err(e) = self.poller.wait(&mut events, timeout) {
-                if e.kind() == io::ErrorKind::Interrupted {
-                    continue;
-                } else {
+                if e.kind() != io::ErrorKind::Interrupted {
                     panic!("Cannot wait for events in poller: {}", e)
                 }
-            }
-            if timeout.is_some() {
+            } else if timeout.is_some() {
                 timeout = None;
                 let _ = self.tx.take().unwrap().send(());
             }
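
The accept-loop change above flattens the error handling: a wait interrupted by a signal is simply retried on the next iteration, any other poller error is fatal, and the one-shot readiness notification now sits in an else branch, so it is sent only after a wait that returned Ok. A small sketch of that control flow against the polling 3.x API; mpsc::Sender stands in for the oneshot sender (self.tx) used by the real code:

    use std::sync::mpsc;
    use std::{io, time::Duration};

    use polling::{Events, Poller};

    // One iteration of the wait loop; `tx` models the one-shot readiness signal.
    fn poll_step(
        poller: &Poller,
        events: &mut Events,
        timeout: &mut Option<Duration>,
        tx: &mut Option<mpsc::Sender<()>>,
    ) {
        if let Err(e) = poller.wait(events, *timeout) {
            // An interrupted syscall is retried by the caller's loop;
            // any other poller failure is fatal.
            if e.kind() != io::ErrorKind::Interrupted {
                panic!("Cannot wait for events in poller: {}", e);
            }
        } else if timeout.is_some() {
            // Only a successful wait (done with the initial zero timeout)
            // switches to blocking waits and reports the loop as ready.
            *timeout = None;
            let _ = tx.take().unwrap().send(());
        }
    }

    fn main() -> io::Result<()> {
        let poller = Poller::new()?;
        let mut events = Events::new();
        let mut timeout = Some(Duration::ZERO);
        let (sender, receiver) = mpsc::channel();
        let mut tx = Some(sender);

        poll_step(&poller, &mut events, &mut timeout, &mut tx);
        assert!(receiver.try_recv().is_ok()); // readiness signalled exactly once
        assert!(timeout.is_none()); // later waits will block
        Ok(())
    }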

View file

@ -2,8 +2,8 @@ use std::sync::atomic::{AtomicBool, Ordering};
 use std::task::{ready, Context, Poll};
 use std::{cmp, future::poll_fn, future::Future, hash, pin::Pin, sync::Arc};
-use async_broadcast::{self as bus, broadcast};
 use async_channel::{unbounded, Receiver, Sender};
+use atomic_waker::AtomicWaker;
 use core_affinity::CoreId;
 use ntex_rt::{spawn, Arbiter};
@ -151,10 +151,8 @@ impl<T> Worker<T> {
         if self.failed.load(Ordering::Acquire) {
             WorkerStatus::Failed
         } else {
-            // cleanup updates
-            while self.avail.notify.try_recv().is_ok() {}
-            if self.avail.notify.recv_direct().await.is_err() {
+            self.avail.wait_for_update().await;
+            if self.avail.failed() {
                 self.failed.store(true, Ordering::Release);
             }
             self.status()
@ -196,46 +194,79 @@ impl Future for WorkerStop {
 #[derive(Debug, Clone)]
 struct WorkerAvailability {
-    notify: bus::Receiver<()>,
-    available: Arc<AtomicBool>,
+    inner: Arc<Inner>,
 }
 #[derive(Debug, Clone)]
 struct WorkerAvailabilityTx {
-    notify: bus::Sender<()>,
-    available: Arc<AtomicBool>,
+    inner: Arc<Inner>,
 }
+#[derive(Debug)]
+struct Inner {
+    waker: AtomicWaker,
+    updated: AtomicBool,
+    available: AtomicBool,
+    failed: AtomicBool,
+}
 impl WorkerAvailability {
     fn create() -> (Self, WorkerAvailabilityTx) {
-        let (mut tx, rx) = broadcast(16);
-        tx.set_overflow(true);
+        let inner = Arc::new(Inner {
+            waker: AtomicWaker::new(),
+            updated: AtomicBool::new(false),
+            available: AtomicBool::new(false),
+            failed: AtomicBool::new(false),
+        });
         let avail = WorkerAvailability {
-            notify: rx,
-            available: Arc::new(AtomicBool::new(false)),
-        };
-        let avail_tx = WorkerAvailabilityTx {
-            notify: tx,
-            available: avail.available.clone(),
+            inner: inner.clone(),
         };
+        let avail_tx = WorkerAvailabilityTx { inner };
         (avail, avail_tx)
     }
+    fn failed(&self) -> bool {
+        self.inner.failed.load(Ordering::Acquire)
+    }
     fn available(&self) -> bool {
-        self.available.load(Ordering::Acquire)
+        self.inner.available.load(Ordering::Acquire)
     }
+    async fn wait_for_update(&self) {
+        poll_fn(|cx| {
+            if self.inner.updated.load(Ordering::Acquire) {
+                self.inner.updated.store(false, Ordering::Release);
+                Poll::Ready(())
+            } else {
+                self.inner.waker.register(cx.waker());
+                Poll::Pending
+            }
+        })
+        .await;
+    }
 }
 impl WorkerAvailabilityTx {
     fn set(&self, val: bool) {
-        let old = self.available.swap(val, Ordering::Release);
-        if !old && val {
-            let _ = self.notify.try_broadcast(());
+        let old = self.inner.available.swap(val, Ordering::Release);
+        if old != val {
+            self.inner.updated.store(true, Ordering::Release);
+            self.inner.waker.wake();
         }
     }
 }
+impl Drop for WorkerAvailabilityTx {
+    fn drop(&mut self) {
+        self.inner.failed.store(true, Ordering::Release);
+        self.inner.updated.store(true, Ordering::Release);
+        self.inner.available.store(false, Ordering::Release);
+        self.inner.waker.wake();
+    }
+}
 /// Service worker
 ///
 /// Worker accepts message via unbounded channel and starts processing.
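
The worker-side rewrite above replaces the async-broadcast notification channel with an AtomicWaker plus edge-triggered atomic flags. A self-contained sketch of that pattern, using invented names (Cell, Tx, Rx) for the private Inner, WorkerAvailabilityTx and WorkerAvailability types, omitting the failed flag that the new Drop impl sets, and using futures::executor::block_on only to keep the demo runnable:

    use std::future::poll_fn;
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::Arc;
    use std::task::Poll;
    use std::thread;

    use atomic_waker::AtomicWaker;

    // Shared state, mirroring the `Inner` struct above (minus the `failed` flag).
    struct Cell {
        waker: AtomicWaker,
        updated: AtomicBool,
        available: AtomicBool,
    }

    struct Tx(Arc<Cell>);
    struct Rx(Arc<Cell>);

    fn cell() -> (Tx, Rx) {
        let inner = Arc::new(Cell {
            waker: AtomicWaker::new(),
            updated: AtomicBool::new(false),
            available: AtomicBool::new(false),
        });
        (Tx(inner.clone()), Rx(inner))
    }

    impl Tx {
        // Worker side: record the new state, wake the manager on real changes only.
        fn set(&self, val: bool) {
            if self.0.available.swap(val, Ordering::Release) != val {
                self.0.updated.store(true, Ordering::Release);
                self.0.waker.wake();
            }
        }
    }

    impl Rx {
        fn available(&self) -> bool {
            self.0.available.load(Ordering::Acquire)
        }

        // Manager side: sleep until the worker reports a state change.
        async fn wait_for_update(&self) {
            poll_fn(|cx| {
                // Register before checking, so a concurrent `set` cannot be missed.
                self.0.waker.register(cx.waker());
                if self.0.updated.swap(false, Ordering::AcqRel) {
                    Poll::Ready(())
                } else {
                    Poll::Pending
                }
            })
            .await;
        }
    }

    fn main() {
        let (tx, rx) = cell();
        let worker = thread::spawn(move || tx.set(true));

        // Any executor works; block_on keeps the demo small.
        futures::executor::block_on(rx.wait_for_update());
        assert!(rx.available());
        worker.join().unwrap();
    }

Registering the waker before checking the flag avoids a lost wakeup when set runs between the two steps, and the wake call itself only fires when the availability value actually changes.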