Refactor async runtimes support (#88)

* refactor async runtimes support
Nikolay Kim, 2022-01-03 21:24:49 +06:00 (committed by GitHub)
parent 713e02d6a3
commit 847f2738dd
45 changed files with 801 additions and 1033 deletions

View file

@ -10,6 +10,8 @@ members = [
"ntex-tls",
"ntex-macros",
"ntex-util",
"ntex-tokio",
"ntex-async-std",
]
[patch.crates-io]
@ -23,3 +25,6 @@ ntex-service = { path = "ntex-service" }
ntex-tls = { path = "ntex-tls" }
ntex-macros = { path = "ntex-macros" }
ntex-util = { path = "ntex-util" }
ntex-tokio = { path = "ntex-tokio" }
ntex-async-std = { path = "ntex-async-std" }

View file

@ -0,0 +1,5 @@
# Changes
## [0.1.0] - 2022-01-03
* Initial release

26
ntex-async-std/Cargo.toml Normal file
View file

@ -0,0 +1,26 @@
[package]
name = "ntex-async-std"
version = "0.1.0"
authors = ["ntex contributors <team@ntex.rs>"]
description = "async-std integration for ntex framework"
keywords = ["network", "framework", "async", "futures"]
homepage = "https://ntex.rs"
repository = "https://github.com/ntex-rs/ntex.git"
documentation = "https://docs.rs/ntex-rt-tokio/"
categories = ["network-programming", "asynchronous"]
license = "MIT"
edition = "2018"
[lib]
name = "ntex_async_std"
path = "src/lib.rs"
[dependencies]
ntex-bytes = "0.1.8"
ntex-io = "0.1.0"
ntex-util = "0.1.6"
async-oneshot = "0.5.0"
derive_more = "0.99"
log = "0.4"
pin-project-lite = "0.2"
async-std = { version = "1", features = ["unstable"] }

1
ntex-async-std/LICENSE Symbolic link
View file

@ -0,0 +1 @@
../LICENSE

View file

@ -1,215 +1,18 @@
#![allow(dead_code)]
use std::future::Future;
use std::task::{Context, Poll};
use std::{any, cell::RefCell, io, net, net::SocketAddr, pin::Pin, rc::Rc};
use std::{any, future::Future, io, pin::Pin, task::Context, task::Poll};
use async_oneshot as oneshot;
use async_std::io::{Read, Write};
use ntex_bytes::{Buf, BufMut, BytesMut, PoolRef};
use ntex_bytes::{Buf, BufMut, BytesMut};
use ntex_io::{
types, Handle, Io, IoStream, ReadContext, ReadStatus, WriteContext, WriteStatus,
types, Handle, IoStream, ReadContext, ReadStatus, WriteContext, WriteStatus,
};
use ntex_util::{future::lazy, ready, time::sleep, time::Sleep};
use ntex_util::{ready, time::sleep, time::Sleep};
use crate::{Runtime, Signal};
#[derive(Debug, Copy, Clone, derive_more::Display)]
pub struct JoinError;
impl std::error::Error for JoinError {}
#[derive(Clone)]
struct TcpStream(async_std::net::TcpStream);
#[cfg(unix)]
#[derive(Clone)]
struct UnixStream(async_std::os::unix::net::UnixStream);
/// Create new single-threaded async-std runtime.
pub fn create_runtime() -> Box<dyn Runtime> {
Box::new(AsyncStdRuntime::new().unwrap())
}
/// Opens a TCP connection to a remote host.
pub async fn tcp_connect(addr: SocketAddr) -> Result<Io, io::Error> {
let sock = async_std::net::TcpStream::connect(addr).await?;
sock.set_nodelay(true)?;
Ok(Io::new(TcpStream(sock)))
}
/// Opens a TCP connection to a remote host and use specified memory pool.
pub async fn tcp_connect_in(addr: SocketAddr, pool: PoolRef) -> Result<Io, io::Error> {
let sock = async_std::net::TcpStream::connect(addr).await?;
sock.set_nodelay(true)?;
Ok(Io::with_memory_pool(TcpStream(sock), pool))
}
#[cfg(unix)]
/// Opens a unix stream connection.
pub async fn unix_connect<P>(addr: P) -> Result<Io, io::Error>
where
P: AsRef<async_std::path::Path>,
{
let sock = async_std::os::unix::net::UnixStream::connect(addr).await?;
Ok(Io::new(UnixStream(sock)))
}
#[cfg(unix)]
/// Opens a unix stream connection and specified memory pool.
pub async fn unix_connect_in<P>(addr: P, pool: PoolRef) -> Result<Io, io::Error>
where
P: AsRef<async_std::path::Path>,
{
let sock = async_std::os::unix::net::UnixStream::connect(addr).await?;
Ok(Io::with_memory_pool(UnixStream(sock), pool))
}
/// Convert std TcpStream to async-std's TcpStream
pub fn from_tcp_stream(stream: net::TcpStream) -> Result<Io, io::Error> {
stream.set_nonblocking(true)?;
stream.set_nodelay(true)?;
Ok(Io::new(TcpStream(async_std::net::TcpStream::from(stream))))
}
#[cfg(unix)]
/// Convert std UnixStream to async-std's UnixStream
pub fn from_unix_stream(stream: std::os::unix::net::UnixStream) -> Result<Io, io::Error> {
stream.set_nonblocking(true)?;
Ok(Io::new(UnixStream(From::from(stream))))
}
/// Spawn a future on the current thread. This does not create a new Arbiter
/// or Arbiter address, it is simply a helper for spawning futures on the current
/// thread.
///
/// # Panics
///
/// This function panics if ntex system is not running.
#[inline]
pub fn spawn<F>(f: F) -> JoinHandle<F::Output>
where
F: Future + 'static,
{
JoinHandle {
fut: async_std::task::spawn_local(f),
}
}
/// Executes a future on the current thread. This does not create a new Arbiter
/// or Arbiter address, it is simply a helper for executing futures on the current
/// thread.
///
/// # Panics
///
/// This function panics if ntex system is not running.
#[inline]
pub fn spawn_fn<F, R>(f: F) -> JoinHandle<R::Output>
where
F: FnOnce() -> R + 'static,
R: Future + 'static,
{
spawn(async move {
let r = lazy(|_| f()).await;
r.await
})
}
/// Spawns a blocking task.
///
/// The task will be spawned onto a thread pool specifically dedicated
/// to blocking tasks. This is useful to prevent long-running synchronous
/// operations from blocking the main futures executor.
pub fn spawn_blocking<F, T>(f: F) -> JoinHandle<T>
where
F: FnOnce() -> T + Send + 'static,
T: Send + 'static,
{
JoinHandle {
fut: async_std::task::spawn_blocking(f),
}
}
pub struct JoinHandle<T> {
fut: async_std::task::JoinHandle<T>,
}
impl<T> Future for JoinHandle<T> {
type Output = Result<T, JoinError>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
Poll::Ready(Ok(ready!(Pin::new(&mut self.fut).poll(cx))))
}
}
thread_local! {
static SRUN: RefCell<bool> = RefCell::new(false);
static SHANDLERS: Rc<RefCell<Vec<oneshot::Sender<Signal>>>> = Default::default();
}
/// Register signal handler.
///
/// Signals are handled by oneshots, you have to re-register
/// after each signal.
pub fn signal() -> Option<oneshot::Receiver<Signal>> {
if !SRUN.with(|v| *v.borrow()) {
spawn(Signals::new());
}
SHANDLERS.with(|handlers| {
let (tx, rx) = oneshot::oneshot();
handlers.borrow_mut().push(tx);
Some(rx)
})
}
/// Single-threaded async-std runtime.
#[derive(Debug)]
struct AsyncStdRuntime {}
impl AsyncStdRuntime {
/// Returns a new runtime initialized with default configuration values.
fn new() -> io::Result<Self> {
Ok(Self {})
}
}
impl Runtime for AsyncStdRuntime {
/// Spawn a future onto the single-threaded runtime.
fn spawn(&self, future: Pin<Box<dyn Future<Output = ()>>>) {
async_std::task::spawn_local(future);
}
/// Runs the provided future, blocking the current thread until the future
/// completes.
fn block_on(&self, f: Pin<Box<dyn Future<Output = ()>>>) {
// set ntex-util spawn fn
ntex_util::set_spawn_fn(|fut| {
async_std::task::spawn_local(fut);
});
async_std::task::block_on(f);
}
}
struct Signals {}
impl Signals {
pub(super) fn new() -> Signals {
Self {}
}
}
impl Future for Signals {
type Output = ();
fn poll(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Self::Output> {
Poll::Ready(())
}
}
use crate::TcpStream;
impl IoStream for TcpStream {
fn start(self, read: ReadContext, write: WriteContext) -> Option<Box<dyn Handle>> {
spawn(ReadTask::new(self.clone(), read));
spawn(WriteTask::new(self.clone(), write));
async_std::task::spawn_local(ReadTask::new(self.clone(), read));
async_std::task::spawn_local(WriteTask::new(self.clone(), write));
Some(Box::new(self))
}
}
@ -406,8 +209,11 @@ impl Future for WriteTask {
// flush write buffer
match flush_io(&mut this.io.0, &this.state, cx) {
Poll::Ready(true) => {
if let Err(_) =
this.io.0.shutdown(std::net::Shutdown::Write)
if this
.io
.0
.shutdown(std::net::Shutdown::Write)
.is_err()
{
this.state.close(None);
return Poll::Ready(());
@ -571,11 +377,12 @@ pub fn poll_read_buf<T: Read>(
#[cfg(unix)]
mod unixstream {
use super::*;
use crate::UnixStream;
impl IoStream for UnixStream {
fn start(self, read: ReadContext, write: WriteContext) -> Option<Box<dyn Handle>> {
spawn(ReadTask::new(self.clone(), read));
spawn(WriteTask::new(self.clone(), write));
async_std::task::spawn_local(ReadTask::new(self.clone(), read));
async_std::task::spawn_local(WriteTask::new(self, write));
None
}
}
@ -749,8 +556,11 @@ mod unixstream {
// flush write buffer
match flush_io(&mut this.io.0, &this.state, cx) {
Poll::Ready(true) => {
if let Err(_) =
this.io.0.shutdown(std::net::Shutdown::Write)
if this
.io
.0
.shutdown(std::net::Shutdown::Write)
.is_err()
{
this.state.close(None);
return Poll::Ready(());

64
ntex-async-std/src/lib.rs Normal file
View file

@ -0,0 +1,64 @@
use std::{io::Result, net, net::SocketAddr};
use ntex_bytes::PoolRef;
use ntex_io::Io;
mod io;
mod signals;
pub use self::signals::{signal, Signal};
#[derive(Clone)]
struct TcpStream(async_std::net::TcpStream);
#[cfg(unix)]
#[derive(Clone)]
struct UnixStream(async_std::os::unix::net::UnixStream);
/// Opens a TCP connection to a remote host.
pub async fn tcp_connect(addr: SocketAddr) -> Result<Io> {
let sock = async_std::net::TcpStream::connect(addr).await?;
sock.set_nodelay(true)?;
Ok(Io::new(TcpStream(sock)))
}
/// Opens a TCP connection to a remote host and uses the specified memory pool.
pub async fn tcp_connect_in(addr: SocketAddr, pool: PoolRef) -> Result<Io> {
let sock = async_std::net::TcpStream::connect(addr).await?;
sock.set_nodelay(true)?;
Ok(Io::with_memory_pool(TcpStream(sock), pool))
}
#[cfg(unix)]
/// Opens a unix stream connection.
pub async fn unix_connect<P>(addr: P) -> Result<Io>
where
P: AsRef<async_std::path::Path>,
{
let sock = async_std::os::unix::net::UnixStream::connect(addr).await?;
Ok(Io::new(UnixStream(sock)))
}
#[cfg(unix)]
/// Opens a unix stream connection and uses the specified memory pool.
pub async fn unix_connect_in<P>(addr: P, pool: PoolRef) -> Result<Io>
where
P: AsRef<async_std::path::Path>,
{
let sock = async_std::os::unix::net::UnixStream::connect(addr).await?;
Ok(Io::with_memory_pool(UnixStream(sock), pool))
}
/// Convert std TcpStream to async-std's TcpStream
pub fn from_tcp_stream(stream: net::TcpStream) -> Result<Io> {
stream.set_nonblocking(true)?;
stream.set_nodelay(true)?;
Ok(Io::new(TcpStream(async_std::net::TcpStream::from(stream))))
}
#[cfg(unix)]
/// Convert std UnixStream to async-std's UnixStream
pub fn from_unix_stream(stream: std::os::unix::net::UnixStream) -> Result<Io> {
stream.set_nonblocking(true)?;
Ok(Io::new(UnixStream(From::from(stream))))
}
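
The new crate keeps the connector surface that previously lived behind `ntex-rt`'s `async-std` feature: every helper returns a plain `ntex_io::Io`, and the `*_connect_in` variants only differ in taking a `PoolRef` so buffers come from a caller-chosen memory pool. A minimal usage sketch (the address is a placeholder, and the future has to be polled from the async-std based ntex runtime):

```rust
use std::{io, net::SocketAddr};

use ntex_async_std::tcp_connect;

// Minimal sketch: connect and queue some bytes. Must be polled from the
// async-std based ntex runtime; the address is a placeholder.
async fn send_ping() -> io::Result<()> {
    let addr: SocketAddr = "127.0.0.1:8080".parse().unwrap();

    // `tcp_connect` enables TCP_NODELAY and wraps the socket into
    // `ntex_io::Io`, which starts the read/write tasks from `io.rs` above.
    let io = tcp_connect(addr).await?;

    // Bytes go into the shared write buffer; the background write task
    // flushes them to the socket.
    io.write(b"PING\r\n")?;
    Ok(())
}
```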

View file

@ -0,0 +1,52 @@
use std::{cell::RefCell, future::Future, pin::Pin, rc::Rc, task::Context, task::Poll};
use async_oneshot as oneshot;
thread_local! {
static SRUN: RefCell<bool> = RefCell::new(false);
static SHANDLERS: Rc<RefCell<Vec<oneshot::Sender<Signal>>>> = Default::default();
}
/// Different types of process signals
#[derive(PartialEq, Clone, Copy, Debug)]
pub enum Signal {
/// SIGHUP
Hup,
/// SIGINT
Int,
/// SIGTERM
Term,
/// SIGQUIT
Quit,
}
/// Register signal handler.
///
/// Signals are handled by oneshots, you have to re-register
/// after each signal.
pub fn signal() -> Option<oneshot::Receiver<Signal>> {
if !SRUN.with(|v| *v.borrow()) {
async_std::task::spawn_local(Signals::new());
}
SHANDLERS.with(|handlers| {
let (tx, rx) = oneshot::oneshot();
handlers.borrow_mut().push(tx);
Some(rx)
})
}
struct Signals {}
impl Signals {
pub(super) fn new() -> Signals {
Self {}
}
}
impl Future for Signals {
type Output = ();
fn poll(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Self::Output> {
Poll::Ready(())
}
}
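
As the doc comment above notes, each registered oneshot fires at most once, so callers re-register after every delivery. A hedged sketch of that contract follows; note that on this backend the `Signals` future is currently a stub that completes immediately and never drains `SHANDLERS`, so the receivers will not actually fire until OS signal handling is wired up here.

```rust
use ntex_async_std::{signal, Signal};

// Illustrative sketch of the intended contract: wait for a termination
// signal, re-registering after every delivery.
async fn wait_for_shutdown() {
    loop {
        let rx = match signal() {
            Some(rx) => rx,
            None => return, // handlers could not be registered
        };
        match rx.await {
            Ok(Signal::Int) | Ok(Signal::Term) | Ok(Signal::Quit) => break,
            Ok(Signal::Hup) => continue, // e.g. reload config, then re-register
            Err(_) => return,            // sender side was dropped
        }
    }
}
```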

View file

@ -1,5 +1,9 @@
# Changes
## [0.1.1] - 2022-01-03
* Move tokio support to separate crate
## [0.1.0] - 2021-12-30
* Unify keep-alive timers

View file

@ -15,22 +15,10 @@ edition = "2018"
name = "ntex_io"
path = "src/lib.rs"
[features]
default = ["tokio-traits"]
# tokio traits support
tokio-traits = ["tok-io/net", "tok-io/rt"]
# tokio runtime support
tokio = ["tok-io/net", "tok-io/rt"]
# async-std runtime support
async-std = ["async_std/unstable"]
[dependencies]
ntex-codec = "0.6.0"
ntex-bytes = "0.1.8"
ntex-util = "0.1.5"
ntex-util = "0.1.6"
ntex-service = "0.3.0"
bitflags = "1.3"
@ -38,9 +26,6 @@ fxhash = "0.2.1"
log = "0.4"
pin-project-lite = "0.2"
tok-io = { version = "1", package = "tokio", default-features = false, optional = true }
async_std = { version = "1", package = "async-std", optional = true }
[dev-dependencies]
ntex = "0.5.0"
rand = "0.8"

View file

@ -1,38 +0,0 @@
#![allow(dead_code)]
//! async net providers
use ntex_util::future::lazy;
use std::future::Future;
/// Spawn a future on the current thread. This does not create a new Arbiter
/// or Arbiter address, it is simply a helper for spawning futures on the current
/// thread.
///
/// # Panics
///
/// This function panics if ntex system is not running.
#[inline]
pub fn spawn<F>(f: F) -> async_std::task::JoinHandle<F::Output>
where
F: Future + 'static,
{
async_std::task::spawn_local(f)
}
/// Executes a future on the current thread. This does not create a new Arbiter
/// or Arbiter address, it is simply a helper for executing futures on the current
/// thread.
///
/// # Panics
///
/// This function panics if ntex system is not running.
#[inline]
pub fn spawn_fn<F, R>(f: F) -> async_std::task::JoinHandle<R::Output>
where
F: FnOnce() -> R + 'static,
R: Future + 'static,
{
spawn(async move {
let r = lazy(|_| f()).await;
r.await
})
}

View file

@ -5,9 +5,9 @@ use ntex_bytes::Pool;
use ntex_codec::{Decoder, Encoder};
use ntex_service::{IntoService, Service};
use ntex_util::time::Seconds;
use ntex_util::{future::Either, ready};
use ntex_util::{future::Either, ready, spawn};
use crate::{rt::spawn, DispatchItem, IoBoxed, IoRef, IoStatusUpdate, RecvError};
use crate::{DispatchItem, IoBoxed, IoRef, IoStatusUpdate, RecvError};
type Response<U> = <U as Encoder>::Item;

View file

@ -18,13 +18,6 @@ mod tasks;
mod timer;
pub mod utils;
#[cfg(feature = "async-std")]
mod asyncstd_rt;
#[cfg(any(feature = "tokio-traits", feature = "tokio"))]
mod tokio_impl;
#[cfg(feature = "tokio")]
mod tokio_rt;
use ntex_bytes::BytesMut;
use ntex_codec::{Decoder, Encoder};
use ntex_util::time::Millis;
@ -181,24 +174,6 @@ where
}
}
pub mod rt {
//! async runtime helpers
#[cfg(feature = "tokio")]
pub use crate::tokio_rt::*;
#[cfg(all(not(feature = "tokio"), feature = "async-std"))]
pub use crate::asyncstd_rt::*;
#[cfg(all(not(feature = "tokio"), not(feature = "async-std")))]
pub fn spawn<F>(_: F) -> std::pin::Pin<Box<dyn std::future::Future<Output = F::Output>>>
where
F: std::future::Future + 'static,
{
unimplemented!()
}
}
#[cfg(test)]
mod tests {
use super::*;

View file

@ -351,11 +351,11 @@ impl IoStream for IoTest {
fn start(self, read: ReadContext, write: WriteContext) -> Option<Box<dyn Handle>> {
let io = Rc::new(self);
crate::rt::spawn(ReadTask {
ntex_util::spawn(ReadTask {
io: io.clone(),
state: read,
});
crate::rt::spawn(WriteTask {
ntex_util::spawn(WriteTask {
io: io.clone(),
state: write,
st: IoWriteState::Processing(None),
@ -644,110 +644,6 @@ pub(super) fn flush_io(
}
}
#[cfg(any(feature = "tokio", feature = "tokio-traits"))]
mod tokio_impl {
use tok_io::io::{AsyncRead, AsyncWrite, ReadBuf};
use super::*;
impl AsyncRead for IoTest {
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<io::Result<()>> {
let guard = self.local.lock().unwrap();
let mut ch = guard.borrow_mut();
*ch.waker.0.lock().unwrap().borrow_mut() = Some(cx.waker().clone());
if !ch.buf.is_empty() {
let size = std::cmp::min(ch.buf.len(), buf.remaining());
let b = ch.buf.split_to(size);
buf.put_slice(&b);
return Poll::Ready(Ok(()));
}
match mem::take(&mut ch.read) {
IoTestState::Ok => Poll::Pending,
IoTestState::Close => {
ch.read = IoTestState::Close;
Poll::Ready(Ok(()))
}
IoTestState::Pending => Poll::Pending,
IoTestState::Err(e) => Poll::Ready(Err(e)),
}
}
}
impl AsyncWrite for IoTest {
fn poll_write(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
let guard = self.remote.lock().unwrap();
let mut ch = guard.borrow_mut();
match mem::take(&mut ch.write) {
IoTestState::Ok => {
let cap = cmp::min(buf.len(), ch.buf_cap);
if cap > 0 {
ch.buf.extend(&buf[..cap]);
ch.buf_cap -= cap;
ch.flags.remove(IoTestFlags::FLUSHED);
ch.waker.wake();
Poll::Ready(Ok(cap))
} else {
*self
.local
.lock()
.unwrap()
.borrow_mut()
.waker
.0
.lock()
.unwrap()
.borrow_mut() = Some(cx.waker().clone());
Poll::Pending
}
}
IoTestState::Close => Poll::Ready(Ok(0)),
IoTestState::Pending => {
*self
.local
.lock()
.unwrap()
.borrow_mut()
.waker
.0
.lock()
.unwrap()
.borrow_mut() = Some(cx.waker().clone());
Poll::Pending
}
IoTestState::Err(e) => Poll::Ready(Err(e)),
}
}
fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> {
Poll::Ready(Ok(()))
}
fn poll_shutdown(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<io::Result<()>> {
self.local
.lock()
.unwrap()
.borrow_mut()
.flags
.insert(IoTestFlags::CLOSED);
Poll::Ready(Ok(()))
}
}
}
#[cfg(test)]
#[allow(clippy::redundant_clone)]
mod tests {

View file

@ -1,9 +1,9 @@
use std::{cell::RefCell, collections::BTreeMap, rc::Rc, time::Duration, time::Instant};
use ntex_util::time::{now, sleep, Millis};
use ntex_util::HashSet;
use ntex_util::{spawn, HashSet};
use crate::{io::IoState, rt::spawn, IoRef};
use crate::{io::IoState, IoRef};
thread_local! {
static TIMER: Rc<RefCell<Inner>> = Rc::new(RefCell::new(

View file

@ -1,37 +0,0 @@
//! async net providers
use ntex_util::future::lazy;
use std::future::Future;
/// Spawn a future on the current thread. This does not create a new Arbiter
/// or Arbiter address, it is simply a helper for spawning futures on the current
/// thread.
///
/// # Panics
///
/// This function panics if ntex system is not running.
#[inline]
pub fn spawn<F>(f: F) -> tok_io::task::JoinHandle<F::Output>
where
F: Future + 'static,
{
tok_io::task::spawn_local(f)
}
/// Executes a future on the current thread. This does not create a new Arbiter
/// or Arbiter address, it is simply a helper for executing futures on the current
/// thread.
///
/// # Panics
///
/// This function panics if ntex system is not running.
#[inline]
pub fn spawn_fn<F, R>(f: F) -> tok_io::task::JoinHandle<R::Output>
where
F: FnOnce() -> R + 'static,
R: Future + 'static,
{
spawn(async move {
let r = lazy(|_| f()).await;
r.await
})
}

View file

@ -1,5 +1,9 @@
# Changes
## [0.4.1] - 2022-01-03
* Refactor async runtimes support
## [0.4.0] - 2021-12-30
* 0.4 release

View file

@ -1,6 +1,6 @@
[package]
name = "ntex-rt"
version = "0.4.0"
version = "0.4.1"
authors = ["ntex contributors <team@ntex.rs>"]
description = "ntex runtime"
keywords = ["network", "framework", "async", "futures"]
@ -19,20 +19,18 @@ path = "src/lib.rs"
default = []
# tokio support
tokio = ["ntex-io/tokio", "tok-io"]
tokio = ["tok-io"]
# async-std support
async-std = ["ntex-io/async-std", "async_std/unstable"]
async-std = ["async_std/unstable"]
[dependencies]
ntex-bytes = "0.1.8"
ntex-io = "0.1.0"
ntex-util = "0.1.4"
async-oneshot = "0.5.0"
async-channel = "1.6.1"
derive_more = "0.99.14"
futures-core = "0.3"
log = "0.4"
pin-project-lite = "0.2"
tok-io = { version = "1", package = "tokio", default-features = false, features = ["rt", "net", "signal"], optional = true }
tok-io = { version = "1", package = "tokio", default-features = false, features = ["rt", "net"], optional = true }
async_std = { version = "1", package = "async-std", optional = true }

View file

@ -5,9 +5,9 @@ use std::{cell::RefCell, collections::HashMap, fmt, future::Future, pin::Pin, th
use async_channel::{unbounded, Receiver, Sender};
use async_oneshot as oneshot;
use ntex_util::Stream;
use futures_core::stream::Stream;
use crate::{system::System, Runtime};
use crate::system::System;
thread_local!(
static ADDR: RefCell<Option<Arbiter>> = RefCell::new(None);
@ -50,16 +50,14 @@ impl Clone for Arbiter {
impl Arbiter {
#[allow(clippy::borrowed_box)]
pub(super) fn new_system(rt: &Box<dyn Runtime>) -> Self {
pub(super) fn new_system() -> (Self, ArbiterController) {
let (tx, rx) = unbounded();
let arb = Arbiter::with_sender(tx);
ADDR.with(|cell| *cell.borrow_mut() = Some(arb.clone()));
STORAGE.with(|cell| cell.borrow_mut().clear());
rt.spawn(Box::pin(ArbiterController { stop: None, rx }));
arb
(arb, ArbiterController { stop: None, rx })
}
/// Returns the current thread's arbiter's address. If no Arbiter is present, then this
@ -88,7 +86,6 @@ impl Arbiter {
let handle = thread::Builder::new()
.name(name.clone())
.spawn(move || {
let rt = crate::create_runtime();
let arb = Arbiter::with_sender(arb_tx);
let (stop, stop_rx) = oneshot::oneshot();
@ -96,11 +93,12 @@ impl Arbiter {
System::set_current(sys);
crate::block_on(async move {
// start arbiter controller
rt.spawn(Box::pin(ArbiterController {
crate::spawn(ArbiterController {
stop: Some(stop),
rx: arb_rx,
}));
});
ADDR.with(|cell| *cell.borrow_mut() = Some(arb.clone()));
// register arbiter
@ -109,9 +107,8 @@ impl Arbiter {
.try_send(SystemCommand::RegisterArbiter(id, arb));
// run loop
rt.block_on(Box::pin(async move {
let _ = stop_rx.await;
}));
});
// unregister arbiter
let _ = System::current()
@ -232,7 +229,7 @@ impl Arbiter {
}
}
struct ArbiterController {
pub(crate) struct ArbiterController {
stop: Option<oneshot::Sender<i32>>,
rx: Receiver<ArbiterCommand>,
}

View file

@ -2,10 +2,9 @@ use std::{cell::RefCell, future::Future, io, rc::Rc};
use async_channel::unbounded;
use async_oneshot as oneshot;
use ntex_util::future::lazy;
use crate::arbiter::{Arbiter, SystemArbiter};
use crate::{create_runtime, Runtime, System};
use crate::arbiter::{Arbiter, ArbiterController, SystemArbiter};
use crate::System;
/// Builder struct for a ntex runtime.
///
@ -46,58 +45,63 @@ impl Builder {
///
/// This method panics if it can not create tokio runtime
pub fn finish(self) -> SystemRunner {
self.create_runtime(|| {})
}
/// This function will start tokio runtime and will finish once the
/// `System::stop()` message gets called.
/// Function `f` gets called within tokio runtime context.
pub fn run<F>(self, f: F) -> io::Result<()>
where
F: FnOnce() + 'static,
{
self.create_runtime(f).run()
}
fn create_runtime<F>(self, f: F) -> SystemRunner
where
F: FnOnce() + 'static,
{
let (stop_tx, stop) = oneshot::oneshot();
let (sys_sender, sys_receiver) = unbounded();
let stop_on_panic = self.stop_on_panic;
let rt = create_runtime();
let (arb, arb_controller) = Arbiter::new_system();
let system = System::construct(sys_sender, arb, stop_on_panic);
// system arbiter
let _system =
System::construct(sys_sender, Arbiter::new_system(&rt), self.stop_on_panic);
let arb = SystemArbiter::new(stop_tx, sys_receiver);
rt.spawn(Box::pin(arb));
// init system arbiter and run configuration method
let runner = SystemRunner { rt, stop, _system };
runner.block_on(lazy(move |_| f()));
runner
SystemRunner {
stop,
arb,
arb_controller,
system,
}
}
}
/// Helper object that runs System's event loop
#[must_use = "SystemRunner must be run"]
pub struct SystemRunner {
rt: Box<dyn Runtime>,
stop: oneshot::Receiver<i32>,
_system: System,
arb: SystemArbiter,
arb_controller: ArbiterController,
system: System,
}
impl SystemRunner {
/// Get current system.
pub fn system(&self) -> System {
self.system.clone()
}
/// This function will start event loop and will finish once the
/// `System::stop()` function is called.
pub fn run(self) -> io::Result<()> {
let SystemRunner { rt, stop, .. } = self;
pub fn run_until_stop(self) -> io::Result<()> {
self.run(|| Ok(()))
}
/// This function will start event loop and will finish once the
/// `System::stop()` function is called.
#[inline]
pub fn run<F>(self, f: F) -> io::Result<()>
where
F: FnOnce() -> io::Result<()> + 'static,
{
let SystemRunner {
stop,
arb,
arb_controller,
..
} = self;
// run loop
match block_on(&rt, stop).take() {
match block_on(stop, arb, arb_controller, f).take()? {
Ok(code) => {
if code != 0 {
Err(io::Error::new(
@ -114,22 +118,22 @@ impl SystemRunner {
/// Execute a future and wait for result.
#[inline]
pub fn block_on<F, R>(&self, fut: F) -> R
pub fn block_on<F, R>(self, fut: F) -> R
where
F: Future<Output = R> + 'static,
R: 'static,
{
block_on(&self.rt, fut).take()
}
let SystemRunner {
arb,
arb_controller,
..
} = self;
/// Execute a function with enabled executor.
#[inline]
pub fn exec<F, R>(&self, f: F) -> R
where
F: FnOnce() -> R + 'static,
R: 'static,
{
self.block_on(lazy(|_| f()))
// run loop
match block_on(fut, arb, arb_controller, || Ok(())).take() {
Ok(result) => result,
Err(_) => unreachable!(),
}
}
}
@ -142,17 +146,28 @@ impl<T> BlockResult<T> {
}
#[inline]
#[allow(clippy::borrowed_box)]
fn block_on<F, R>(rt: &Box<dyn Runtime>, fut: F) -> BlockResult<R>
fn block_on<F, R, F1>(
fut: F,
arb: SystemArbiter,
arb_controller: ArbiterController,
f: F1,
) -> BlockResult<io::Result<R>>
where
F: Future<Output = R> + 'static,
R: 'static,
F1: FnOnce() -> io::Result<()> + 'static,
{
let result = Rc::new(RefCell::new(None));
let result_inner = result.clone();
rt.block_on(Box::pin(async move {
crate::block_on(Box::pin(async move {
crate::spawn(arb);
crate::spawn(arb_controller);
if let Err(e) = f() {
*result_inner.borrow_mut() = Some(Err(e));
} else {
let r = fut.await;
*result_inner.borrow_mut() = Some(r);
*result_inner.borrow_mut() = Some(Ok(r));
}
}));
BlockResult(result)
}
@ -171,8 +186,8 @@ mod tests {
thread::spawn(move || {
let runner = crate::System::build().stop_on_panic(true).finish();
tx.send(System::current()).unwrap();
let _ = runner.run();
tx.send(runner.system()).unwrap();
let _ = runner.run_until_stop();
});
let s = System::new("test");

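`SystemRunner` no longer owns a boxed `Runtime`: it carries the system arbiter and arbiter controller and hands them, together with the user's future, to the feature-selected `crate::block_on`. `run` now takes a startup closure returning `io::Result<()>` (replacing the old `exec` + `run()` pair), while `run_until_stop` keeps the previous no-argument behaviour. A minimal sketch of the new flow, assuming `ntex-rt` is built with the `tokio` or `async-std` feature:

```rust
use std::io;

use ntex_rt::System;

// Minimal sketch of the new flow, assuming ntex-rt is built with the
// `tokio` (or `async-std`) feature; otherwise `block_on` panics.
fn main() -> io::Result<()> {
    let runner = System::build().finish();

    // The system handle is available before the event loop starts, e.g. to
    // stop it from another thread or from spawned tasks.
    let system = runner.system();

    runner.run(move || {
        // ... register servers/services here ...
        // Stopping immediately just demonstrates the control flow; a real
        // application would leave the loop running.
        system.stop();
        Ok(())
    })
}
```
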
View file

@ -1,6 +1,5 @@
//! A runtime implementation that runs everything on the current thread.
#![allow(clippy::return_self_not_must_use)]
use std::{future::Future, pin::Pin};
mod arbiter;
mod builder;
@ -11,40 +10,140 @@ pub use self::builder::{Builder, SystemRunner};
pub use self::system::System;
#[cfg(feature = "tokio")]
mod tokio;
#[cfg(feature = "tokio")]
pub use self::tokio::*;
#[cfg(feature = "async-std")]
mod asyncstd;
#[cfg(all(not(feature = "tokio"), feature = "async-std"))]
pub use self::asyncstd::*;
pub trait Runtime {
/// Spawn a future onto the single-threaded runtime.
fn spawn(&self, future: Pin<Box<dyn Future<Output = ()>>>);
mod tokio {
use std::future::Future;
pub use tok_io::task::{spawn_blocking, JoinError, JoinHandle};
/// Runs the provided future, blocking the current thread until the future
/// completes.
fn block_on(&self, f: Pin<Box<dyn Future<Output = ()>>>);
pub fn block_on<F: Future<Output = ()>>(fut: F) {
let rt = tok_io::runtime::Builder::new_current_thread()
.enable_io()
.build()
.unwrap();
tok_io::task::LocalSet::new().block_on(&rt, fut);
}
/// Different types of process signals
#[derive(PartialEq, Clone, Copy, Debug)]
pub enum Signal {
/// SIGHUP
Hup,
/// SIGINT
Int,
/// SIGTERM
Term,
/// SIGQUIT
Quit,
/// Spawn a future on the current thread. This does not create a new Arbiter
/// or Arbiter address, it is simply a helper for spawning futures on the current
/// thread.
///
/// # Panics
///
/// This function panics if ntex system is not running.
#[inline]
pub fn spawn<F>(f: F) -> tok_io::task::JoinHandle<F::Output>
where
F: Future + 'static,
{
tok_io::task::spawn_local(f)
}
/// Executes a future on the current thread. This does not create a new Arbiter
/// or Arbiter address, it is simply a helper for executing futures on the current
/// thread.
///
/// # Panics
///
/// This function panics if ntex system is not running.
#[inline]
pub fn spawn_fn<F, R>(f: F) -> tok_io::task::JoinHandle<R::Output>
where
F: FnOnce() -> R + 'static,
R: Future + 'static,
{
spawn(async move { f().await })
}
}
#[allow(dead_code)]
#[cfg(feature = "async-std")]
mod asyncstd {
use futures_core::ready;
use std::{future::Future, pin::Pin, task::Context, task::Poll};
/// Runs the provided future, blocking the current thread until the future
/// completes.
pub fn block_on<F: Future<Output = ()>>(fut: F) {
async_std::task::block_on(fut);
}
/// Spawn a future on the current thread. This does not create a new Arbiter
/// or Arbiter address, it is simply a helper for spawning futures on the current
/// thread.
///
/// # Panics
///
/// This function panics if ntex system is not running.
#[inline]
pub fn spawn<F>(f: F) -> JoinHandle<F::Output>
where
F: Future + 'static,
{
JoinHandle {
fut: async_std::task::spawn_local(f),
}
}
/// Executes a future on the current thread. This does not create a new Arbiter
/// or Arbiter address, it is simply a helper for executing futures on the current
/// thread.
///
/// # Panics
///
/// This function panics if ntex system is not running.
#[inline]
pub fn spawn_fn<F, R>(f: F) -> JoinHandle<R::Output>
where
F: FnOnce() -> R + 'static,
R: Future + 'static,
{
spawn(async move { f().await })
}
/// Spawns a blocking task.
///
/// The task will be spawned onto a thread pool specifically dedicated
/// to blocking tasks. This is useful to prevent long-running synchronous
/// operations from blocking the main futures executor.
pub fn spawn_blocking<F, T>(f: F) -> JoinHandle<T>
where
F: FnOnce() -> T + Send + 'static,
T: Send + 'static,
{
JoinHandle {
fut: async_std::task::spawn_blocking(f),
}
}
#[derive(Debug, Copy, Clone, derive_more::Display)]
pub struct JoinError;
impl std::error::Error for JoinError {}
pub struct JoinHandle<T> {
fut: async_std::task::JoinHandle<T>,
}
impl<T> Future for JoinHandle<T> {
type Output = Result<T, JoinError>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
Poll::Ready(Ok(ready!(Pin::new(&mut self.fut).poll(cx))))
}
}
}
#[cfg(feature = "tokio")]
pub use self::tokio::*;
#[cfg(all(not(feature = "tokio"), feature = "async-std"))]
pub use self::asyncstd::*;
/// Runs the provided future, blocking the current thread until the future
/// completes.
#[cfg(all(not(feature = "tokio"), not(feature = "async-std")))]
pub fn create_runtime() -> Box<dyn Runtime> {
unimplemented!()
pub fn block_on<F: std::future::Future<Output = ()>>(_: F) {
panic!("async runtime is not configured");
}
#[cfg(all(not(feature = "tokio"), not(feature = "async-std")))]

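Runtime selection is now purely a cargo-feature decision: the `tokio` module is re-exported when its feature is on (it wins if both are enabled), `asyncstd` otherwise, and with neither feature the fallback `block_on` above panics with "async runtime is not configured". Downstream crates opt in explicitly, e.g. `ntex` now forwards its own `tokio`/`async-std` features here instead of assuming tokio by default. A small sketch of the re-exported surface under the `tokio` feature:

```rust
// Minimal sketch of the re-exported surface, assuming ntex-rt is compiled
// with the `tokio` feature (so these resolve to the tokio-backed module).
fn main() {
    ntex_rt::block_on(async {
        // `spawn` maps to `tokio::task::spawn_local`; `block_on` drives the
        // future inside a `LocalSet`, so this stays on the current thread.
        let local = ntex_rt::spawn(async { 40 + 2 });

        // `spawn_blocking` moves the closure onto tokio's blocking pool.
        let heavy = ntex_rt::spawn_blocking(|| expensive_sum(1_000_000));

        assert_eq!(local.await.unwrap(), 42);
        println!("sum = {}", heavy.await.unwrap());
    });
}

// Stand-in for a CPU-bound computation.
fn expensive_sum(n: u64) -> u64 {
    (0..n).sum()
}
```
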
View file

@ -98,13 +98,13 @@ impl System {
&self.arbiter
}
/// This function will start tokio runtime and will finish once the
/// This function will start async runtime and will finish once the
/// `System::stop()` message gets called.
/// Function `f` get called within tokio runtime context.
/// Function `f` gets called within async runtime context.
pub fn run<F>(f: F) -> io::Result<()>
where
F: FnOnce() + 'static,
F: FnOnce() -> io::Result<()> + 'static,
{
Builder::new().run(f)
Builder::new().finish().run(f)
}
}

View file

@ -1,236 +0,0 @@
use std::future::Future;
use std::task::{Context, Poll};
use std::{cell::RefCell, io, mem, net, net::SocketAddr, path::Path, pin::Pin, rc::Rc};
use async_oneshot as oneshot;
use ntex_bytes::PoolRef;
use ntex_io::Io;
use ntex_util::future::lazy;
pub use tok_io::task::{spawn_blocking, JoinError, JoinHandle};
use tok_io::{runtime, task::LocalSet};
use crate::{Runtime, Signal};
/// Create new single-threaded tokio runtime.
pub fn create_runtime() -> Box<dyn Runtime> {
Box::new(TokioRuntime::new().unwrap())
}
/// Opens a TCP connection to a remote host.
pub async fn tcp_connect(addr: SocketAddr) -> Result<Io, io::Error> {
let sock = tok_io::net::TcpStream::connect(addr).await?;
sock.set_nodelay(true)?;
Ok(Io::new(sock))
}
/// Opens a TCP connection to a remote host and use specified memory pool.
pub async fn tcp_connect_in(addr: SocketAddr, pool: PoolRef) -> Result<Io, io::Error> {
let sock = tok_io::net::TcpStream::connect(addr).await?;
sock.set_nodelay(true)?;
Ok(Io::with_memory_pool(sock, pool))
}
#[cfg(unix)]
/// Opens a unix stream connection.
pub async fn unix_connect<'a, P>(addr: P) -> Result<Io, io::Error>
where
P: AsRef<Path> + 'a,
{
let sock = tok_io::net::UnixStream::connect(addr).await?;
Ok(Io::new(sock))
}
#[cfg(unix)]
/// Opens a unix stream connection and specified memory pool.
pub async fn unix_connect_in<'a, P>(addr: P, pool: PoolRef) -> Result<Io, io::Error>
where
P: AsRef<Path> + 'a,
{
let sock = tok_io::net::UnixStream::connect(addr).await?;
Ok(Io::with_memory_pool(sock, pool))
}
/// Convert std TcpStream to tokio's TcpStream
pub fn from_tcp_stream(stream: net::TcpStream) -> Result<Io, io::Error> {
stream.set_nonblocking(true)?;
stream.set_nodelay(true)?;
Ok(Io::new(tok_io::net::TcpStream::from_std(stream)?))
}
#[cfg(unix)]
/// Convert std UnixStream to tokio's UnixStream
pub fn from_unix_stream(stream: std::os::unix::net::UnixStream) -> Result<Io, io::Error> {
stream.set_nonblocking(true)?;
Ok(Io::new(tok_io::net::UnixStream::from_std(stream)?))
}
/// Spawn a future on the current thread. This does not create a new Arbiter
/// or Arbiter address, it is simply a helper for spawning futures on the current
/// thread.
///
/// # Panics
///
/// This function panics if ntex system is not running.
#[inline]
pub fn spawn<F>(f: F) -> tok_io::task::JoinHandle<F::Output>
where
F: Future + 'static,
{
tok_io::task::spawn_local(f)
}
/// Executes a future on the current thread. This does not create a new Arbiter
/// or Arbiter address, it is simply a helper for executing futures on the current
/// thread.
///
/// # Panics
///
/// This function panics if ntex system is not running.
#[inline]
pub fn spawn_fn<F, R>(f: F) -> tok_io::task::JoinHandle<R::Output>
where
F: FnOnce() -> R + 'static,
R: Future + 'static,
{
spawn(async move {
let r = lazy(|_| f()).await;
r.await
})
}
thread_local! {
static SRUN: RefCell<bool> = RefCell::new(false);
static SHANDLERS: Rc<RefCell<Vec<oneshot::Sender<Signal>>>> = Default::default();
}
/// Register signal handler.
///
/// Signals are handled by oneshots, you have to re-register
/// after each signal.
pub fn signal() -> Option<oneshot::Receiver<Signal>> {
if !SRUN.with(|v| *v.borrow()) {
spawn(Signals::new());
}
SHANDLERS.with(|handlers| {
let (tx, rx) = oneshot::oneshot();
handlers.borrow_mut().push(tx);
Some(rx)
})
}
/// Single-threaded tokio runtime.
#[derive(Debug)]
struct TokioRuntime {
local: LocalSet,
rt: runtime::Runtime,
}
impl TokioRuntime {
/// Returns a new runtime initialized with default configuration values.
fn new() -> io::Result<Self> {
let rt = runtime::Builder::new_current_thread().enable_io().build()?;
Ok(Self {
rt,
local: LocalSet::new(),
})
}
}
impl Runtime for TokioRuntime {
/// Spawn a future onto the single-threaded runtime.
fn spawn(&self, future: Pin<Box<dyn Future<Output = ()>>>) {
self.local.spawn_local(future);
}
/// Runs the provided future, blocking the current thread until the future
/// completes.
fn block_on(&self, f: Pin<Box<dyn Future<Output = ()>>>) {
// set ntex-util spawn fn
ntex_util::set_spawn_fn(|fut| {
tok_io::task::spawn_local(fut);
});
self.local.block_on(&self.rt, f);
}
}
struct Signals {
#[cfg(not(unix))]
signal: Pin<Box<dyn Future<Output = io::Result<()>>>>,
#[cfg(unix)]
signals: Vec<(Signal, tok_io::signal::unix::Signal)>,
}
impl Signals {
pub(super) fn new() -> Signals {
SRUN.with(|h| *h.borrow_mut() = true);
#[cfg(not(unix))]
{
Signals {
signal: Box::pin(tok_io::signal::ctrl_c()),
}
}
#[cfg(unix)]
{
use tok_io::signal::unix;
let sig_map = [
(unix::SignalKind::interrupt(), Signal::Int),
(unix::SignalKind::hangup(), Signal::Hup),
(unix::SignalKind::terminate(), Signal::Term),
(unix::SignalKind::quit(), Signal::Quit),
];
let mut signals = Vec::new();
for (kind, sig) in sig_map.iter() {
match unix::signal(*kind) {
Ok(stream) => signals.push((*sig, stream)),
Err(e) => log::error!(
"Cannot initialize stream handler for {:?} err: {}",
sig,
e
),
}
}
Signals { signals }
}
}
}
impl Drop for Signals {
fn drop(&mut self) {
SRUN.with(|h| *h.borrow_mut() = false);
}
}
impl Future for Signals {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
#[cfg(not(unix))]
{
if self.signal.as_mut().poll(cx).is_ready() {
let handlers = SHANDLERS.with(|h| mem::take(&mut *h.borrow_mut()));
for mut sender in handlers {
let _ = sender.send(Signal::Int);
}
}
Poll::Pending
}
#[cfg(unix)]
{
for (sig, fut) in self.signals.iter_mut() {
if Pin::new(fut).poll_recv(cx).is_ready() {
let handlers = SHANDLERS.with(|h| mem::take(&mut *h.borrow_mut()));
for mut sender in handlers {
let _ = sender.send(*sig);
}
}
}
Poll::Pending
}
}
}

5
ntex-tokio/CHANGES.md Normal file
View file

@ -0,0 +1,5 @@
# Changes
## [0.1.0] - 2022-01-03
* Initial release

24
ntex-tokio/Cargo.toml Normal file
View file

@ -0,0 +1,24 @@
[package]
name = "ntex-tokio"
version = "0.1.0"
authors = ["ntex contributors <team@ntex.rs>"]
description = "tokio integration for ntex framework"
keywords = ["network", "framework", "async", "futures"]
homepage = "https://ntex.rs"
repository = "https://github.com/ntex-rs/ntex.git"
documentation = "https://docs.rs/ntex-rt-tokio/"
categories = ["network-programming", "asynchronous"]
license = "MIT"
edition = "2018"
[lib]
name = "ntex_tokio"
path = "src/lib.rs"
[dependencies]
ntex-bytes = "0.1.8"
ntex-io = "0.1.0"
ntex-util = "0.1.6"
log = "0.4"
pin-project-lite = "0.2"
tokio = { version = "1", default-features = false, features = ["rt", "net", "sync", "signal"] }

1
ntex-tokio/LICENSE Symbolic link
View file

@ -0,0 +1 @@
../LICENSE

View file

@ -2,29 +2,30 @@ use std::task::{Context, Poll};
use std::{any, cell::RefCell, cmp, future::Future, io, mem, pin::Pin, rc::Rc};
use ntex_bytes::{Buf, BufMut, BytesMut};
use ntex_util::{ready, time::sleep, time::Sleep};
use tok_io::io::{AsyncRead, AsyncWrite, ReadBuf};
use tok_io::net::TcpStream;
use crate::{
use ntex_io::{
types, Filter, Handle, Io, IoBoxed, IoStream, ReadContext, ReadStatus, WriteContext,
WriteStatus,
};
use ntex_util::{ready, time::sleep, time::Sleep};
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use tokio::net::TcpStream;
impl IoStream for TcpStream {
impl IoStream for crate::TcpStream {
fn start(self, read: ReadContext, write: WriteContext) -> Option<Box<dyn Handle>> {
let io = Rc::new(RefCell::new(self));
let io = Rc::new(RefCell::new(self.0));
tok_io::task::spawn_local(ReadTask::new(io.clone(), read));
tok_io::task::spawn_local(WriteTask::new(io.clone(), write));
Some(Box::new(io))
tokio::task::spawn_local(ReadTask::new(io.clone(), read));
tokio::task::spawn_local(WriteTask::new(io.clone(), write));
Some(Box::new(HandleWrapper(io)))
}
}
impl Handle for Rc<RefCell<TcpStream>> {
struct HandleWrapper(Rc<RefCell<TcpStream>>);
impl Handle for HandleWrapper {
fn query(&self, id: any::TypeId) -> Option<Box<dyn any::Any>> {
if id == any::TypeId::of::<types::PeerAddr>() {
if let Ok(addr) = self.borrow().peer_addr() {
if let Ok(addr) = self.0.borrow().peer_addr() {
return Some(Box::new(types::PeerAddr(addr)));
}
}
@ -367,62 +368,43 @@ pub(super) fn flush_io<T: AsyncRead + AsyncWrite + Unpin>(
}
}
impl<F: Filter> AsyncRead for Io<F> {
pub struct TokioIoBoxed(IoBoxed);
impl std::ops::Deref for TokioIoBoxed {
type Target = IoBoxed;
#[inline]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<IoBoxed> for TokioIoBoxed {
fn from(io: IoBoxed) -> TokioIoBoxed {
TokioIoBoxed(io)
}
}
impl<F: Filter> From<Io<F>> for TokioIoBoxed {
fn from(io: Io<F>) -> TokioIoBoxed {
TokioIoBoxed(IoBoxed::from(io))
}
}
impl AsyncRead for TokioIoBoxed {
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<io::Result<()>> {
let len = self.with_read_buf(|src| {
let len = self.0.with_read_buf(|src| {
let len = cmp::min(src.len(), buf.remaining());
buf.put_slice(&src.split_to(len));
len
});
if len == 0 {
match ready!(self.poll_read_ready(cx)) {
Ok(Some(())) => Poll::Pending,
Ok(None) => Poll::Ready(Ok(())),
Err(e) => Poll::Ready(Err(e)),
}
} else {
Poll::Ready(Ok(()))
}
}
}
impl<F: Filter> AsyncWrite for Io<F> {
fn poll_write(
self: Pin<&mut Self>,
_: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
Poll::Ready(self.write(buf).map(|_| buf.len()))
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
Io::poll_flush(&*self, cx, false)
}
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
Io::poll_shutdown(&*self, cx)
}
}
impl AsyncRead for IoBoxed {
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<io::Result<()>> {
let len = self.with_read_buf(|src| {
let len = cmp::min(src.len(), buf.remaining());
buf.put_slice(&src.split_to(len));
len
});
if len == 0 {
match ready!(self.poll_read_ready(cx)) {
match ready!(self.0.poll_read_ready(cx)) {
Ok(Some(())) => Poll::Pending,
Err(e) => Poll::Ready(Err(e)),
Ok(None) => Poll::Ready(Ok(())),
@ -433,36 +415,36 @@ impl AsyncRead for IoBoxed {
}
}
impl AsyncWrite for IoBoxed {
impl AsyncWrite for TokioIoBoxed {
fn poll_write(
self: Pin<&mut Self>,
_: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
Poll::Ready(self.write(buf).map(|_| buf.len()))
Poll::Ready(self.0.write(buf).map(|_| buf.len()))
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
(&*self.as_ref()).poll_flush(cx, false)
(*self.as_ref()).0.poll_flush(cx, false)
}
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
(&*self.as_ref()).poll_shutdown(cx)
(*self.as_ref()).0.poll_shutdown(cx)
}
}
#[cfg(unix)]
mod unixstream {
use tok_io::net::UnixStream;
use tokio::net::UnixStream;
use super::*;
impl IoStream for UnixStream {
impl IoStream for crate::UnixStream {
fn start(self, read: ReadContext, write: WriteContext) -> Option<Box<dyn Handle>> {
let io = Rc::new(RefCell::new(self));
let io = Rc::new(RefCell::new(self.0));
tok_io::task::spawn_local(ReadTask::new(io.clone(), read));
tok_io::task::spawn_local(WriteTask::new(io, write));
tokio::task::spawn_local(ReadTask::new(io.clone(), read));
tokio::task::spawn_local(WriteTask::new(io, write));
None
}
}
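
`TokioIoBoxed` replaces the `AsyncRead`/`AsyncWrite` impls that used to live directly on `Io<F>` and `IoBoxed`: it boxes the filter chain and forwards tokio's traits to the ntex read/write buffers, which is how the HTTP/2 code later in this diff passes an ntex connection to `h2`'s handshake. A hedged sketch of the same adaptation for any tokio-native API, assuming a direct `tokio` dependency with the `io-util` feature for `AsyncWriteExt`:

```rust
use ntex_io::{Filter, Io};
use ntex_tokio::TokioIoBoxed;
use tokio::io::AsyncWriteExt;

// Hedged sketch: adapt an ntex `Io` for any API that wants tokio's
// `AsyncRead + AsyncWrite` traits (here just `AsyncWriteExt`).
async fn write_greeting<F: Filter>(io: Io<F>) -> std::io::Result<()> {
    // `TokioIoBoxed` boxes the filter chain and forwards the tokio traits
    // to the underlying ntex read/write buffers.
    let mut io = TokioIoBoxed::from(io);
    io.write_all(b"hello\r\n").await?;
    io.flush().await?;
    Ok(())
}
```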

65
ntex-tokio/src/lib.rs Normal file
View file

@ -0,0 +1,65 @@
use std::{io::Result, net, net::SocketAddr, path::Path};
use ntex_bytes::PoolRef;
use ntex_io::Io;
mod io;
mod signals;
pub use self::io::TokioIoBoxed;
pub use self::signals::{signal, Signal};
struct TcpStream(tokio::net::TcpStream);
#[cfg(unix)]
struct UnixStream(tokio::net::UnixStream);
/// Opens a TCP connection to a remote host.
pub async fn tcp_connect(addr: SocketAddr) -> Result<Io> {
let sock = tokio::net::TcpStream::connect(addr).await?;
sock.set_nodelay(true)?;
Ok(Io::new(TcpStream(sock)))
}
/// Opens a TCP connection to a remote host and uses the specified memory pool.
pub async fn tcp_connect_in(addr: SocketAddr, pool: PoolRef) -> Result<Io> {
let sock = tokio::net::TcpStream::connect(addr).await?;
sock.set_nodelay(true)?;
Ok(Io::with_memory_pool(TcpStream(sock), pool))
}
#[cfg(unix)]
/// Opens a unix stream connection.
pub async fn unix_connect<'a, P>(addr: P) -> Result<Io>
where
P: AsRef<Path> + 'a,
{
let sock = tokio::net::UnixStream::connect(addr).await?;
Ok(Io::new(UnixStream(sock)))
}
#[cfg(unix)]
/// Opens a unix stream connection and uses the specified memory pool.
pub async fn unix_connect_in<'a, P>(addr: P, pool: PoolRef) -> Result<Io>
where
P: AsRef<Path> + 'a,
{
let sock = tokio::net::UnixStream::connect(addr).await?;
Ok(Io::with_memory_pool(UnixStream(sock), pool))
}
/// Convert std TcpStream to tokio's TcpStream
pub fn from_tcp_stream(stream: net::TcpStream) -> Result<Io> {
stream.set_nonblocking(true)?;
stream.set_nodelay(true)?;
Ok(Io::new(TcpStream(tokio::net::TcpStream::from_std(stream)?)))
}
#[cfg(unix)]
/// Convert std UnixStream to tokio's UnixStream
pub fn from_unix_stream(stream: std::os::unix::net::UnixStream) -> Result<Io> {
stream.set_nonblocking(true)?;
Ok(Io::new(UnixStream(tokio::net::UnixStream::from_std(
stream,
)?)))
}
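
Besides the connectors, the `from_*_stream` helpers adopt sockets created through `std::net` first: they switch the socket to non-blocking mode (plus `TCP_NODELAY` for TCP), register it with tokio via `from_std`, and wrap it into `Io`. A short, hedged sketch:

```rust
use std::{io, net};

use ntex_tokio::from_tcp_stream;

// Hedged sketch: adopt a connection accepted through std::net. Must run
// inside the tokio-backed ntex runtime, since `TcpStream::from_std` needs
// an active reactor; a real server would let ntex-server drive the listener.
fn adopt_one(listener: &net::TcpListener) -> io::Result<()> {
    let (stream, peer) = listener.accept()?;

    // Sets non-blocking mode and TCP_NODELAY, registers the socket with
    // tokio and spawns the ntex read/write tasks.
    let io = from_tcp_stream(stream)?;
    println!("adopted connection from {}", peer);

    // Queue an illustrative greeting; the write task flushes it.
    io.write(b"hello\r\n")?;
    Ok(())
}
```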

120
ntex-tokio/src/signals.rs Normal file
View file

@ -0,0 +1,120 @@
use std::{
cell::RefCell, future::Future, mem, pin::Pin, rc::Rc, task::Context, task::Poll,
};
use tokio::sync::oneshot;
use tokio::task::spawn_local;
thread_local! {
static SRUN: RefCell<bool> = RefCell::new(false);
static SHANDLERS: Rc<RefCell<Vec<oneshot::Sender<Signal>>>> = Default::default();
}
/// Different types of process signals
#[derive(PartialEq, Clone, Copy, Debug)]
pub enum Signal {
/// SIGHUP
Hup,
/// SIGINT
Int,
/// SIGTERM
Term,
/// SIGQUIT
Quit,
}
/// Register signal handler.
///
/// Signals are handled by oneshots, you have to re-register
/// after each signal.
pub fn signal() -> Option<oneshot::Receiver<Signal>> {
if !SRUN.with(|v| *v.borrow()) {
spawn_local(Signals::new());
}
SHANDLERS.with(|handlers| {
let (tx, rx) = oneshot::channel();
handlers.borrow_mut().push(tx);
Some(rx)
})
}
struct Signals {
#[cfg(not(unix))]
signal: Pin<Box<dyn Future<Output = std::io::Result<()>>>>,
#[cfg(unix)]
signals: Vec<(Signal, tokio::signal::unix::Signal)>,
}
impl Signals {
fn new() -> Signals {
SRUN.with(|h| *h.borrow_mut() = true);
#[cfg(not(unix))]
{
Signals {
signal: Box::pin(tokio::signal::ctrl_c()),
}
}
#[cfg(unix)]
{
use tokio::signal::unix;
let sig_map = [
(unix::SignalKind::interrupt(), Signal::Int),
(unix::SignalKind::hangup(), Signal::Hup),
(unix::SignalKind::terminate(), Signal::Term),
(unix::SignalKind::quit(), Signal::Quit),
];
let mut signals = Vec::new();
for (kind, sig) in sig_map.iter() {
match unix::signal(*kind) {
Ok(stream) => signals.push((*sig, stream)),
Err(e) => log::error!(
"Cannot initialize stream handler for {:?} err: {}",
sig,
e
),
}
}
Signals { signals }
}
}
}
impl Drop for Signals {
fn drop(&mut self) {
SRUN.with(|h| *h.borrow_mut() = false);
}
}
impl Future for Signals {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
#[cfg(not(unix))]
{
if self.signal.as_mut().poll(cx).is_ready() {
let handlers = SHANDLERS.with(|h| mem::take(&mut *h.borrow_mut()));
for mut sender in handlers {
let _ = sender.send(Signal::Int);
}
}
Poll::Pending
}
#[cfg(unix)]
{
for (sig, fut) in self.signals.iter_mut() {
if Pin::new(fut).poll_recv(cx).is_ready() {
let handlers = SHANDLERS.with(|h| mem::take(&mut *h.borrow_mut()));
for sender in handlers {
let _ = sender.send(*sig);
}
}
}
Poll::Pending
}
}
}

View file

@ -1,5 +1,9 @@
# Changes
## [0.1.6] - 2022-01-03
* Use ntex-rt::spawn
## [0.1.5] - 2021-12-27
* Fix borrow error when timer gets dropped immediately after start

View file

@ -1,6 +1,6 @@
[package]
name = "ntex-util"
version = "0.1.5"
version = "0.1.6"
authors = ["ntex contributors <team@ntex.rs>"]
description = "Utilities for ntex framework"
keywords = ["network", "framework", "async", "futures"]
@ -16,6 +16,7 @@ name = "ntex_util"
path = "src/lib.rs"
[dependencies]
ntex-rt = "0.4.1"
bitflags = "1.3"
fxhash = "0.2.1"
log = "0.4"
@ -26,7 +27,7 @@ futures-sink = { version = "0.3", default-features = false, features = ["alloc"]
pin-project-lite = "0.2.6"
[dev-dependencies]
ntex = "0.5.0-b.0"
ntex-rt = "0.4.0-b.0"
ntex = "0.5.4"
ntex-rt = "0.4.1"
ntex-macros = "0.1.3"
futures-util = { version = "0.3", default-features = false, features = ["alloc"] }

View file

@ -1,6 +1,4 @@
//! Utilities for ntex framework
use std::{cell::RefCell, future::Future, pin::Pin};
pub mod channel;
pub mod future;
pub mod task;
@ -8,37 +6,7 @@ pub mod time;
pub use futures_core::{ready, Stream};
pub use futures_sink::Sink;
pub use ntex_rt::spawn;
pub type HashMap<K, V> = std::collections::HashMap<K, V, fxhash::FxBuildHasher>;
pub type HashSet<V> = std::collections::HashSet<V, fxhash::FxBuildHasher>;
thread_local! {
#[allow(clippy::type_complexity)]
static SPAWNER: RefCell<Box<dyn Fn(Pin<Box<dyn Future<Output = ()>>>)>> = RefCell::new(Box::new(|_| {
panic!("spawn fn is not configured");
}));
}
/// Spawn a future on the current thread.
///
/// # Panics
///
/// This function panics if spawn fn is not set.
#[inline]
pub fn spawn<F>(fut: F)
where
F: Future<Output = ()> + 'static,
{
SPAWNER.with(move |f| {
(*f.borrow())(Box::pin(fut));
});
}
pub fn set_spawn_fn<F>(f: F)
where
F: Fn(Pin<Box<dyn Future<Output = ()>>>) + 'static,
{
SPAWNER.with(|ctx| {
*ctx.borrow_mut() = Box::new(f);
});
}
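
With the thread-local `SPAWNER` and `set_spawn_fn` gone, `ntex_util::spawn` is now just the `ntex_rt::spawn` re-export above, which is why the `crate::set_spawn_fn(...)` preambles disappear from the timer tests below. Call sites are unchanged; a small sketch:

```rust
use ntex_util::time::{sleep, Millis};

// Minimal sketch, assuming an ntex runtime (tokio or async-std feature of
// ntex-rt) is driving the current thread.
async fn fire_and_forget() {
    // Same call shape as before; the future now goes straight to
    // `ntex_rt::spawn`, so a runtime join handle comes back instead of `()`
    // and can optionally be awaited.
    let handle = ntex_util::spawn(async {
        sleep(Millis(10)).await;
    });
    let _ = handle.await;
}
```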

View file

@ -201,9 +201,6 @@ mod tests {
/// Expected Behavior: Two back-to-back calls of `now()` return the same value.
#[ntex_macros::rt_test2]
async fn lowres_time_does_not_immediately_change() {
crate::set_spawn_fn(|f| {
ntex_rt::spawn(f);
});
assert_eq!(now(), now())
}
@ -213,10 +210,6 @@ mod tests {
/// and second value is greater than the first one at least by a 1ms interval.
#[ntex_macros::rt_test2]
async fn lowres_time_updates_after_resolution_interval() {
crate::set_spawn_fn(|f| {
ntex_rt::spawn(f);
});
let first_time = now();
sleep(Millis(25)).await;
@ -230,10 +223,6 @@ mod tests {
/// Expected Behavior: Two back-to-back calls of `now()` return the same value.
#[ntex_macros::rt_test2]
async fn system_time_service_time_does_not_immediately_change() {
crate::set_spawn_fn(|f| {
ntex_rt::spawn(f);
});
assert_eq!(system_time(), system_time());
}
@ -243,10 +232,6 @@ mod tests {
/// and second value is greater than the first one at least by a resolution interval.
#[ntex_macros::rt_test2]
async fn system_time_service_time_updates_after_resolution_interval() {
crate::set_spawn_fn(|f| {
ntex_rt::spawn(f);
});
let wait_time = 300;
let first_time = system_time()
@ -264,10 +249,6 @@ mod tests {
#[ntex_macros::rt_test2]
async fn test_interval() {
crate::set_spawn_fn(|f| {
ntex_rt::spawn(f);
});
let mut int = interval(Millis(250));
let time = time::Instant::now();
@ -293,10 +274,6 @@ mod tests {
#[ntex_macros::rt_test2]
async fn test_interval_one_sec() {
crate::set_spawn_fn(|f| {
ntex_rt::spawn(f);
});
let int = interval(Millis::ONE_SEC);
for _i in 0..3 {

View file

@ -646,10 +646,6 @@ mod tests {
#[ntex_macros::rt_test2]
async fn test_timer() {
crate::set_spawn_fn(|f| {
ntex_rt::spawn(f);
});
crate::spawn(async {
let s = interval(Millis(25));
loop {

View file

@ -1,5 +1,9 @@
# Changes
## [0.5.5] - 2022-01-03
* Disable default runtime selection
## [0.5.4] - 2022-01-02
* http1: Unregister keep-alive timer after request is received

View file

@ -1,6 +1,6 @@
[package]
name = "ntex"
version = "0.5.4"
version = "0.5.5"
authors = ["ntex contributors <team@ntex.rs>"]
description = "Framework for composable network services"
readme = "README.md"
@ -21,7 +21,7 @@ name = "ntex"
path = "src/lib.rs"
[features]
default = ["tokio"]
default = []
# openssl
openssl = ["tls-openssl", "ntex-tls/openssl"]
@ -42,18 +42,20 @@ url = ["url-pkg"]
tokio = ["ntex-rt/tokio"]
# async-std runtime
async-std = ["ntex-rt/async-std"]
async-std = ["ntex-rt/async-std", "ntex-async-std"]
[dependencies]
ntex-codec = "0.6.0"
ntex-router = "0.5.1"
ntex-service = "0.3.0"
ntex-macros = "0.1.3"
ntex-util = "0.1.5"
ntex-util = "0.1.6"
ntex-bytes = "0.1.8"
ntex-tls = "0.1.0"
ntex-rt = "0.4.0"
ntex-io = { version = "0.1.0", features = ["tokio-traits"] }
ntex-io = "0.1.0"
ntex-tokio = "0.1.0"
ntex-async-std = { version = "0.1.0", optional = true }
base64 = "0.13"
bitflags = "1.3"

View file

@ -7,7 +7,7 @@ use http::uri::Authority;
use ntex_tls::types::HttpProtocol;
use crate::channel::pool;
use crate::io::IoBoxed;
use crate::io::{IoBoxed, TokioIoBoxed};
use crate::rt::spawn;
use crate::service::Service;
use crate::task::LocalWaker;
@ -357,7 +357,7 @@ struct OpenConnection<F> {
Box<
dyn Future<
Output = Result<
(SendRequest<Bytes>, H2Connection<IoBoxed, Bytes>),
(SendRequest<Bytes>, H2Connection<TokioIoBoxed, Bytes>),
h2::Error,
>,
>,
@ -443,7 +443,8 @@ where
if io.query::<HttpProtocol>().get() == Some(HttpProtocol::Http2) {
log::trace!("Connection is established, start http2 handshake");
// init http2 handshake
this.h2 = Some(Box::pin(Builder::new().handshake(io)));
this.h2 =
Some(Box::pin(Builder::new().handshake(TokioIoBoxed::from(io))));
self.poll(cx)
} else {
log::trace!("Connection is established, init http1 connection");

View file

@ -14,7 +14,7 @@ use crate::http::header::{
use crate::http::{
message::ResponseHead, payload::Payload, request::Request, response::Response,
};
use crate::io::{Filter, Io, IoRef};
use crate::io::{IoRef, TokioIoBoxed};
use crate::service::Service;
use crate::time::{now, Sleep};
use crate::util::{Bytes, BytesMut};
@ -23,19 +23,18 @@ const CHUNK_SIZE: usize = 16_384;
pin_project_lite::pin_project! {
/// Dispatcher for HTTP/2 protocol
pub struct Dispatcher<F, S: Service<Request>, B: MessageBody, X, U> {
pub struct Dispatcher<S: Service<Request>, B: MessageBody, X, U> {
io: IoRef,
config: Rc<DispatcherConfig<S, X, U>>,
connection: Connection<Io<F>, Bytes>,
connection: Connection<TokioIoBoxed, Bytes>,
ka_expire: time::Instant,
ka_timer: Option<Sleep>,
_t: PhantomData<B>,
}
}
impl<F, S, B, X, U> Dispatcher<F, S, B, X, U>
impl<S, B, X, U> Dispatcher<S, B, X, U>
where
F: Filter,
S: Service<Request> + 'static,
S::Error: ResponseError,
S::Response: Into<Response<B>>,
@ -44,7 +43,7 @@ where
pub(in crate::http) fn new(
io: IoRef,
config: Rc<DispatcherConfig<S, X, U>>,
connection: Connection<Io<F>, Bytes>,
connection: Connection<TokioIoBoxed, Bytes>,
timeout: Option<Sleep>,
) -> Self {
// keep-alive timer
@ -69,9 +68,8 @@ where
}
}
impl<F, S, B, X, U> Future for Dispatcher<F, S, B, X, U>
impl<S, B, X, U> Future for Dispatcher<S, B, X, U>
where
F: Filter,
S: Service<Request> + 'static,
S::Error: ResponseError,
S::Response: Into<Response<B>>,

View file

@ -8,7 +8,7 @@ use crate::http::config::{DispatcherConfig, ServiceConfig};
use crate::http::error::{DispatchError, ResponseError};
use crate::http::request::Request;
use crate::http::response::Response;
use crate::io::{types, Filter, Io, IoRef};
use crate::io::{types, Filter, Io, IoRef, TokioIoBoxed};
use crate::service::{IntoServiceFactory, Service, ServiceFactory};
use crate::time::Millis;
use crate::util::Bytes;
@ -171,7 +171,7 @@ where
{
type Response = ();
type Error = DispatchError;
type Future = H2ServiceHandlerResponse<F, S, B>;
type Future = H2ServiceHandlerResponse<S, B>;
#[inline]
fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
@ -197,39 +197,36 @@ where
state: State::Handshake(
io.get_ref(),
self.config.clone(),
server::Builder::new().handshake(io),
server::Builder::new().handshake(TokioIoBoxed::from(io)),
),
}
}
}
enum State<F, S: Service<Request>, B: MessageBody>
enum State<S: Service<Request>, B: MessageBody>
where
F: Filter,
S: 'static,
{
Incoming(Dispatcher<F, S, B, (), ()>),
Incoming(Dispatcher<S, B, (), ()>),
Handshake(
IoRef,
Rc<DispatcherConfig<S, (), ()>>,
Handshake<Io<F>, Bytes>,
Handshake<TokioIoBoxed, Bytes>,
),
}
pub struct H2ServiceHandlerResponse<F, S, B>
pub struct H2ServiceHandlerResponse<S, B>
where
F: Filter,
S: Service<Request> + 'static,
S::Error: ResponseError,
S::Response: Into<Response<B>>,
B: MessageBody,
{
state: State<F, S, B>,
state: State<S, B>,
}
impl<F, S, B> Future for H2ServiceHandlerResponse<F, S, B>
impl<S, B> Future for H2ServiceHandlerResponse<S, B>
where
F: Filter,
S: Service<Request> + 'static,
S::Error: ResponseError,
S::Response: Into<Response<B>>,

View file

@ -4,7 +4,7 @@ use std::{cell, error, fmt, future, marker, pin::Pin, rc::Rc};
use h2::server::{self, Handshake};
use ntex_tls::types::HttpProtocol;
use crate::io::{types, Filter, Io, IoRef};
use crate::io::{types, Filter, Io, IoRef, TokioIoBoxed};
use crate::service::{IntoServiceFactory, Service, ServiceFactory};
use crate::time::{Millis, Seconds};
use crate::util::Bytes;
@ -382,7 +382,7 @@ where
state: ResponseState::H2Handshake {
data: Some((
io.get_ref(),
server::Builder::new().handshake(io),
server::Builder::new().handshake(TokioIoBoxed::from(io)),
self.config.clone(),
)),
},
@ -437,11 +437,11 @@ pin_project_lite::pin_project! {
U: 'static,
{
H1 { #[pin] fut: h1::Dispatcher<F, S, B, X, U> },
H2 { fut: Dispatcher<F, S, B, X, U> },
H2 { fut: Dispatcher<S, B, X, U> },
H2Handshake { data:
Option<(
IoRef,
Handshake<Io<F>, Bytes>,
Handshake<TokioIoBoxed, Bytes>,
Rc<DispatcherConfig<S, X, U>>,
)>,
},

View file

@ -1,5 +1,5 @@
//! Test helpers to use during testing.
use std::{convert::TryFrom, io, net, str::FromStr, sync::mpsc, thread};
use std::{convert::TryFrom, net, str::FromStr, sync::mpsc, thread};
#[cfg(feature = "cookie")]
use coo_kie::{Cookie, CookieJar};
@ -218,17 +218,16 @@ where
let tcp = net::TcpListener::bind("127.0.0.1:0").unwrap();
let local_addr = tcp.local_addr().unwrap();
sys.exec(|| {
tx.send((sys.system(), local_addr)).unwrap();
sys.run(|| {
Server::build()
.listen("test", tcp, move |_| factory())?
.workers(1)
.disable_signals()
.run();
Ok::<_, io::Error>(())
})?;
tx.send((System::current(), local_addr)).unwrap();
sys.run()
Ok(())
})
});
let (system, addr) = rx.recv().unwrap();

View file

@ -64,6 +64,12 @@ pub mod router {
pub mod rt {
//! A runtime implementation that runs everything on the current thread.
pub use ntex_rt::*;
#[cfg(feature = "tokio")]
pub use ntex_tokio::*;
#[cfg(all(not(feature = "tokio"), feature = "async-std"))]
pub use ntex_async_std::*;
}
pub mod service {
@ -78,6 +84,8 @@ pub mod time {
pub mod io {
//! IO streaming utilities.
pub use ntex_io::*;
pub use ntex_tokio::TokioIoBoxed;
}
pub mod testing {

View file

@ -51,17 +51,15 @@ where
let tcp = net::TcpListener::bind("127.0.0.1:0").unwrap();
let local_addr = tcp.local_addr().unwrap();
sys.exec(|| {
tx.send((sys.system(), local_addr)).unwrap();
sys.run(|| {
Server::build()
.listen("test", tcp, move |_| factory())?
.workers(1)
.disable_signals()
.run();
Ok::<_, io::Error>(())
})?;
tx.send((System::current(), local_addr)).unwrap();
sys.run()
Ok(())
})
});
let (system, addr) = rx.recv().unwrap();
@ -80,12 +78,11 @@ where
thread::spawn(move || {
let sys = System::new("ntex-test-server");
sys.exec(|| {
tx.send(sys.system()).unwrap();
sys.run(|| {
factory(Server::build()).workers(1).disable_signals().run();
});
tx.send(System::current()).unwrap();
sys.run()
Ok(())
})
});
let system = rx.recv().unwrap();

View file

@ -606,10 +606,10 @@ where
let tcp = net::TcpListener::bind("127.0.0.1:0").unwrap();
let local_addr = tcp.local_addr().unwrap();
let srv = sys.exec(move || {
sys.run(move || {
let builder = Server::build().workers(1).disable_signals();
match cfg.stream {
let srv = match cfg.stream {
StreamType::Tcp => match cfg.tp {
HttpVer::Http1 => builder.listen("test", tcp, move |_| {
let cfg =
@ -689,11 +689,11 @@ where
},
}
.unwrap()
.run()
});
.run();
tx.send((System::current(), srv, local_addr)).unwrap();
sys.run()
Ok(())
})
});
let (system, server, addr) = rx.recv().unwrap();

View file

@ -17,16 +17,16 @@ fn test_bind() {
let h = thread::spawn(move || {
let sys = ntex::rt::System::new("test");
let srv = sys.exec(move || {
Server::build()
sys.run(move || {
let srv = Server::build()
.workers(1)
.disable_signals()
.bind("test", addr, move |_| fn_service(|_| ok::<_, ()>(())))
.unwrap()
.run()
});
.run();
let _ = tx.send((srv, ntex::rt::System::current()));
let _ = sys.run();
Ok(())
})
});
let (_, sys) = rx.recv().unwrap();
@ -44,16 +44,16 @@ fn test_listen() {
let h = thread::spawn(move || {
let sys = ntex::rt::System::new("test");
let lst = net::TcpListener::bind(addr).unwrap();
sys.exec(move || {
sys.run(move || {
Server::build()
.disable_signals()
.workers(1)
.listen("test", lst, move |_| fn_service(|_| ok::<_, ()>(())))
.unwrap()
.run()
});
.run();
let _ = tx.send(ntex::rt::System::current());
let _ = sys.run();
Ok(())
})
});
let sys = rx.recv().unwrap();
@ -71,8 +71,8 @@ fn test_run() {
let h = thread::spawn(move || {
let sys = ntex::rt::System::new("test");
let srv = sys.exec(move || {
Server::build()
sys.run(move || {
let srv = Server::build()
.backlog(100)
.disable_signals()
.bind("test", addr, move |_| {
@ -84,11 +84,10 @@ fn test_run() {
})
})
.unwrap()
.run()
});
.run();
let _ = tx.send((srv, ntex::rt::System::current()));
let _ = sys.run();
Ok(())
})
});
let (srv, sys) = rx.recv().unwrap();
@ -141,8 +140,8 @@ fn test_on_worker_start() {
let num = num2.clone();
let num2 = num2.clone();
let sys = ntex::rt::System::new("test");
let srv = sys.exec(move || {
Server::build()
sys.run(move || {
let srv = Server::build()
.disable_signals()
.configure(move |cfg| {
let num = num.clone();
@ -170,10 +169,10 @@ fn test_on_worker_start() {
Ready::Ok::<_, io::Error>(())
})
.workers(1)
.run()
});
.run();
let _ = tx.send((srv, ntex::rt::System::current()));
let _ = sys.run();
Ok(())
})
});
let (_, sys) = rx.recv().unwrap();
thread::sleep(time::Duration::from_millis(500));
@ -198,9 +197,9 @@ fn test_panic_in_worker() {
let h = thread::spawn(move || {
let sys = ntex::rt::System::new("test");
let counter = counter2.clone();
let srv = sys.exec(move || {
sys.run(move || {
let counter = counter.clone();
Server::build()
let srv = Server::build()
.workers(1)
.disable_signals()
.bind("test", addr, move |_| {
@ -212,11 +211,11 @@ fn test_panic_in_worker() {
})
})
.unwrap()
.run()
});
.run();
let _ = tx.send((srv.clone(), ntex::rt::System::current()));
sys.exec(move || ntex::rt::spawn(srv.map(|_| ())));
let _ = sys.run();
ntex::rt::spawn(srv.map(|_| ()));
Ok(())
})
});
let (_, sys) = rx.recv().unwrap();

View file

@ -15,8 +15,8 @@ async fn test_run() {
thread::spawn(move || {
let sys = ntex::rt::System::new("test");
let srv = sys.exec(move || {
HttpServer::new(|| {
sys.run(move || {
let srv = HttpServer::new(|| {
App::new().service(
web::resource("/")
.route(web::to(|| async { HttpResponse::Ok().body("test") })),
@ -35,11 +35,10 @@ async fn test_run() {
.disable_signals()
.bind(format!("{}", addr))
.unwrap()
.run()
});
.run();
let _ = tx.send((srv, ntex::rt::System::current()));
let _ = sys.run();
Ok(())
})
});
let (srv, sys) = rx.recv().unwrap();
@ -107,8 +106,8 @@ async fn test_openssl() {
let sys = ntex::rt::System::new("test");
let builder = ssl_acceptor().unwrap();
let srv = sys.exec(move || {
HttpServer::new(|| {
sys.run(move || {
let srv = HttpServer::new(|| {
App::new().service(web::resource("/").route(web::to(
|req: HttpRequest| async move {
assert!(req.app_config().secure());
@ -122,11 +121,10 @@ async fn test_openssl() {
.disable_signals()
.bind_openssl(format!("{}", addr), builder)
.unwrap()
.run()
});
.run();
let _ = tx.send((srv, ntex::rt::System::current()));
let _ = sys.run();
Ok(())
})
});
let (srv, sys) = rx.recv().unwrap();
@ -172,8 +170,8 @@ async fn test_rustls() {
.with_single_cert(cert_chain, keys)
.unwrap();
let srv = sys.exec(move || {
HttpServer::new(|| {
sys.run(move || {
let srv = HttpServer::new(|| {
App::new().service(web::resource("/").route(web::to(
|req: HttpRequest| async move {
assert!(req.app_config().secure());
@ -187,11 +185,10 @@ async fn test_rustls() {
.disable_signals()
.bind_rustls(format!("{}", addr), config)
.unwrap()
.run()
});
.run();
let _ = tx.send((srv, ntex::rt::System::current()));
let _ = sys.run();
Ok(())
})
});
let (srv, sys) = rx.recv().unwrap();
@ -215,8 +212,8 @@ async fn test_bind_uds() {
thread::spawn(move || {
let sys = ntex::rt::System::new("test");
let srv = sys.exec(move || {
HttpServer::new(|| {
sys.run(move || {
let srv = HttpServer::new(|| {
App::new().service(
web::resource("/")
.route(web::to(|| async { HttpResponse::Ok().body("test") })),
@ -228,11 +225,10 @@ async fn test_bind_uds() {
.disable_signals()
.bind_uds("/tmp/uds-test")
.unwrap()
.run()
});
.run();
let _ = tx.send((srv, ntex::rt::System::current()));
let _ = sys.run();
Ok(())
})
});
let (srv, sys) = rx.recv().unwrap();
@ -265,11 +261,11 @@ async fn test_listen_uds() {
thread::spawn(move || {
let sys = ntex::rt::System::new("test");
let srv = sys.exec(|| {
sys.run(move || {
let _ = std::fs::remove_file("/tmp/uds-test2");
let lst = std::os::unix::net::UnixListener::bind("/tmp/uds-test2").unwrap();
HttpServer::new(|| {
let srv = HttpServer::new(|| {
App::new().service(
web::resource("/")
.route(web::to(|| async { HttpResponse::Ok().body("test") })),
@ -281,11 +277,10 @@ async fn test_listen_uds() {
.disable_signals()
.listen_uds(lst)
.unwrap()
.run()
});
.run();
let _ = tx.send((srv, ntex::rt::System::current()));
let _ = sys.run();
Ok(())
})
});
let (srv, sys) = rx.recv().unwrap();