Fix KeepAlive::Os support for h1 dispatcher

This commit is contained in:
Nikolay Kim 2021-02-18 18:59:55 +06:00
parent ff33374810
commit 96ca5c3542
3 changed files with 22 additions and 25 deletions

View file

@ -2,6 +2,8 @@
## [0.2.0-b.12] - 2021-02-18
* http: Fix KeepAlive::Os support for h1 dispatcher
* Handle EINTR in server accept loop
* Fix double registration for accept back-pressure

View file

@ -99,7 +99,7 @@ pub(super) struct DispatcherConfig<S, X, U> {
pub(super) service: S,
pub(super) expect: X,
pub(super) upgrade: Option<U>,
pub(super) keep_alive: u64,
pub(super) keep_alive: Duration,
pub(super) client_timeout: u64,
pub(super) client_disconnect: u64,
pub(super) ka_enabled: bool,
@ -118,7 +118,7 @@ impl<S, X, U> DispatcherConfig<S, X, U> {
service,
expect,
upgrade,
keep_alive: cfg.0.keep_alive,
keep_alive: Duration::from_secs(cfg.0.keep_alive),
client_timeout: cfg.0.client_timeout,
client_disconnect: cfg.0.client_disconnect,
ka_enabled: cfg.0.ka_enabled,
@ -134,10 +134,8 @@ impl<S, X, U> DispatcherConfig<S, X, U> {
/// Return keep-alive timer delay if configured.
pub(super) fn keep_alive_timer(&self) -> Option<Delay> {
if self.keep_alive != 0 {
Some(delay_until(
self.timer.now() + Duration::from_secs(self.keep_alive),
))
if self.keep_alive.as_secs() != 0 {
Some(delay_until(self.timer.now() + self.keep_alive))
} else {
None
}
@ -145,8 +143,8 @@ impl<S, X, U> DispatcherConfig<S, X, U> {
/// Keep-alive expire time
pub(super) fn keep_alive_expire(&self) -> Option<Instant> {
if self.keep_alive != 0 {
Some(self.timer.now() + Duration::from_secs(self.keep_alive))
if self.keep_alive.as_secs() != 0 {
Some(self.timer.now() + self.keep_alive)
} else {
None
}

View file

@ -111,9 +111,7 @@ where
on_connect_data: Option<Box<dyn DataFactory>>,
) -> Self {
let codec = Codec::new(config.timer.clone(), config.keep_alive_enabled());
let state = IoState::new();
state.set_disconnect_timeout(config.client_disconnect as u16);
let state = IoState::new().disconnect_timeout(config.client_disconnect as u16);
let mut expire = config.timer_h1.now();
let io = Rc::new(RefCell::new(io));
@ -171,8 +169,6 @@ where
let next = match this.call.project() {
// handle SERVICE call
CallStateProject::Service { fut } => {
// we have to loop because of read back-pressure,
// check Poll::Pending processing
match fut.poll(cx) {
Poll::Ready(result) => match result {
Ok(res) => {
@ -314,15 +310,14 @@ where
}
if req.head().expect() {
// call service
// Handle normal requests with EXPECT: 100-Continue` header
*this.st = State::Call;
// Handle `EXPECT: 100-Continue` header
this.call.set(CallState::Expect {
fut: this.inner.config.expect.call(req),
});
} else if upgrade {
log::trace!("prep io for upgrade handler");
// Handle UPGRADE request
log::trace!("prep io for upgrade handler");
this.inner.state.dsp_stop_io(cx.waker());
*this.st = State::Upgrade(Some(req));
return Poll::Pending;
@ -352,8 +347,8 @@ where
return Poll::Pending;
}
Err(err) => {
log::trace!("malformed request: {:?}", err);
// Malformed requests, respond with 400
log::trace!("malformed request: {:?}", err);
let (res, body) =
Response::BadRequest().finish().into_parts();
this.inner.error = Some(DispatchError::Parse(err));
@ -480,14 +475,16 @@ where
fn reset_keepalive(&mut self) {
// re-register keep-alive
if self.flags.contains(Flags::KEEPALIVE) {
let expire = self.config.timer_h1.now()
+ time::Duration::from_secs(self.config.keep_alive);
self.config
.timer_h1
.register(expire, self.expire, &self.state);
self.expire = expire;
self.state.reset_keepalive();
if self.flags.contains(Flags::KEEPALIVE) && self.config.keep_alive.as_secs() != 0
{
let expire = self.config.timer_h1.now() + self.config.keep_alive;
if expire != self.expire {
self.config
.timer_h1
.register(expire, self.expire, &self.state);
self.expire = expire;
self.state.reset_keepalive();
}
}
}