Compare commits

master..0.9.4

No commits in common. "master" and "0.9.4" have entirely different histories.

15 changed files with 159 additions and 328 deletions

View file

@@ -1,17 +0,0 @@
-name: Close inactive issues
-on:
-  schedule:
-    - cron: "30 1 * * *"
-jobs:
-  close-issues:
-    runs-on: ubuntu-latest
-    permissions:
-      issues: write
-      pull-requests: write
-    steps:
-      - uses: actions/stale@v9
-        with:
-          stale-issue-message: "This issue is stale because it has been open for 30 days with no activity."
-          close-issue-message: "This issue was closed because it has been inactive for 14 days since being marked as stale."
-          repo-token: ${{ secrets.GITHUB_TOKEN }}

View file

@@ -14,11 +14,7 @@ jobs:
         id: get_version
         run: echo ::set-output name=VERSION::${GITHUB_REF#refs/tags/}
-      - uses: actions/checkout@v3
-      - uses: mlugg/setup-zig@v1
-        with:
-          version: 0.10.1
+      - uses: actions/checkout@master
       - uses: hecrj/setup-rust-action@master
        with:
@@ -31,60 +27,18 @@ jobs:
         run: rustup default | grep stable
       - name: Install cargo-deb
-        run: cargo install cargo-deb
-      - name: Install cargo-generate-rpm
-        run: cargo install cargo-generate-rpm
-      - name: Install cargo-zigbuild
-        run: cargo install cargo-zigbuild
-      - name: Release build Linux-x86-64
+        run: cargo install --debug cargo-deb
+      - name: Release build
         run: |
-          rustup target add x86_64-unknown-linux-musl
-          env RUSTFLAGS="-C strip=symbols" cargo zigbuild --release --target x86_64-unknown-linux-musl
+          env RUSTFLAGS="-C link-arg=-s" cargo build --release
           mkdir doh-proxy
-          mv target/x86_64-unknown-linux-musl/release/doh-proxy doh-proxy/
+          mv target/release/doh-proxy doh-proxy/
           cp README.md localhost.pem doh-proxy/
-          tar cjpf doh-proxy_${{ steps.get_version.outputs.VERSION }}_linux-x86_64.tar.bz2 doh-proxy
-          rm -fr doh-proxy
-      - name: Release build Linux-aarch64
+          tar cJpf doh-proxy_${{ steps.get_version.outputs.VERSION }}_linux-x86_64.tar.bz2 doh-proxy
+      - name: Debian package
         run: |
-          rustup target add aarch64-unknown-linux-musl
-          env RUSTFLAGS="-C strip=symbols" cargo zigbuild --release --target aarch64-unknown-linux-musl
-          mkdir doh-proxy
-          mv target/aarch64-unknown-linux-musl/release/doh-proxy doh-proxy/
-          cp README.md localhost.pem doh-proxy/
-          tar cjpf doh-proxy_${{ steps.get_version.outputs.VERSION }}_linux-aarch64.tar.bz2 doh-proxy
-          rm -fr doh-proxy
-      - name: Release build Windows-x86_64
-        run: |
-          rustup target add x86_64-pc-windows-gnu
-          env RUSTFLAGS="-C strip=symbols" cargo zigbuild --release --target x86_64-pc-windows-gnu
-          mkdir doh-proxy
-          mv target/x86_64-pc-windows-gnu/release/doh-proxy.exe doh-proxy/
-          cp README.md localhost.pem doh-proxy/
-          zip -9 -r doh-proxy_${{ steps.get_version.outputs.VERSION }}_windows-x86_64.zip doh-proxy
-          rm -fr doh-proxy
-      - name: Debian packages
-        run: |
-          rustup target add x86_64-unknown-linux-musl
-          env RUSTFLAGS="-C strip=symbols" cargo deb --no-strip --cargo-build=zigbuild --target=x86_64-unknown-linux-musl
-          rustup target add aarch64-unknown-linux-musl
-          env RUSTFLAGS="-C strip=symbols" cargo deb --no-strip --cargo-build=zigbuild --target=aarch64-unknown-linux-musl
-      - name: RPM packages
-        run: |
-          rustup target add x86_64-unknown-linux-gnu
-          env RUSTFLAGS="-C strip=symbols" cargo-zigbuild build --target=x86_64-unknown-linux-gnu.2.17 --release
-          mv target/x86_64-unknown-linux-musl/release/doh-proxy target/release/
-          cargo generate-rpm --target x86_64-unknown-linux-gnu
-          rustup target add aarch64-unknown-linux-gnu
-          env RUSTFLAGS="-C strip=symbols" cargo-zigbuild build --target=aarch64-unknown-linux-gnu.2.17 --release
-          cargo generate-rpm --target aarch64-unknown-linux-gnu
+          cargo deb
       - name: Create release
         id: create_release
@@ -97,41 +51,19 @@ jobs:
           draft: true
           prerelease: false
-      - name: Upload Debian package for x86_64
-        id: upload-release-asset-debian-x86_64
+      - name: Upload Debian package
+        id: upload-release-asset-debian
         uses: actions/upload-release-asset@v1
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
         with:
           upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_name: "doh-proxy_${{ steps.get_version.outputs.VERSION }}-1_amd64.deb"
-          asset_path: "target/x86_64-unknown-linux-musl/debian/doh-proxy_${{ steps.get_version.outputs.VERSION }}-1_amd64.deb"
+          asset_name: "doh-proxy_${{ steps.get_version.outputs.VERSION }}_amd64.deb"
+          asset_path: "target/debian/doh-proxy_${{ steps.get_version.outputs.VERSION }}_amd64.deb"
           asset_content_type: application/x-debian-package
-      - name: Upload RPM package for x86_64
-        id: upload-release-asset-rpm-x86_64
-        uses: actions/upload-release-asset@v1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_name: "doh-proxy-${{ steps.get_version.outputs.VERSION }}-1.x86_64.rpm"
-          asset_path: "target/x86_64-unknown-linux-gnu/generate-rpm/doh-proxy-${{ steps.get_version.outputs.VERSION }}-1.x86_64.rpm"
-          asset_content_type: application/x-redhat-package-manager
-      - name: Upload RPM package for aarch64
-        id: upload-release-asset-rpm-aarch64
-        uses: actions/upload-release-asset@v1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_name: "doh-proxy-${{ steps.get_version.outputs.VERSION }}-1.aarch64.rpm"
-          asset_path: "target/aarch64-unknown-linux-gnu/generate-rpm/doh-proxy-${{ steps.get_version.outputs.VERSION }}-1.aarch64.rpm"
-          asset_content_type: application/x-redhat-package-manager
-      - name: Upload tarball for linux-x86_64
-        id: upload-release-asset-tarball-linux-x86_64
+      - name: Upload tarball
+        id: upload-release-asset-tarball
         uses: actions/upload-release-asset@v1
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -140,25 +72,3 @@ jobs:
           asset_name: "doh-proxy_${{ steps.get_version.outputs.VERSION }}_linux-x86_64.tar.bz2"
           asset_path: "doh-proxy_${{ steps.get_version.outputs.VERSION }}_linux-x86_64.tar.bz2"
           asset_content_type: application/x-tar
-      - name: Upload tarball for linux-aarch64
-        id: upload-release-asset-tarball-linux-aarch64
-        uses: actions/upload-release-asset@v1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_name: "doh-proxy_${{ steps.get_version.outputs.VERSION }}_linux-aarch64.tar.bz2"
-          asset_path: "doh-proxy_${{ steps.get_version.outputs.VERSION }}_linux-aarch64.tar.bz2"
-          asset_content_type: application/x-tar
-      - name: Upload tarball for windows-x86_64
-        id: upload-release-asset-tarball-windows-x86_64
-        uses: actions/upload-release-asset@v1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_name: "doh-proxy_${{ steps.get_version.outputs.VERSION }}_windows-x86_64.zip"
-          asset_path: "doh-proxy_${{ steps.get_version.outputs.VERSION }}_windows-x86_64.zip"
-          asset_content_type: application/zip

View file

@@ -1,6 +1,6 @@
 [package]
 name = "doh-proxy"
-version = "0.9.11"
+version = "0.9.4"
 authors = ["Frank Denis <github@pureftpd.org>"]
 description = "A DNS-over-HTTPS (DoH) and ODoH (Oblivious DoH) proxy"
 keywords = ["dns", "https", "doh", "odoh", "proxy"]
@@ -16,16 +16,10 @@ default = ["tls"]
 tls = ["libdoh/tls"]
 [dependencies]
-libdoh = { path = "src/libdoh", version = "0.9.9", default-features = false }
-clap = { version = "4", features = ["std", "cargo", "wrap_help", "string"] }
-dnsstamps = "0.1.10"
-mimalloc = { version = "0.1.44", default-features = false }
-[package.metadata.generate-rpm]
-assets = [
-  { source = "target/release/doh-proxy", dest = "/usr/bin/doh-proxy", mode = "755" },
-  { source = "README.md", dest = "/usr/share/doc/doh-proxy/README.md", mode = "644", doc = true },
-]
+libdoh = { path = "src/libdoh", version = "0.9.4", default-features = false }
+clap = { version = "3.1.18", features = ["std", "cargo", "wrap_help"] }
+dnsstamps = "0.1.9"
+mimalloc = { version = "0.1.29", default-features = false }
 [package.metadata.deb]
 extended-description = """\

View file

@@ -1,6 +1,6 @@
 MIT License
-Copyright (c) 2018-2025 Frank Denis
+Copyright (c) 2018-2022 Frank Denis
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

View file

@@ -1,4 +1,4 @@
-# ![DoH server (and ODoH - Oblivious DoH server)](logo.png)
+# doh-proxy
 A fast and secure DoH (DNS-over-HTTPS) and ODoH (Oblivious DoH) server.
@@ -60,21 +60,13 @@ OPTIONS:
             Path to the PEM/PKCS#8-encoded certificates (only required for built-in TLS)
 ```
-Example command-line:
-```sh
-doh-proxy -H 'doh.example.com' -u 127.0.0.1:53 -g 233.252.0.5
-```
-Here, `doh.example.com` is the host name (which should match a name included in the TLS certificate), `127.0.0.1:53` is the address of the DNS resolver, and `233.252.0.5` is the public IP address of the DoH server.
-## HTTP/2 and HTTP/3 termination
-The recommended way to use `doh-proxy` is to use a TLS termination proxy (such as [hitch](https://github.com/varnish/hitch) or [relayd](https://man.openbsd.org/relayd.8)), a CDN or a web server with proxying abilities as a front-end.
+## HTTP/2 termination
+The recommended way to use `doh-proxy` is to use a TLS termination proxy (such as [hitch](https://github.com/varnish/hitch) or [relayd](https://bsd.plumbing/about.html)), a CDN or a web server with proxying abilities as a front-end.
 That way, the DoH service can be exposed as a virtual host, sharing the same IP addresses as existing websites.
-If `doh-proxy` and the HTTP/2 (/ HTTP/3) front-end run on the same host, using the HTTP protocol to communicate between both is fine.
+If `doh-proxy` and the HTTP/2 front-end run on the same host, using the HTTP protocol to communicate between both is fine.
 If both are on distinct networks, such as when using a CDN, `doh-proxy` can handle HTTPS requests, provided that it was compiled with the `tls` feature.
@@ -136,7 +128,7 @@ This can be achieved with the `--allow-odoh-post` command-line switch.
 * When using DoH, DNS stamps should include a resolver IP address in order to remove a dependency on non-encrypted, non-authenticated, easy-to-block resolvers.
 * Unlike DNSCrypt where users must explicitly trust a DNS server's public key, the security of DoH relies on traditional public Certificate Authorities. Additional root certificates (required by governments, security software, enterprise gateways) installed on a client immediately make DoH vulnerable to MITM. In order to prevent this, DNS stamps should include the hash of the parent certificate.
 * TLS certificates are tied to host names. But domains expire, get reassigned and switch hands all the time. If a domain originally used for a DoH service gets a new, possibly malicious owner, clients still configured to use the service will blindly keep trusting it if the CA is the same. As a mitigation, the CA should sign an intermediate certificate (the only one present in the stamp), itself used to sign the name used by the DoH server. While commercial CAs offer this, Let's Encrypt currently doesn't.
-* Make sure that the front-end supports at least HTTP/2 and TLS 1.3.
+* Make sure that the front-end supports HTTP/2 and TLS 1.3.
 * Internal DoH servers still require TLS certificates. So, if you are planning to deploy an internal server, you need to set up an internal CA, or add self-signed certificates to every single client.
 ## Example usage with `encrypted-dns-server`
@@ -150,10 +142,10 @@ upstream_addr = "127.0.0.1:3000"
 ## Example usage with `nginx`
-In an existing `server`, a `/dns-query` endpoint can be exposed that way:
+In an existing `server`, a `/doh` endpoint can be exposed that way:
 ```text
-location /dns-query {
+location /doh {
   proxy_pass http://127.0.0.1:3000;
 }
 ```
@@ -195,14 +187,10 @@ This [Go code snippet](https://gist.github.com/d6cb41742a1ceb54d48cc286f3d5c5fa)
 ### Common certificate hashes
-* Let's Encrypt E1:
-  * `cc1060d39c8329b62b6fbc7d0d6df9309869b981e7e6392d5cd8fa408f4d80e6`
 * Let's Encrypt R3:
   * `444ebd67bb83f8807b3921e938ac9178b882bd50aadb11231f044cf5f08df7ce`
-* Let's Encrypt R10:
-  * `e644ba6963e335fe765cb9976b12b10eb54294b42477764ccb3a3acca3acb2fc`
-* ZeroSSL:
-  * `9a3a34f727deb9bca51003d9ce9c39f8f27dd9c5242901c2bab1a44e635a0219`
+* Let's Encrypt E1:
+  * `cc1060d39c8329b62b6fbc7d0d6df9309869b981e7e6392d5cd8fa408f4d80e6`
 ## Clients

logo.png — binary file removed (58 KiB); binary file not shown.

View file

@@ -3,7 +3,7 @@ use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6, ToSoc
 use std::path::PathBuf;
 use std::time::Duration;
-use clap::{Arg, ArgAction::SetTrue};
+use clap::Arg;
 use libdoh::*;
 use crate::constants::*;
@@ -24,54 +24,54 @@ pub fn parse_opts(globals: &mut Globals) {
             Arg::new("hostname")
                 .short('H')
                 .long("hostname")
-                .num_args(1)
+                .takes_value(true)
                 .help("Host name (not IP address) DoH clients will use to connect"),
         )
         .arg(
             Arg::new("public_address")
                 .short('g')
                 .long("public-address")
-                .num_args(1)
+                .takes_value(true)
                 .help("External IP address DoH clients will connect to"),
         )
         .arg(
             Arg::new("public_port")
                 .short('j')
                 .long("public-port")
-                .num_args(1)
+                .takes_value(true)
                 .help("External port DoH clients will connect to, if not 443"),
         )
         .arg(
             Arg::new("listen_address")
                 .short('l')
                 .long("listen-address")
-                .num_args(1)
+                .takes_value(true)
                 .default_value(LISTEN_ADDRESS)
-                .value_parser(verify_sock_addr)
+                .validator(verify_sock_addr)
                 .help("Address to listen to"),
         )
         .arg(
             Arg::new("server_address")
                 .short('u')
                 .long("server-address")
-                .num_args(1)
+                .takes_value(true)
                 .default_value(SERVER_ADDRESS)
-                .value_parser(verify_remote_server)
+                .validator(verify_remote_server)
                 .help("Address to connect to"),
         )
         .arg(
             Arg::new("local_bind_address")
                 .short('b')
                 .long("local-bind-address")
-                .num_args(1)
-                .value_parser(verify_sock_addr)
+                .takes_value(true)
+                .validator(verify_sock_addr)
                 .help("Address to connect from"),
         )
         .arg(
             Arg::new("path")
                 .short('p')
                 .long("path")
-                .num_args(1)
+                .takes_value(true)
                 .default_value(PATH)
                 .help("URI path"),
         )
@@ -79,68 +79,65 @@ pub fn parse_opts(globals: &mut Globals) {
             Arg::new("max_clients")
                 .short('c')
                 .long("max-clients")
-                .num_args(1)
-                .default_value(max_clients)
+                .takes_value(true)
+                .default_value(&max_clients)
                 .help("Maximum number of simultaneous clients"),
         )
         .arg(
             Arg::new("max_concurrent")
                 .short('C')
                 .long("max-concurrent")
-                .num_args(1)
-                .default_value(max_concurrent_streams)
+                .takes_value(true)
+                .default_value(&max_concurrent_streams)
                 .help("Maximum number of concurrent requests per client"),
         )
         .arg(
             Arg::new("timeout")
                 .short('t')
                 .long("timeout")
-                .num_args(1)
-                .default_value(timeout_sec)
+                .takes_value(true)
+                .default_value(&timeout_sec)
                 .help("Timeout, in seconds"),
         )
         .arg(
             Arg::new("min_ttl")
                 .short('T')
                 .long("min-ttl")
-                .num_args(1)
-                .default_value(min_ttl)
+                .takes_value(true)
+                .default_value(&min_ttl)
                 .help("Minimum TTL, in seconds"),
         )
        .arg(
             Arg::new("max_ttl")
                 .short('X')
                 .long("max-ttl")
-                .num_args(1)
-                .default_value(max_ttl)
+                .takes_value(true)
+                .default_value(&max_ttl)
                 .help("Maximum TTL, in seconds"),
         )
         .arg(
             Arg::new("err_ttl")
                 .short('E')
                 .long("err-ttl")
-                .num_args(1)
-                .default_value(err_ttl)
+                .takes_value(true)
+                .default_value(&err_ttl)
                 .help("TTL for errors, in seconds"),
         )
         .arg(
             Arg::new("disable_keepalive")
                 .short('K')
-                .action(SetTrue)
                 .long("disable-keepalive")
                 .help("Disable keepalive"),
         )
         .arg(
             Arg::new("disable_post")
                 .short('P')
-                .action(SetTrue)
                 .long("disable-post")
                 .help("Disable POST queries"),
         )
         .arg(
             Arg::new("allow_odoh_post")
                 .short('O')
-                .action(SetTrue)
                 .long("allow-odoh-post")
                 .help("Allow POST queries over ODoH even if they have been disabed for DoH"),
         );
@@ -151,7 +148,7 @@ pub fn parse_opts(globals: &mut Globals) {
             Arg::new("tls_cert_path")
                 .short('i')
                 .long("tls-cert-path")
-                .num_args(1)
+                .takes_value(true)
                 .help(
                     "Path to the PEM/PKCS#8-encoded certificates (only required for built-in TLS)",
                 ),
@@ -160,24 +157,21 @@ pub fn parse_opts(globals: &mut Globals) {
             Arg::new("tls_cert_key_path")
                 .short('I')
                 .long("tls-cert-key-path")
-                .num_args(1)
+                .takes_value(true)
                 .help("Path to the PEM-encoded secret keys (only required for built-in TLS)"),
         );
     let matches = options.get_matches();
-    globals.listen_address = matches
-        .get_one::<String>("listen_address")
-        .unwrap()
-        .parse()
-        .unwrap();
+    globals.listen_address = matches.value_of("listen_address").unwrap().parse().unwrap();
     globals.server_address = matches
-        .get_one::<String>("server_address")
+        .value_of("server_address")
         .unwrap()
         .to_socket_addrs()
         .unwrap()
         .next()
         .unwrap();
-    globals.local_bind_address = match matches.get_one::<String>("local_bind_address") {
+    globals.local_bind_address = match matches.value_of("local_bind_address") {
         Some(address) => address.parse().unwrap(),
         None => match globals.server_address {
             SocketAddr::V4(_) => SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, 0)),
@@ -189,93 +183,62 @@ pub fn parse_opts(globals: &mut Globals) {
             )),
         },
     };
-    globals.path = matches.get_one::<String>("path").unwrap().to_string();
+    globals.path = matches.value_of("path").unwrap().to_string();
     if !globals.path.starts_with('/') {
         globals.path = format!("/{}", globals.path);
     }
-    globals.max_clients = matches
-        .get_one::<String>("max_clients")
-        .unwrap()
-        .parse()
-        .unwrap();
-    globals.timeout = Duration::from_secs(
-        matches
-            .get_one::<String>("timeout")
-            .unwrap()
-            .parse()
-            .unwrap(),
-    );
-    globals.max_concurrent_streams = matches
-        .get_one::<String>("max_concurrent")
-        .unwrap()
-        .parse()
-        .unwrap();
-    globals.min_ttl = matches
-        .get_one::<String>("min_ttl")
-        .unwrap()
-        .parse()
-        .unwrap();
-    globals.max_ttl = matches
-        .get_one::<String>("max_ttl")
-        .unwrap()
-        .parse()
-        .unwrap();
-    globals.err_ttl = matches
-        .get_one::<String>("err_ttl")
-        .unwrap()
-        .parse()
-        .unwrap();
-    globals.keepalive = !matches.get_flag("disable_keepalive");
-    globals.disable_post = matches.get_flag("disable_post");
-    globals.allow_odoh_post = matches.get_flag("allow_odoh_post");
+    globals.max_clients = matches.value_of("max_clients").unwrap().parse().unwrap();
+    globals.timeout = Duration::from_secs(matches.value_of("timeout").unwrap().parse().unwrap());
+    globals.max_concurrent_streams = matches.value_of("max_concurrent").unwrap().parse().unwrap();
+    globals.min_ttl = matches.value_of("min_ttl").unwrap().parse().unwrap();
+    globals.max_ttl = matches.value_of("max_ttl").unwrap().parse().unwrap();
+    globals.err_ttl = matches.value_of("err_ttl").unwrap().parse().unwrap();
+    globals.keepalive = !matches.is_present("disable_keepalive");
+    globals.disable_post = matches.is_present("disable_post");
+    globals.allow_odoh_post = matches.is_present("allow_odoh_post");
     #[cfg(feature = "tls")]
     {
-        globals.tls_cert_path = matches
-            .get_one::<String>("tls_cert_path")
-            .map(PathBuf::from);
+        globals.tls_cert_path = matches.value_of("tls_cert_path").map(PathBuf::from);
         globals.tls_cert_key_path = matches
-            .get_one::<String>("tls_cert_key_path")
+            .value_of("tls_cert_key_path")
             .map(PathBuf::from)
             .or_else(|| globals.tls_cert_path.clone());
     }
-    match matches.get_one::<String>("hostname") {
-        Some(hostname) => {
-            let mut builder =
-                dnsstamps::DoHBuilder::new(hostname.to_string(), globals.path.to_string());
-            if let Some(public_address) = matches.get_one::<String>("public_address") {
-                builder = builder.with_address(public_address.to_string());
-            }
-            if let Some(public_port) = matches.get_one::<String>("public_port") {
-                let public_port = public_port.parse().expect("Invalid public port");
-                builder = builder.with_port(public_port);
-            }
-            println!(
-                "Test DNS stamp to reach [{}] over DoH: [{}]\n",
-                hostname,
-                builder.serialize().unwrap()
-            );
-            let mut builder =
-                dnsstamps::ODoHTargetBuilder::new(hostname.to_string(), globals.path.to_string());
-            if let Some(public_port) = matches.get_one::<String>("public_port") {
-                let public_port = public_port.parse().expect("Invalid public port");
-                builder = builder.with_port(public_port);
-            }
-            println!(
-                "Test DNS stamp to reach [{}] over Oblivious DoH: [{}]\n",
-                hostname,
-                builder.serialize().unwrap()
-            );
-            println!("Check out https://dnscrypt.info/stamps/ to compute the actual stamps.\n")
-        }
-        _ => {
-            println!(
-                "Please provide a fully qualified hostname (-H <hostname> command-line option) to get \
-                 test DNS stamps for your server.\n"
-            );
-        }
+    if let Some(hostname) = matches.value_of("hostname") {
+        let mut builder =
+            dnsstamps::DoHBuilder::new(hostname.to_string(), globals.path.to_string());
+        if let Some(public_address) = matches.value_of("public_address") {
+            builder = builder.with_address(public_address.to_string());
+        }
+        if let Some(public_port) = matches.value_of("public_port") {
+            let public_port = public_port.parse().expect("Invalid public port");
+            builder = builder.with_port(public_port);
+        }
+        println!(
+            "Test DNS stamp to reach [{}] over DoH: [{}]\n",
+            hostname,
+            builder.serialize().unwrap()
+        );
+        let mut builder =
+            dnsstamps::ODoHTargetBuilder::new(hostname.to_string(), globals.path.to_string());
+        if let Some(public_port) = matches.value_of("public_port") {
+            let public_port = public_port.parse().expect("Invalid public port");
+            builder = builder.with_port(public_port);
+        }
+        println!(
+            "Test DNS stamp to reach [{}] over Oblivious DoH: [{}]\n",
+            hostname,
+            builder.serialize().unwrap()
+        );
+        println!("Check out https://dnscrypt.info/stamps/ to compute the actual stamps.\n")
+    } else {
+        println!(
+            "Please provide a fully qualified hostname (-H <hostname> command-line option) to get \
+             test DNS stamps for your server.\n"
+        );
     }
 }
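The two sides of this file differ mainly in the clap API: the master side uses clap 4 idioms (`num_args`, `value_parser`, `ArgAction::SetTrue`, `get_one`, `get_flag`) while the 0.9.4 side uses clap 3 (`takes_value`, `validator`, `value_of`, `is_present`). A minimal, self-contained sketch of the clap 4 style follows; the option names and default values are illustrative only, not taken from the project:

```rust
// Sketch of the clap 4 patterns visible in the diff above (assumed: clap = "4").
use clap::{Arg, ArgAction, Command};

fn parse_sock_addr(s: &str) -> Result<String, String> {
    // clap 4 value parsers return the accepted (possibly transformed) value instead of ().
    s.parse::<std::net::SocketAddr>()
        .map(|_| s.to_string())
        .map_err(|e| e.to_string())
}

fn main() {
    let matches = Command::new("example")
        .arg(
            Arg::new("listen_address")
                .short('l')
                .long("listen-address")
                .num_args(1)                     // clap 3: .takes_value(true)
                .default_value("127.0.0.1:3000")
                .value_parser(parse_sock_addr),  // clap 3: .validator(...)
        )
        .arg(
            Arg::new("disable_keepalive")
                .short('K')
                .long("disable-keepalive")
                .action(ArgAction::SetTrue),     // clap 3 flags needed no action
        )
        .get_matches();

    // clap 3: matches.value_of(...) / matches.is_present(...)
    let listen_address = matches.get_one::<String>("listen_address").unwrap();
    let keepalive = !matches.get_flag("disable_keepalive");
    println!("listen on {listen_address}, keepalive: {keepalive}");
}
```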

View file

@@ -1,13 +1,13 @@
 [package]
 name = "libdoh"
-version = "0.9.11"
+version = "0.9.4"
 authors = ["Frank Denis <github@pureftpd.org>"]
 description = "DoH and Oblivious DoH library for the rust-doh app"
-keywords = ["dns", "https", "doh", "odoh", "proxy"]
+keywords = ["dns","https","doh","odoh","proxy"]
 license = "MIT"
 homepage = "https://github.com/jedisct1/rust-doh"
 repository = "https://github.com/jedisct1/rust-doh"
-categories = ["asynchronous", "network-programming", "command-line-utilities"]
+categories = ["asynchronous", "network-programming","command-line-utilities"]
 edition = "2018"
 [features]
@@ -15,31 +15,19 @@ default = ["tls"]
 tls = ["tokio-rustls"]
 [dependencies]
-anyhow = "1.0.97"
-arc-swap = "1.7.1"
-base64 = "0.22.1"
-byteorder = "1.5.0"
-bytes = "1.10.1"
-futures = "0.3.31"
-hyper = { version = "^0.14.32", default-features = false, features = [
-    "server",
-    "http1",
-    "http2",
-    "stream",
-    "runtime",
-] }
-odoh-rs = "1.0.3"
-rand = "^0.8.5"
-tokio = { version = "1.44.1", features = [
-    "net",
-    "rt-multi-thread",
-    "time",
-    "sync",
-] }
-tokio-rustls = { version = "^0.24.1", features = [
-    "early-data",
-], optional = true }
-rustls-pemfile = "^1.0.4"
+anyhow = "1.0.57"
+arc-swap = "1.5.0"
+base64 = "0.13.0"
+byteorder = "1.4.3"
+bytes = "1.1.0"
+futures = "0.3.21"
+hpke = "0.5.1"
+hyper = { version = "0.14.19", default-features = false, features = ["server", "http1", "http2", "stream"] }
+odoh-rs = "1.0.0-alpha.1"
+rand = "0.8.5"
+tokio = { version = "1.19.1", features = ["net", "rt-multi-thread", "parking_lot", "time", "sync"] }
+tokio-rustls = { version = "0.23.4", features = ["early-data"], optional = true }
+rustls-pemfile = "1.0.0"
 [profile.release]
 codegen-units = 1

View file

@@ -1,6 +1,6 @@
 MIT License
-Copyright (c) 2018-2025 Frank Denis
+Copyright (c) 2018-2022 Frank Denis
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

View file

@@ -180,7 +180,7 @@ fn add_edns_section(packet: &mut Vec<u8>, max_payload_size: u16) -> Result<(), E
         "Packet would be too large to add a new record"
     );
     arcount_inc(packet)?;
-    packet.extend(opt_rr);
+    packet.extend(&opt_rr);
     Ok(())
 }

View file

@@ -27,9 +27,9 @@ impl std::fmt::Display for DoHError {
             DoHError::UpstreamIssue => write!(fmt, "Upstream error"),
             DoHError::UpstreamTimeout => write!(fmt, "Upstream timeout"),
             DoHError::StaleKey => write!(fmt, "Stale key material"),
-            DoHError::Hyper(e) => write!(fmt, "HTTP error: {e}"),
-            DoHError::Io(e) => write!(fmt, "IO error: {e}"),
-            DoHError::ODoHConfigError(e) => write!(fmt, "ODoH config error: {e}"),
+            DoHError::Hyper(e) => write!(fmt, "HTTP error: {}", e),
+            DoHError::Io(e) => write!(fmt, "IO error: {}", e),
+            DoHError::ODoHConfigError(e) => write!(fmt, "ODoH config error: {}", e),
             DoHError::TooManyTcpSessions => write!(fmt, "Too many TCP sessions"),
         }
     }
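The only difference in this hunk is the format-string style: the master side uses captured identifiers, which have been stable since Rust 1.58 and produce identical output. A tiny illustration (not project code):

```rust
// Both styles format the same way; the captured-identifier form is just shorter.
use std::fmt::Write;

fn main() {
    let e = "connection reset";
    let mut positional = String::new();
    let mut captured = String::new();
    write!(positional, "IO error: {}", e).unwrap(); // 0.9.4-side style
    write!(captured, "IO error: {e}").unwrap();     // master-side style
    assert_eq!(positional, captured);
}
```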

View file

@@ -11,7 +11,6 @@ use std::pin::Pin;
 use std::sync::Arc;
 use std::time::Duration;
-use base64::engine::Engine;
 use byteorder::{BigEndian, ByteOrder};
 use futures::prelude::*;
 use futures::task::{Context, Poll};
@@ -30,14 +29,6 @@ pub mod reexports {
     pub use tokio;
 }
-const BASE64_URL_SAFE_NO_PAD: base64::engine::GeneralPurpose =
-    base64::engine::general_purpose::GeneralPurpose::new(
-        &base64::alphabet::URL_SAFE,
-        base64::engine::general_purpose::GeneralPurposeConfig::new()
-            .with_encode_padding(false)
-            .with_decode_padding_mode(base64::engine::DecodePaddingMode::Indifferent),
-    );
 #[derive(Clone, Debug)]
 struct DnsResponse {
     packet: Vec<u8>,
@@ -170,9 +161,9 @@ impl DoH {
             return None;
         }
     }
-        let query = match question_str
-            .and_then(|question_str| BASE64_URL_SAFE_NO_PAD.decode(question_str).ok())
-        {
+        let query = match question_str.and_then(|question_str| {
+            base64::decode_config(question_str, base64::URL_SAFE_NO_PAD).ok()
+        }) {
             Some(query) => query,
             _ => return None,
         };
@@ -257,7 +248,10 @@ impl DoH {
         content_types: &[&'static str],
     ) -> Option<&'static str> {
         let accept = headers.get(hyper::header::ACCEPT);
-        let accept = accept?;
+        let accept = match accept {
+            None => return None,
+            Some(accept) => accept,
+        };
         for part in accept.to_str().unwrap_or("").split(',').map(|s| s.trim()) {
             if let Some(found) = part
                 .split(';')
@@ -433,8 +427,8 @@ impl DoH {
                 .header(
                     hyper::header::CACHE_CONTROL,
                     format!(
-                        "max-age={ttl}, stale-if-error={STALE_IF_ERROR_SECS}, \
-                         stale-while-revalidate={STALE_WHILE_REVALIDATE_SECS}"
+                        "max-age={}, stale-if-error={}, stale-while-revalidate={}",
+                        ttl, STALE_IF_ERROR_SECS, STALE_WHILE_REVALIDATE_SECS
                     )
                     .as_str(),
                 );
@@ -501,9 +495,9 @@ impl DoH {
             self.globals.tls_cert_path.is_some() && self.globals.tls_cert_key_path.is_some();
         }
         if tls_enabled {
-            println!("Listening on https://{listen_address}{path}");
+            println!("Listening on https://{}{}", listen_address, path);
         } else {
-            println!("Listening on http://{listen_address}{path}");
+            println!("Listening on http://{}{}", listen_address, path);
         }
         let mut server = Http::new();
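The base64 change in this file reflects the crate's move from the old free functions (`base64::decode_config`, used on the 0.9.4 side) to the 0.21+/0.22 `Engine` API (master side). A minimal sketch of the engine style, decoding a `dns=` GET parameter as in RFC 8484; the constant name here is illustrative and only mirrors the one defined in the hunk above:

```rust
// Assumed: base64 = "0.22". URL-safe alphabet, no padding on encode, padding tolerated on decode.
use base64::alphabet;
use base64::engine::general_purpose::{GeneralPurpose, GeneralPurposeConfig};
use base64::engine::{DecodePaddingMode, Engine as _};

const URL_SAFE_RELAXED: GeneralPurpose = GeneralPurpose::new(
    &alphabet::URL_SAFE,
    GeneralPurposeConfig::new()
        .with_encode_padding(false)
        .with_decode_padding_mode(DecodePaddingMode::Indifferent),
);

fn main() {
    // Sample DoH query value taken from RFC 8484 (an A query for www.example.com).
    let dns_param = "q80BAAABAAAAAAAAA3d3dwdleGFtcGxlA2NvbQAAAQAB";
    let packet = URL_SAFE_RELAXED.decode(dns_param).expect("valid base64url");
    println!("decoded {} bytes", packet.len());
}
```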

View file

@@ -77,7 +77,7 @@ impl ODoHPublicKey {
 impl ODoHQueryContext {
     pub fn encrypt_response(self, response_body: Vec<u8>) -> Result<Vec<u8>, DoHError> {
-        let response_nonce = rand::thread_rng().r#gen::<ResponseNonce>();
+        let response_nonce = rand::thread_rng().gen::<ResponseNonce>();
         let response_body_ = ObliviousDoHMessagePlaintext::new(response_body, 0);
         let encrypted_response = odoh_rs::encrypt_response(
             &self.query,
@@ -115,7 +115,7 @@ impl ODoHRotator {
                     Ok(key) => {
                         current_key.store(Arc::new(key));
                     }
-                    Err(e) => eprintln!("ODoH key rotation error: {e}"),
+                    Err(e) => eprintln!("ODoH key rotation error: {}", e),
                 };
             }
         });
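The `r#gen` spelling on the master side is a raw identifier: `gen` is reserved as a keyword in the Rust 2024 edition, so newer editions must call rand's `gen` method through `r#gen`. A small, self-contained illustration (assumed: rand 0.8, as pinned in the libdoh manifest above):

```rust
use rand::Rng;

fn main() {
    // Equivalent to `.gen::<[u8; 16]>()` in pre-2024 editions.
    let nonce: [u8; 16] = rand::thread_rng().r#gen();
    println!("{nonce:?}");
}
```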

View file

@@ -29,7 +29,10 @@ where
     let mut reader = BufReader::new(File::open(certs_path).map_err(|e| {
         io::Error::new(
             e.kind(),
-            format!("Unable to load the certificates [{certs_path_str}]: {e}"),
+            format!(
+                "Unable to load the certificates [{}]: {}",
+                certs_path_str, e
+            ),
         )
     })?);
     rustls_pemfile::certs(&mut reader).map_err(|_| {
@@ -50,7 +53,10 @@ where
         .map_err(|e| {
             io::Error::new(
                 e.kind(),
-                format!("Unable to load the certificate keys [{certs_keys_path_str}]: {e}"),
+                format!(
+                    "Unable to load the certificate keys [{}]: {}",
+                    certs_keys_path_str, e
+                ),
             )
         })?
         .read_to_end(&mut encoded_keys)?;
@@ -87,9 +93,12 @@ where
             let server_config_builder = ServerConfig::builder()
                 .with_safe_defaults()
                 .with_no_client_auth();
-            match server_config_builder.with_single_cert(certs.clone(), certs_key) {
-                Ok(found_config) => Some(found_config),
-                _ => None,
-            }
+            if let Ok(found_config) =
+                server_config_builder.with_single_cert(certs.clone(), certs_key)
+            {
+                Some(found_config)
+            } else {
+                None
+            }
         })
         .ok_or_else(|| {
@@ -154,12 +163,12 @@ impl DoH {
                             break;
                         }
                     }
-                    Err(e) => eprintln!("TLS certificates error: {e}"),
+                    Err(e) => eprintln!("TLS certificates error: {}", e),
                 }
                 tokio::time::sleep(Duration::from_secs(CERTS_WATCH_DELAY_SECS.into())).await;
             }
            Ok::<_, DoHError>(())
         };
-        join!(https_service, cert_service).0
+        return join!(https_service, cert_service).0;
     }
 }
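For context, a compact sketch of the certificate-loading flow these hunks revolve around, assuming the crate versions pinned in this diff (rustls 0.21 via tokio-rustls 0.24, rustls-pemfile 1.x); the function name is illustrative and this is not the project's actual helper:

```rust
use std::{fs::File, io::BufReader, sync::Arc};
use tokio_rustls::rustls::{Certificate, PrivateKey, ServerConfig};

fn load_server_config(cert_path: &str, key_path: &str) -> std::io::Result<Arc<ServerConfig>> {
    // Read every CERTIFICATE block from the PEM file.
    let certs: Vec<Certificate> =
        rustls_pemfile::certs(&mut BufReader::new(File::open(cert_path)?))?
            .into_iter()
            .map(Certificate)
            .collect();
    // Take the first PKCS#8 private key found in the key file.
    let key = rustls_pemfile::pkcs8_private_keys(&mut BufReader::new(File::open(key_path)?))?
        .into_iter()
        .map(PrivateKey)
        .next()
        .ok_or_else(|| std::io::Error::new(std::io::ErrorKind::InvalidInput, "no PKCS#8 key"))?;
    // Same builder chain as in the diff above.
    let config = ServerConfig::builder()
        .with_safe_defaults()
        .with_no_client_auth()
        .with_single_cert(certs, key)
        .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidInput, e))?;
    Ok(Arc::new(config))
}
```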

View file

@@ -2,23 +2,25 @@
 use std::net::{SocketAddr, ToSocketAddrs};
-pub(crate) fn verify_sock_addr(arg_val: &str) -> Result<String, String> {
+pub(crate) fn verify_sock_addr(arg_val: &str) -> Result<(), String> {
     match arg_val.parse::<SocketAddr>() {
-        Ok(_addr) => Ok(arg_val.to_string()),
+        Ok(_addr) => Ok(()),
         Err(_) => Err(format!(
-            "Could not parse \"{arg_val}\" as a valid socket address (with port)."
+            "Could not parse \"{}\" as a valid socket address (with port).",
+            arg_val
         )),
     }
 }
-pub(crate) fn verify_remote_server(arg_val: &str) -> Result<String, String> {
+pub(crate) fn verify_remote_server(arg_val: &str) -> Result<(), String> {
     match arg_val.to_socket_addrs() {
         Ok(mut addr_iter) => match addr_iter.next() {
-            Some(_) => Ok(arg_val.to_string()),
+            Some(_) => Ok(()),
             None => Err(format!(
-                "Could not parse \"{arg_val}\" as a valid remote uri"
+                "Could not parse \"{}\" as a valid remote uri",
+                arg_val
             )),
         },
-        Err(err) => Err(format!("{err}")),
+        Err(err) => Err(format!("{}", err)),
     }
 }
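A hypothetical test sketch (not part of the repository) targeting the master-side signatures shown above, where the checks follow the clap 4 `value_parser` contract and return the accepted value rather than `()`:

```rust
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn validators_return_the_accepted_value() {
        // Valid socket address: the same string is handed back.
        assert_eq!(
            verify_sock_addr("127.0.0.1:3000").as_deref(),
            Ok("127.0.0.1:3000")
        );
        // Invalid input is rejected with an error message.
        assert!(verify_sock_addr("not a socket address").is_err());
        // Remote addresses are resolved via ToSocketAddrs; an IP:port needs no DNS lookup.
        assert!(verify_remote_server("127.0.0.1:53").is_ok());
    }
}
```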