Compare commits

...

185 commits

Author SHA1 Message Date
Frank Denis
f0242354d3 Update deps 2025-03-20 00:43:38 +01:00
Frank Denis
25fa6946e6 tar cJpf -> tar cjpf in order to build bz2 archives
Fixes #103
2025-03-20 00:37:34 +01:00
Frank Denis
2254632d33 Update deps 2025-02-20 20:37:23 +01:00
Frank Denis
672d1a11f1 2025 2025-02-20 20:33:01 +01:00
Frank Denis
9e4a931bce Nits 2025-02-20 20:32:42 +01:00
Frank Denis
40b0b02972 Add issues.yml 2024-12-31 14:54:55 +01:00
Frank Denis
bf443c33b9 Switch to mlugg/setup-zig@v1 2024-11-04 00:11:49 +01:00
Frank Denis
1a0a0566c4 Back to Zig 0.10.1 2024-07-03 14:38:16 +02:00
Frank Denis
890a74276f Downgrade to Zig 0.12.0 2024-07-03 14:26:44 +02:00
Frank Denis
34f614e938 0.13 -> 0.13.0 2024-07-03 14:17:17 +02:00
Frank Denis
d6635eebb7 up 2024-07-03 14:16:01 +02:00
Frank Denis
c79501aea3 Use Zig 0.13 2024-07-03 14:03:51 +02:00
Frank Denis
e73964fa1d Update deps 2024-07-03 13:52:56 +02:00
Frank Denis
bafbdc0926 Try creating RPM packages
Fixes #98
2024-07-03 13:27:29 +02:00
Frank Denis
30a55a0f2f Merge branch 'master' of github.com:jedisct1/rust-doh
* 'master' of github.com:jedisct1/rust-doh:
  Add Let's Encrypt R10
  Update common hashes
2024-07-03 12:33:38 +02:00
Frank Denis
7bb8293c28 package.metadata.generate-rpm 2024-07-03 12:33:29 +02:00
Frank Denis
a6517472d5
Merge pull request #97 from demarcush/patch-1
Update common hashes
2024-07-03 00:11:22 +02:00
demarcush
3511672d49
Add Let's Encrypt R10 2024-07-02 20:47:53 +00:00
demarcush
bd85572368
Update common hashes 2024-05-14 03:44:17 +00:00
Frank Denis
02b3a67a00 Update hyper to 0.14.28 2024-05-06 12:22:21 +02:00
Frank Denis
66c66c7a28 Update mimalloc 2024-05-05 18:01:19 +02:00
Frank Denis
1165fab90c Update a few deps 2024-03-06 18:25:38 +01:00
Frank Denis
c92308ccbb Update deps 2023-09-02 00:20:06 +02:00
Frank Denis
78c47830ff Update deps 2023-07-15 21:18:46 +02:00
Frank Denis
9e2853da86 Update deps 2023-05-03 17:35:23 +02:00
Frank Denis
e5f6f2a5d6 Bump 2023-04-14 12:44:40 +02:00
Frank Denis
e8df0458ac Bump hyper. Again. 2023-04-14 12:38:08 +02:00
Frank Denis
19040f1e88 Nits 2023-04-14 09:45:20 +02:00
Frank Denis
6f9f63e754 Update deps, especially hyper 2023-04-13 17:13:03 +02:00
Frank Denis
678bd04bed Update deps 2023-04-13 17:12:29 +02:00
Frank Denis
ffa0828515 Update tokio 2023-03-02 19:05:11 +01:00
Frank Denis
6580f6ffb5 Fix CI 2023-02-19 21:50:08 +01:00
Frank Denis
f64770bdd7 Install zig 0.10.1 2023-02-19 21:44:31 +01:00
Frank Denis
18297228c7 Bump 2023-02-19 21:10:14 +01:00
Frank Denis
908e7d64db Update base64 2023-02-19 21:05:34 +01:00
Frank Denis
c54b3303fc Update base64, accept padding on decoding 2023-02-19 21:02:28 +01:00
Frank Denis
1c5c83803a Remove optional requirement 2023-02-09 17:21:29 +01:00
Frank Denis
1386b7d13a Mention HTTP/3 2023-02-09 17:18:27 +01:00
Frank Denis
920d31b502 Update relayd URL 2023-02-09 17:16:34 +01:00
Frank Denis
651224d900 Format 2023-02-09 16:58:58 +01:00
Frank Denis
b5d525abcd Update deps 2023-02-09 16:58:31 +01:00
Frank Denis
11d8f4cb31 Add a logo 2023-02-01 20:28:37 +01:00
Frank Denis
47330ebcad Update deps 2023-01-31 00:00:21 +01:00
Frank Denis
d5fd8231ff Sorry, Debian-aarch64 users 2023-01-12 01:38:53 +01:00
Frank Denis
8cba04338e Debian again... 2023-01-12 01:27:43 +01:00
Frank Denis
85280f4525 Try to build Debian packages 2023-01-12 01:25:34 +01:00
Frank Denis
1c28a28b78 Bump 2023-01-12 01:16:47 +01:00
Frank Denis
fbf82068d1 Only retrieve clap arguments as String, don't expect it to be smart
Fixes #94
2023-01-12 01:14:33 +01:00
Frank Denis
c9e084b2b4 Title 2023-01-09 21:34:33 +01:00
Frank Denis
37dc663b6e Simpler incantation to build Debian packages 2023-01-03 11:53:35 +01:00
Frank Denis
b81cc3e5d2 Remove glibc dependency for the Debian package
Fixes #93
2023-01-03 11:06:31 +01:00
Frank Denis
3f1bbcd8dc link-args=-s -> -C strip=symbols 2023-01-03 11:01:39 +01:00
Frank Denis
e92fddb165 Use zip -9 -r 2023-01-02 22:29:51 +01:00
Frank Denis
d573a20c86 Use v3 for the checkout action 2023-01-02 21:48:23 +01:00
Frank Denis
f5c07a205b Ah 2023-01-02 21:15:00 +01:00
Frank Denis
d277c0a806 Update upload-release actions 2023-01-02 21:14:00 +01:00
Frank Denis
fc61c79a9f Windows requires a .exe suffix 2023-01-02 21:00:07 +01:00
Frank Denis
a92f4a77ae Rust requires every single target to be installed individually 2023-01-02 20:47:01 +01:00
Frank Denis
a373957045 Install Zig 2023-01-02 20:37:09 +01:00
Frank Denis
6f5213838b Actions require unique names 2023-01-02 20:21:24 +01:00
Frank Denis
eede3f4ab3 2023 2023-01-02 20:19:56 +01:00
Frank Denis
fdcc797fcb Bump 2023-01-02 20:19:04 +01:00
Frank Denis
3e59f42558 Bump 2023-01-02 20:17:50 +01:00
Frank Denis
a1fc5bbffc CI: try to package a build for Windows 2023-01-02 20:17:20 +01:00
Frank Denis
4b887d6705 Update arc-swap 2023-01-02 20:05:19 +01:00
Frank Denis
6818fbe8a1 Update to clap 4
The new API is confusing and very error-prone, with errors being
thrown at runtime rather than compile-time.

Hopefully nothing got broken in the process.
2022-12-25 12:37:48 +01:00
Frank Denis
c82fb339ed Update deps 2022-12-25 11:23:13 +01:00
Frank Denis
06a3fa0499 Clarify what -H and -g do
Fixes #68
2022-12-25 10:21:19 +01:00
Frank Denis
8b9f9377b3 Update deps 2022-10-11 22:25:59 +02:00
Frank Denis
767b3e17b1 Update odoh-rs to the final version 2022-10-11 22:25:29 +02:00
Frank Denis
a60ced8782 Update deps 2022-09-21 12:21:00 +02:00
Frank Denis
25d1261730 Remove unneeded reference 2022-09-15 12:41:40 +02:00
Frank Denis
ff62b6a24b Disable the parking_lot feature in tokio.
Mutexes from the standard library have improved in recent Rust
versions. On Linux only, though.
2022-07-02 17:44:42 +02:00
Frank Denis
fd65582aa6 Nits 2022-06-24 23:49:04 +02:00
Frank Denis
d12b9deb35 Bump 2022-06-06 09:03:15 +02:00
Frank Denis
965bca7fde Remove Travis 2022-06-06 09:02:14 +02:00
Frank Denis
5b11bc520e Format 2022-06-06 09:01:36 +02:00
Frank Denis
ab4c27ef86 Update deps 2022-06-06 09:00:51 +02:00
Frank Denis
db9c8634e3 Replace jemalloc with mimalloc 2022-05-25 13:39:09 +02:00
Frank Denis
533c29ec1e Update rustls-pemfile 2022-05-14 13:02:12 +02:00
Frank Denis
e27ab7dee9 Format 2022-05-14 01:33:03 +02:00
Frank Denis
511b0b4388 Reorder 2022-05-07 15:38:04 +02:00
Frank Denis
74939bdc6c Bump 2022-03-05 16:04:30 +01:00
Frank Denis
054beb390c Update deps 2022-03-05 16:04:01 +01:00
Frank Denis
16ab626cc2 Update rustls-pemfile 2022-02-11 20:32:46 +01:00
Frank Denis
115938f90f Y++ 2022-01-01 10:29:39 +01:00
Frank Denis
c6c9d64681 Update clap to v3 2022-01-01 10:29:15 +01:00
Frank Denis
d586c50019 Uglify 2021-10-29 20:23:41 +02:00
Frank Denis
46be8b9662 Painful update of rustls 2021-10-29 20:13:47 +02:00
Frank Denis
e6fe51647d Bump 2021-09-07 13:25:16 +02:00
Frank Denis
379a7abc7e Add CORS header (only for DoH), for web browsers 2021-09-07 13:22:55 +02:00
Frank Denis
5770f9da33 Remove retired Let's Encrypt certificate 2021-09-07 12:08:28 +02:00
Frank Denis
b77f10cd9d Bump 2021-09-06 19:28:30 +02:00
Frank Denis
63eac2a622 Add the ability to specify an alternative port number 2021-09-06 19:24:19 +02:00
Frank Denis
a727c4b9fa Keep the LICENSE file, in addition to its name in Cargo 2021-08-20 01:13:44 +02:00
Frank Denis
2918061786 license -> license-file 2021-08-20 01:12:14 +02:00
Frank Denis
7657d5a2b2 Require tokio 1.10 2021-08-13 22:21:28 +02:00
Frank Denis
f9d2a0fc94 Bump 2021-06-12 14:01:52 +02:00
Frank Denis
4f1e0f2abe Print ODoH DNS stamps 2021-06-12 14:00:24 +02:00
Frank Denis
a988eb42a2 Properly use the odoh-rs API 2021-06-12 13:32:38 +02:00
Frank Denis
a19c523cf2 Nits 2021-06-12 10:46:38 +02:00
Frank Denis
b637bb1ec9 Downgrade hpke 2021-06-12 10:33:51 +02:00
Frank Denis
f4a1dee971 Update odoh-rs
Same thing, just more complicated to use
2021-06-12 10:28:27 +02:00
Frank Denis
f4cc9bb0f9 Merge branch 'master' of github.com:jedisct1/rust-doh
* 'master' of github.com:jedisct1/rust-doh:
  Add retries over TCP
2021-06-10 22:28:33 +02:00
Frank Denis
485afd5976 Add retries over TCP
Fixes #62
2021-06-09 10:50:28 +02:00
Frank Denis
0f268055b7 up 2021-06-07 14:48:03 +02:00
Frank Denis
324bbcde60 Add Cargo keyword 2021-06-07 14:47:36 +02:00
Frank Denis
474701ec1e Up 2021-06-07 14:45:21 +02:00
Frank Denis
3b77ff2e34 Typo 2021-06-07 14:42:47 +02:00
Frank Denis
ece8a445cb DOcument --allow-odoh-post 2021-06-07 14:42:21 +02:00
Frank Denis
eebd6b8356 Add a note on keys 2021-06-06 22:52:54 +02:00
Frank Denis
fd1081e0b0 up 2021-06-06 22:50:50 +02:00
Frank Denis
5c369fc610 Don't use a 0 TTL for ODoH configs 2021-06-06 17:46:18 +02:00
Frank Denis
3bc0d22f69 Add --allow-odoh-post 2021-06-06 17:41:48 +02:00
Frank Denis
a746e2822a Reject large query strings 2021-06-06 17:36:04 +02:00
Frank Denis
9be0d1ed74 Check Accept: if there is no Content-Type: 2021-06-06 17:31:15 +02:00
Frank Denis
62744d5390 Handle ODoH queries using GET 2021-06-06 16:11:45 +02:00
Frank Denis
21fc7441b3 Shuffle 2021-06-06 15:58:58 +02:00
Frank Denis
6edccca03e Factor DoH serving code 2021-06-06 15:53:40 +02:00
Frank Denis
90c30c8905 Avoid unwrap() 2021-06-05 17:24:50 +02:00
Frank Denis
1389c82872 Move file comment up 2021-06-05 17:11:09 +02:00
Frank Denis
9445e95014 Update deps, format 2021-05-14 23:36:37 +02:00
Frank Denis
338d6436c0 Let's Encrypt retired X3, introduced E1 2021-05-12 16:06:39 +02:00
Frank Denis
4e54008b10
Merge pull request #59 from chris-wood/caw/add-odoh
Add Oblivious DoH target support as a default feature.
2021-05-11 22:49:57 +02:00
Christopher Wood
822d3d9a51 Implement ODoH key rotation. 2021-05-01 07:56:10 -07:00
Frank Denis
4cb88417ba
Merge pull request #60 from jedisct1/dependabot/add-v2-config-file
Upgrade to GitHub-native Dependabot
2021-04-30 16:05:35 +02:00
dependabot-preview[bot]
e34f60e2eb
Upgrade to GitHub-native Dependabot 2021-04-29 20:50:32 +00:00
Christopher Wood
25a9c285db Remove dead comment. 2021-04-26 13:08:54 -07:00
Christopher Wood
05a60818ce Add Oblivious DoH target support as a default feature.
This change adds Oblivious DoH (ODoH) target support to doh-server.
This change does include support for ODoH key rotation or algorithm
agility. ODoH is a default feature and not conditionally compiled
out.
2021-04-26 13:05:52 -07:00
Frank Denis
42211d4f5e Sync usage 2021-03-06 22:22:34 +01:00
Frank Denis
0403de66f1 Compute a preliminary stamp 2021-03-06 22:21:19 +01:00
Frank Denis
00cc43e2bb Clarify 2021-03-06 22:02:28 +01:00
Frank Denis
63d672895f Clarify 2021-03-06 21:46:39 +01:00
Frank Denis
4de5310430 fullchain.cer works 2021-03-06 21:45:33 +01:00
Frank Denis
4d685d8948 LE ECDSA certs don't play well with (at least Go) 2021-03-06 21:41:39 +01:00
Frank Denis
6f40f792e3 Clarify more 2021-03-06 21:32:59 +01:00
Frank Denis
eb8ea3dc84 Clarify 2021-03-06 21:32:01 +01:00
Frank Denis
ecacd6eca9 Add the command to convert SEC1 to PKCS8 2021-03-06 21:14:04 +01:00
Frank Denis
623328d37f Mention that certs must be RSA 2021-03-06 21:05:35 +01:00
Frank Denis
0404b8f8a7 space 2021-02-16 01:42:09 +01:00
Frank Denis
de0e8a39c3 Nits 2021-02-16 01:36:49 +01:00
Frank Denis
30abc95e48 CI: add a release task 2021-02-16 01:23:41 +01:00
Frank Denis
03581234b5 CI simplification 2021-02-16 00:54:06 +01:00
Frank Denis
dbc5dc702f Documentation updates 2021-02-16 00:13:44 +01:00
Frank Denis
ba663ef4d9 Reload certs every 10 sec 2021-02-16 00:02:42 +01:00
Frank Denis
518341df37 Reorganize a bit 2021-02-16 00:00:02 +01:00
Frank Denis
39124df9fc Reexport tokio 2021-02-15 23:46:24 +01:00
Frank Denis
b4d4eaae50 Limit the number of concurrent streams per client 2021-02-15 23:43:36 +01:00
Frank Denis
a2f342379e Automatically update the certificates without restarting 2021-02-15 23:31:45 +01:00
Frank Denis
0a99d0d212 Bump 2021-02-15 00:42:08 +01:00
Frank Denis
4326f1afa7 Set ALPN config to advertise HTTP/2 2021-02-15 00:41:16 +01:00
Frank Denis
c6c5c71458 Enable support for early data 2021-02-15 00:11:52 +01:00
Frank Denis
2179ceae67 Rewind cursor 2021-02-15 00:06:50 +01:00
Frank Denis
9f092224cd Parse PKCS8 and RSA keys separately 2021-02-15 00:00:57 +01:00
Frank Denis
c3f724118c Properly parse the -I and -i options (key file and cert file) 2021-02-14 23:48:24 +01:00
Frank Denis
dbe14da43e Disable default features for libdoh
Fixes #56
2021-02-12 21:57:21 +01:00
Frank Denis
d7fa144671 Bump 2021-02-12 19:32:41 +01:00
Frank Denis
2e95a50f9f Update deps; tokio now require the "time" feature 2021-02-12 19:23:08 +01:00
Frank Denis
b281555860 compare_and_swap() was too simple and has been deprecated 2021-02-12 19:20:01 +01:00
Frank Denis
741d28557d Update deps 2021-01-27 20:21:17 +01:00
Frank Denis
c176eeff5f Shrink tokio features 2021-01-03 14:26:29 +01:00
Frank Denis
226d8fe52a Update deps; fix for new tokio 2021-01-03 14:19:26 +01:00
Frank Denis
b544ca3daa year++ 2021-01-01 00:03:32 +01:00
Frank Denis
3ee8477ffa Add a link to rustup 2020-12-12 23:29:05 +01:00
Frank Denis
548adf7810 Remove links to precompiled packages 2020-12-12 23:28:31 +01:00
Frank Denis
1174d2c5b7 Update some base versions 2020-12-12 23:20:49 +01:00
Frank Denis
2b706345a4 Add more Let's Encrypt certificates hashes 2020-11-29 20:43:25 +01:00
Frank Denis
62226b12e4 Update deps 2020-11-23 22:57:55 +01:00
Frank Denis
5fabdbf2d1 Default tls_cert_key_path to tls_cert_path 2020-10-12 12:39:17 +02:00
Frank Denis
cf8ba631e6 Update deps 2020-08-31 22:07:37 +02:00
Frank Denis
d535650ed4 Update server list URL 2020-08-31 22:06:01 +02:00
Frank Denis
bf589911de STALE_IF_ERROR_SECS can be bumped up 2020-07-09 21:10:31 +02:00
Frank Denis
64cd83a440 Don't use the TTL for stale-if-error and stale-while-revalidate
Use constant, reasonable values instead
2020-07-09 21:08:34 +02:00
Frank Denis
b8c8dacb5d Remove max-stale 2020-07-09 20:26:07 +02:00
Frank Denis
1a0acbea44 Add stale-if-error and max-stale 2020-07-09 20:24:58 +02:00
Frank Denis
1812880562 Add stale-while-revalidate to Cache-Control 2020-07-07 14:36:04 +02:00
Frank Denis
5058aeaf07 Remove SARIF upload 2020-06-11 10:46:34 +02:00
Frank Denis
61b5c694c6
Add ShiftLeft scan 2020-06-11 10:43:34 +02:00
Frank Denis
e204f18609 Update links to precompiled binaries 2020-05-19 10:12:26 +02:00
Frank Denis
cf246f929c up 2020-05-19 10:11:34 +02:00
Frank Denis
8e4d66f68a Update deps, especially for tokio-rustls 2020-05-19 10:03:13 +02:00
Frank Denis
8d72413eaf Minibump 2020-04-14 19:25:01 +02:00
Frank Denis
f2215aa52f 'hyper::server::conn::Http::<E>::keep_alive': renamed to http1_keep_alive 2020-04-14 19:23:38 +02:00
Frank Denis
87954b5012 Bump minimal dependency versions 2020-04-14 19:22:49 +02:00
Frank Denis
868c41b9b8 Update minimal tokio version
0.2.13 seems to be stable
2020-03-19 12:07:15 +01:00
24 changed files with 1230 additions and 349 deletions

8
.github/dependabot.yml vendored Normal file
View file

@@ -0,0 +1,8 @@
version: 2
updates:
- package-ecosystem: cargo
directory: "/"
schedule:
interval: daily
time: "04:00"
open-pull-requests-limit: 10

17
.github/workflows/issues.yml vendored Normal file
View file

@@ -0,0 +1,17 @@
name: Close inactive issues
on:
schedule:
- cron: "30 1 * * *"
jobs:
close-issues:
runs-on: ubuntu-latest
permissions:
issues: write
pull-requests: write
steps:
- uses: actions/stale@v9
with:
stale-issue-message: "This issue is stale because it has been open for 30 days with no activity."
close-issue-message: "This issue was closed because it has been inactive for 14 days since being marked as stale."
repo-token: ${{ secrets.GITHUB_TOKEN }}

164
.github/workflows/release.yml vendored Normal file
View file

@@ -0,0 +1,164 @@
name: Release
on:
push:
tags:
- "*"
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Get the version
id: get_version
run: echo ::set-output name=VERSION::${GITHUB_REF#refs/tags/}
- uses: actions/checkout@v3
- uses: mlugg/setup-zig@v1
with:
version: 0.10.1
- uses: hecrj/setup-rust-action@master
with:
rust-version: stable
- name: Check Cargo availability
run: cargo --version
- name: Check Rustup default toolchain
run: rustup default | grep stable
- name: Install cargo-deb
run: cargo install cargo-deb
- name: Install cargo-generate-rpm
run: cargo install cargo-generate-rpm
- name: Install cargo-zigbuild
run: cargo install cargo-zigbuild
- name: Release build Linux-x86-64
run: |
rustup target add x86_64-unknown-linux-musl
env RUSTFLAGS="-C strip=symbols" cargo zigbuild --release --target x86_64-unknown-linux-musl
mkdir doh-proxy
mv target/x86_64-unknown-linux-musl/release/doh-proxy doh-proxy/
cp README.md localhost.pem doh-proxy/
tar cjpf doh-proxy_${{ steps.get_version.outputs.VERSION }}_linux-x86_64.tar.bz2 doh-proxy
rm -fr doh-proxy
- name: Release build Linux-aarch64
run: |
rustup target add aarch64-unknown-linux-musl
env RUSTFLAGS="-C strip=symbols" cargo zigbuild --release --target aarch64-unknown-linux-musl
mkdir doh-proxy
mv target/aarch64-unknown-linux-musl/release/doh-proxy doh-proxy/
cp README.md localhost.pem doh-proxy/
tar cjpf doh-proxy_${{ steps.get_version.outputs.VERSION }}_linux-aarch64.tar.bz2 doh-proxy
rm -fr doh-proxy
- name: Release build Windows-x86_64
run: |
rustup target add x86_64-pc-windows-gnu
env RUSTFLAGS="-C strip=symbols" cargo zigbuild --release --target x86_64-pc-windows-gnu
mkdir doh-proxy
mv target/x86_64-pc-windows-gnu/release/doh-proxy.exe doh-proxy/
cp README.md localhost.pem doh-proxy/
zip -9 -r doh-proxy_${{ steps.get_version.outputs.VERSION }}_windows-x86_64.zip doh-proxy
rm -fr doh-proxy
- name: Debian packages
run: |
rustup target add x86_64-unknown-linux-musl
env RUSTFLAGS="-C strip=symbols" cargo deb --no-strip --cargo-build=zigbuild --target=x86_64-unknown-linux-musl
rustup target add aarch64-unknown-linux-musl
env RUSTFLAGS="-C strip=symbols" cargo deb --no-strip --cargo-build=zigbuild --target=aarch64-unknown-linux-musl
- name: RPM packages
run: |
rustup target add x86_64-unknown-linux-gnu
env RUSTFLAGS="-C strip=symbols" cargo-zigbuild build --target=x86_64-unknown-linux-gnu.2.17 --release
mv target/x86_64-unknown-linux-musl/release/doh-proxy target/release/
cargo generate-rpm --target x86_64-unknown-linux-gnu
rustup target add aarch64-unknown-linux-gnu
env RUSTFLAGS="-C strip=symbols" cargo-zigbuild build --target=aarch64-unknown-linux-gnu.2.17 --release
cargo generate-rpm --target aarch64-unknown-linux-gnu
- name: Create release
id: create_release
uses: actions/create-release@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tag_name: ${{ github.ref }}
release_name: Release ${{ github.ref }}
draft: true
prerelease: false
- name: Upload Debian package for x86_64
id: upload-release-asset-debian-x86_64
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_name: "doh-proxy_${{ steps.get_version.outputs.VERSION }}-1_amd64.deb"
asset_path: "target/x86_64-unknown-linux-musl/debian/doh-proxy_${{ steps.get_version.outputs.VERSION }}-1_amd64.deb"
asset_content_type: application/x-debian-package
- name: Upload RPM package for x86_64
id: upload-release-asset-rpm-x86_64
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_name: "doh-proxy-${{ steps.get_version.outputs.VERSION }}-1.x86_64.rpm"
asset_path: "target/x86_64-unknown-linux-gnu/generate-rpm/doh-proxy-${{ steps.get_version.outputs.VERSION }}-1.x86_64.rpm"
asset_content_type: application/x-redhat-package-manager
- name: Upload RPM package for aarch64
id: upload-release-asset-rpm-aarch64
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_name: "doh-proxy-${{ steps.get_version.outputs.VERSION }}-1.aarch64.rpm"
asset_path: "target/aarch64-unknown-linux-gnu/generate-rpm/doh-proxy-${{ steps.get_version.outputs.VERSION }}-1.aarch64.rpm"
asset_content_type: application/x-redhat-package-manager
- name: Upload tarball for linux-x86_64
id: upload-release-asset-tarball-linux-x86_64
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_name: "doh-proxy_${{ steps.get_version.outputs.VERSION }}_linux-x86_64.tar.bz2"
asset_path: "doh-proxy_${{ steps.get_version.outputs.VERSION }}_linux-x86_64.tar.bz2"
asset_content_type: application/x-tar
- name: Upload tarball for linux-aarch64
id: upload-release-asset-tarball-linux-aarch64
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_name: "doh-proxy_${{ steps.get_version.outputs.VERSION }}_linux-aarch64.tar.bz2"
asset_path: "doh-proxy_${{ steps.get_version.outputs.VERSION }}_linux-aarch64.tar.bz2"
asset_content_type: application/x-tar
- name: Upload tarball for windows-x86_64
id: upload-release-asset-tarball-windows-x86_64
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_name: "doh-proxy_${{ steps.get_version.outputs.VERSION }}_windows-x86_64.zip"
asset_path: "doh-proxy_${{ steps.get_version.outputs.VERSION }}_windows-x86_64.zip"
asset_content_type: application/zip

View file

@@ -1,36 +0,0 @@
name: Rust
on: [push]
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@master
- uses: hecrj/setup-rust-action@master
with:
rust-version: nightly
- name: Check Cargo availability
run: cargo --version
- name: Check Rustup default toolchain
run: rustup default | grep nightly
- name: Install cargo-deb
run: cargo install --debug cargo-deb
- name: Build
run: |
env RUSTFLAGS="-C link-arg=-s" cargo build --release --features=tls
mkdir doh-proxy
mv target/release/doh-proxy doh-proxy/
cp README.md doh-proxy/
- name: Debian package
run: |
cargo deb
- uses: actions/upload-artifact@master
with:
name: doh-proxy-linux-x86_64
path: doh-proxy
- uses: actions/upload-artifact@master
with:
name: debian
path: target/debian

View file

@@ -0,0 +1,17 @@
name: ShiftLeft Scan
on: push
jobs:
Scan-Build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v1
- name: Perform ShiftLeft Scan
uses: ShiftLeftSecurity/scan-action@master
env:
WORKSPACE: ""
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
output: reports

23
.github/workflows/test.yml vendored Normal file
View file

@@ -0,0 +1,23 @@
name: Rust
on: [push]
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@master
- uses: hecrj/setup-rust-action@master
with:
rust-version: stable
- name: Check Cargo availability
run: cargo --version
- name: Check Rustup default toolchain
run: rustup default | grep stable
- name: Build without default features
run: |
env RUSTFLAGS="-C link-arg=-s" cargo check --no-default-features
- name: Build with default features
run: |
env RUSTFLAGS="-C link-arg=-s" cargo check

View file

@@ -1,4 +0,0 @@
language: rust
rust:
- nightly
- stable

View file

@@ -1,13 +1,13 @@
[package]
name = "doh-proxy"
version = "0.3.1"
version = "0.9.11"
authors = ["Frank Denis <github@pureftpd.org>"]
description = "A DNS-over-HTTPS (DoH) proxy"
keywords = ["dns","https","doh","proxy"]
description = "A DNS-over-HTTPS (DoH) and ODoH (Oblivious DoH) proxy"
keywords = ["dns", "https", "doh", "odoh", "proxy"]
license = "MIT"
homepage = "https://github.com/jedisct1/rust-doh"
repository = "https://github.com/jedisct1/rust-doh"
categories = ["asynchronous", "network-programming","command-line-utilities"]
categories = ["asynchronous", "network-programming", "command-line-utilities"]
edition = "2018"
readme = "README.md"
@@ -16,17 +16,31 @@ default = ["tls"]
tls = ["libdoh/tls"]
[dependencies]
libdoh = { path = "src/libdoh", version = "0.3" }
clap = "2"
jemallocator = "0"
tokio = { version = "0.2.11", features = ["rt-threaded", "time", "tcp", "udp", "stream", "parking_lot"] }
libdoh = { path = "src/libdoh", version = "0.9.9", default-features = false }
clap = { version = "4", features = ["std", "cargo", "wrap_help", "string"] }
dnsstamps = "0.1.10"
mimalloc = { version = "0.1.44", default-features = false }
[package.metadata.generate-rpm]
assets = [
{ source = "target/release/doh-proxy", dest = "/usr/bin/doh-proxy", mode = "755" },
{ source = "README.md", dest = "/usr/share/doc/doh-proxy/README.md", mode = "644", doc = true },
]
[package.metadata.deb]
extended-description = """\
A fast and secure DoH (DNS-over-HTTPS) server written in Rust."""
A fast and secure DoH (DNS-over-HTTPS) and ODoH server written in Rust."""
assets = [
["target/release/doh-proxy", "usr/bin/", "755"],
["README.md", "usr/share/doc/doh-proxy/README.md", "644"]
[
"target/release/doh-proxy",
"usr/bin/",
"755",
],
[
"README.md",
"usr/share/doc/doh-proxy/README.md",
"644",
],
]
section = "network"
depends = "$auto"

View file

@@ -1,6 +1,6 @@
MIT License
Copyright (c) 2018-2020 Frank Denis
Copyright (c) 2018-2025 Frank Denis
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

110
README.md
View file

@@ -1,25 +1,26 @@
# doh-proxy
# ![DoH server (and ODoH - Oblivious DoH server)](logo.png)
A fast and secure DoH (DNS-over-HTTPS) server written in Rust.
A fast and secure DoH (DNS-over-HTTPS) and ODoH (Oblivious DoH) server.
`doh-proxy` is written in Rust, and has been battle-tested in production since February 2018. It doesn't do DNS resolution on its own, but can sit in front of any DNS resolver in order to augment it with DoH support.
## Installation
### Precompiled packages
### Option 1: precompiled binaries for Linux
- [Linux x86_64 (tar)](https://github.com/jedisct1/rust-doh/suites/432902756/artifacts/1502827)
- [Linux x86_64 (debian package)](https://github.com/jedisct1/rust-doh/suites/432902756/artifacts/1502826)
Precompiled tarballs and Debian packages for Linux/x86_64 [can be downloaded here](https://github.com/jedisct1/doh-server/releases/latest).
### From source code
### Option 2: from source code
This requires the `rust` compiler to be installed.
This requires the [`rust`](https://rustup.rs) compiler to be installed.
With built-in support for HTTPS (default):
* With built-in support for HTTPS (default):
```sh
cargo install doh-proxy
```
Without built-in support for HTTPS:
* Without built-in support for HTTPS:
```sh
cargo install doh-proxy --no-default-features
@@ -32,6 +33,7 @@ USAGE:
doh-proxy [FLAGS] [OPTIONS]
FLAGS:
-O, --allow-odoh-post Allow POST queries over ODoH even if they have been disabed for DoH
-K, --disable-keepalive Disable keepalive
-P, --disable-post Disable POST queries
-h, --help Prints help information
@@ -39,50 +41,94 @@ FLAGS:
OPTIONS:
-E, --err-ttl <err_ttl> TTL for errors, in seconds [default: 2]
-H, --hostname <hostname> Host name (not IP address) DoH clients will use to connect
-l, --listen-address <listen_address> Address to listen to [default: 127.0.0.1:3000]
-b, --local-bind-address <local_bind_address> Address to connect from
-c, --max-clients <max_clients> Maximum number of simultaneous clients [default: 512]
-C, --max-concurrent <max_concurrent> Maximum number of concurrent requests per client [default: 16]
-X, --max-ttl <max_ttl> Maximum TTL, in seconds [default: 604800]
-T, --min-ttl <min_ttl> Minimum TTL, in seconds [default: 10]
-p, --path <path> URI path [default: /dns-query]
-g, --public-address <public_address> External IP address DoH clients will connect to
-j, --public-port <public_port> External port DoH clients will connect to, if not 443
-u, --server-address <server_address> Address to connect to [default: 9.9.9.9:53]
-t, --timeout <timeout> Timeout, in seconds [default: 10]
-I, --tls-cert-key-path <tls_cert_key_path>
Path to the PEM-encoded secret keys (only required for built-in TLS)
-i, --tls-cert-path <tls_cert_path> Path to a PEM-encoded identity (only required for built-in TLS)
-i, --tls-cert-path <tls_cert_path>
Path to the PEM/PKCS#8-encoded certificates (only required for built-in TLS)
```
## HTTP/2 termination
Example command-line:
The recommended way to use `doh-proxy` is to use a TLS termination proxy (such as [hitch](https://github.com/varnish/hitch) or [relayd](https://bsd.plumbing/about.html)), a CDN or a web server with proxying abilities as a front-end.
```sh
doh-proxy -H 'doh.example.com' -u 127.0.0.1:53 -g 233.252.0.5
```
Here, `doh.example.com` is the host name (which should match a name included in the TLS certificate), `127.0.0.1:53` is the address of the DNS resolver, and `233.252.0.5` is the public IP address of the DoH server.
## HTTP/2 and HTTP/3 termination
The recommended way to use `doh-proxy` is to use a TLS termination proxy (such as [hitch](https://github.com/varnish/hitch) or [relayd](https://man.openbsd.org/relayd.8)), a CDN or a web server with proxying abilities as a front-end.
That way, the DoH service can be exposed as a virtual host, sharing the same IP addresses as existing websites.
If `doh-proxy` and the HTTP/2 front-end run on the same host, using the HTTP protocol to communicate between both is fine.
If `doh-proxy` and the HTTP/2 (/ HTTP/3) front-end run on the same host, using the HTTP protocol to communicate between both is fine.
If both are on distinct networks, such as when using a CDN, `doh-proxy` can handle HTTPS requests, provided that it was compiled with the `tls` feature.
The certificates and private keys must be encoded in PEM format. They can be stored in the same file.
The certificates and private keys must be encoded in PEM/PKCS#8 format. They can be stored in the same file.
If you are using ECDSA certificates and ECDSA private keys start with `-----BEGIN EC PRIVATE KEY-----` and not `-----BEGIN PRIVATE KEY-----`, convert them to PKCS#8 with (in this example, `example.key` is the original file):
```sh
openssl pkcs8 -topk8 -nocrypt -in example.key -out example.pkcs8.pem
```
In order to enable built-in HTTPS support, add the `--tls-cert-path` option to specify the location of the certificates file, as well as the private keys file using `--tls-cert-key-path`.
Once HTTPS is enabled, HTTP connections will not be accepted.
A sample self-signed certificate [`localhost.pem`](https://github.com/jedisct1/rust-doh/raw/master/localhost.pem) can be used for testing.
A sample self-signed certificate [`localhost.pem`](https://github.com/jedisct1/doh-server/raw/master/localhost.pem) can be used for testing.
The file also includes the private key.
[`acme.sh`](https://github.com/acmesh-official/acme.sh) can be used to create and update TLS certificates using Let's Encrypt and other ACME-compliant providers. If you are using it to create ECDSA keys, see above for converting the secret key into PKCS#8.
The certificates path must be set to the full certificates chain (`fullchain.cer`) and the key path to the secret keys (the `.key` file):
```sh
doh-proxy -i /path/to/fullchain.cer -I /path/to/domain.key ...
```
Once started, `doh-proxy` automatically reloads the certificates as they change; there is no need to restart the server.
If clients are getting the `x509: certificate signed by unknown authority` error, double check that the certificate file is the full chain, not the other `.cer` file.
## Accepting both DNSCrypt and DoH connections on port 443
DNSCrypt is an alternative encrypted DNS protocol that is faster and more lightweight than DoH.
Both DNSCrypt and DoH connections can be accepted on the same TCP port using [Encrypted DNS Server](https://github.com/jedisct1/encrypted-dns-server).
Encrypted DNS Server forwards DoH queries to Nginx or `rust-doh` when a TLS connection is detected, or directly responds to DNSCrypt queries.
Encrypted DNS Server forwards DoH queries to Nginx or `doh-proxy` when a TLS connection is detected, or directly responds to DNSCrypt queries.
It also provides DNS caching, server-side filtering, metrics, and TCP connection reuse in order to mitigate exhaustion attacks.
Unless the front-end is a CDN, an ideal setup is to use `rust-doh` behind `Encrypted DNS Server`.
Unless the front-end is a CDN, an ideal setup is to use `doh-proxy` behind `Encrypted DNS Server`.
## Oblivious DoH (ODoH)
Oblivious DoH is similar to Anonymized DNSCrypt, but for DoH. It requires relays, but also upstream DoH servers that support the protocol.
This proxy supports ODoH termination (not relaying) out of the box.
However, ephemeral keys are currently only stored in memory. In a load-balanced configuration, sticky sessions must be used.
Currently available ODoH relays only use `POST` queries.
So, `POST` queries have been disabled for regular DoH queries, accepting them is required to be compatible with ODoH relays.
This can be achieved with the `--allow-odoh-post` command-line switch.
## Operational recommendations
@ -90,7 +136,7 @@ Unless the front-end is a CDN, an ideal setup is to use `rust-doh` behind `Encry
* When using DoH, DNS stamps should include a resolver IP address in order to remove a dependency on non-encrypted, non-authenticated, easy-to-block resolvers.
* Unlike DNSCrypt where users must explicitly trust a DNS server's public key, the security of DoH relies on traditional public Certificate Authorities. Additional root certificates (required by governments, security software, enterprise gateways) installed on a client immediately make DoH vulnerable to MITM. In order to prevent this, DNS stamps should include the hash of the parent certificate.
* TLS certificates are tied to host names. But domains expire, get reassigned and switch hands all the time. If a domain originally used for a DoH service gets a new, possibly malicious owner, clients still configured to use the service will blindly keep trusting it if the CA is the same. As a mitigation, the CA should sign an intermediate certificate (the only one present in the stamp), itself used to sign the name used by the DoH server. While commercial CAs offer this, Let's Encrypt currently doesn't.
* Make sure that the front-end supports HTTP/2 and TLS 1.3.
* Make sure that the front-end supports at least HTTP/2 and TLS 1.3.
* Internal DoH servers still require TLS certificates. So, if you are planning to deploy an internal server, you need to set up an internal CA, or add self-signed certificates to every single client.
## Example usage with `encrypted-dns-server`
@ -104,10 +150,10 @@ upstream_addr = "127.0.0.1:3000"
## Example usage with `nginx`
In an existing `server`, a `/doh` endpoint can be exposed that way:
In an existing `server`, a `/dns-query` endpoint can be exposed that way:
```text
location /doh {
location /dns-query {
proxy_pass http://127.0.0.1:3000;
}
```
@ -140,16 +186,28 @@ Go back to the online DNS stamp calculator, and copy&paste the hash (in this exa
If you are using Let's Encrypt, the last line is likely to be:
```text
Advertised cert: [CN=Let's Encrypt Authority X3,O=Let's Encrypt,C=US] [3e1a1a0f6c53f3e97a492d57084b5b9807059ee057ab1505876fd83fda3db838]
Advertised cert: [CN=Let's Encrypt Authority R3,O=Let's Encrypt,C=US] [444ebd67bb83f8807b3921e938ac9178b882bd50aadb11231f044cf5f08df7ce]
```
There you have it. Your certificate hash is `3e1a1a0f6c53f3e97a492d57084b5b9807059ee057ab1505876fd83fda3db838`.
There you have it. Your certificate hash is `444ebd67bb83f8807b3921e938ac9178b882bd50aadb11231f044cf5f08df7ce`.
This [Go code snippet](https://gist.github.com/d6cb41742a1ceb54d48cc286f3d5c5fa) can also compute the hash of certificates given a `.der` file.
### Common certificate hashes
* Let's Encrypt E1:
* `cc1060d39c8329b62b6fbc7d0d6df9309869b981e7e6392d5cd8fa408f4d80e6`
* Let's Encrypt R3:
* `444ebd67bb83f8807b3921e938ac9178b882bd50aadb11231f044cf5f08df7ce`
* Let's Encrypt R10:
* `e644ba6963e335fe765cb9976b12b10eb54294b42477764ccb3a3acca3acb2fc`
* ZeroSSL:
* `9a3a34f727deb9bca51003d9ce9c39f8f27dd9c5242901c2bab1a44e635a0219`
## Clients
`doh-proxy` can be used with [dnscrypt-proxy](https://github.com/DNSCrypt/dnscrypt-proxy)
as a client.
`doh-proxy` can be used with [dnscrypt-proxy](https://github.com/DNSCrypt/dnscrypt-proxy) as a client.
`doh-proxy` is currently being used by the `doh.crypto.sx` public DNS resolver.
`doh-proxy` is used in production for the `doh.crypto.sx` public DNS resolver and many others.
An extensive list of public DoH servers can be found here: [public encrypted DNS servers](https://github.com/DNSCrypt/dnscrypt-resolvers/blob/master/v2/public-resolvers.md).
An extensive list of public DoH servers can be found here: [public encrypted DNS servers](https://github.com/DNSCrypt/dnscrypt-resolvers/blob/master/v3/public-resolvers.md).

BIN
logo.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 58 KiB

View file

@ -1,140 +1,183 @@
use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6, ToSocketAddrs};
#[cfg(feature = "tls")]
use std::path::PathBuf;
use std::time::Duration;
use clap::{Arg, ArgAction::SetTrue};
use libdoh::*;
use crate::constants::*;
use clap::Arg;
use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6, ToSocketAddrs};
use std::time::Duration;
#[cfg(feature = "tls")]
use std::path::PathBuf;
pub fn parse_opts(globals: &mut Globals) {
use crate::utils::{verify_remote_server, verify_sock_addr};
let max_clients = MAX_CLIENTS.to_string();
let timeout_sec = TIMEOUT_SEC.to_string();
let max_concurrent_streams = MAX_CONCURRENT_STREAMS.to_string();
let min_ttl = MIN_TTL.to_string();
let max_ttl = MAX_TTL.to_string();
let err_ttl = ERR_TTL.to_string();
let _ = include_str!("../Cargo.toml");
let options = app_from_crate!()
let options = command!()
.arg(
Arg::with_name("listen_address")
.short("l")
Arg::new("hostname")
.short('H')
.long("hostname")
.num_args(1)
.help("Host name (not IP address) DoH clients will use to connect"),
)
.arg(
Arg::new("public_address")
.short('g')
.long("public-address")
.num_args(1)
.help("External IP address DoH clients will connect to"),
)
.arg(
Arg::new("public_port")
.short('j')
.long("public-port")
.num_args(1)
.help("External port DoH clients will connect to, if not 443"),
)
.arg(
Arg::new("listen_address")
.short('l')
.long("listen-address")
.takes_value(true)
.num_args(1)
.default_value(LISTEN_ADDRESS)
.validator(verify_sock_addr)
.value_parser(verify_sock_addr)
.help("Address to listen to"),
)
.arg(
Arg::with_name("server_address")
.short("u")
Arg::new("server_address")
.short('u')
.long("server-address")
.takes_value(true)
.num_args(1)
.default_value(SERVER_ADDRESS)
.validator(verify_remote_server)
.value_parser(verify_remote_server)
.help("Address to connect to"),
)
.arg(
Arg::with_name("local_bind_address")
.short("b")
Arg::new("local_bind_address")
.short('b')
.long("local-bind-address")
.takes_value(true)
.validator(verify_sock_addr)
.num_args(1)
.value_parser(verify_sock_addr)
.help("Address to connect from"),
)
.arg(
Arg::with_name("path")
.short("p")
Arg::new("path")
.short('p')
.long("path")
.takes_value(true)
.num_args(1)
.default_value(PATH)
.help("URI path"),
)
.arg(
Arg::with_name("max_clients")
.short("c")
Arg::new("max_clients")
.short('c')
.long("max-clients")
.takes_value(true)
.default_value(&max_clients)
.num_args(1)
.default_value(max_clients)
.help("Maximum number of simultaneous clients"),
)
.arg(
Arg::with_name("timeout")
.short("t")
Arg::new("max_concurrent")
.short('C')
.long("max-concurrent")
.num_args(1)
.default_value(max_concurrent_streams)
.help("Maximum number of concurrent requests per client"),
)
.arg(
Arg::new("timeout")
.short('t')
.long("timeout")
.takes_value(true)
.default_value(&timeout_sec)
.num_args(1)
.default_value(timeout_sec)
.help("Timeout, in seconds"),
)
.arg(
Arg::with_name("min_ttl")
.short("T")
Arg::new("min_ttl")
.short('T')
.long("min-ttl")
.takes_value(true)
.default_value(&min_ttl)
.num_args(1)
.default_value(min_ttl)
.help("Minimum TTL, in seconds"),
)
.arg(
Arg::with_name("max_ttl")
.short("X")
Arg::new("max_ttl")
.short('X')
.long("max-ttl")
.takes_value(true)
.default_value(&max_ttl)
.num_args(1)
.default_value(max_ttl)
.help("Maximum TTL, in seconds"),
)
.arg(
Arg::with_name("err_ttl")
.short("E")
Arg::new("err_ttl")
.short('E')
.long("err-ttl")
.takes_value(true)
.default_value(&err_ttl)
.num_args(1)
.default_value(err_ttl)
.help("TTL for errors, in seconds"),
)
.arg(
Arg::with_name("disable_keepalive")
.short("K")
Arg::new("disable_keepalive")
.short('K')
.action(SetTrue)
.long("disable-keepalive")
.help("Disable keepalive"),
)
.arg(
Arg::with_name("disable_post")
.short("P")
Arg::new("disable_post")
.short('P')
.action(SetTrue)
.long("disable-post")
.help("Disable POST queries"),
)
.arg(
Arg::new("allow_odoh_post")
.short('O')
.action(SetTrue)
.long("allow-odoh-post")
.help("Allow POST queries over ODoH even if they have been disabed for DoH"),
);
#[cfg(feature = "tls")]
let options = options
.arg(
Arg::with_name("tls_cert_path")
.short("i")
Arg::new("tls_cert_path")
.short('i')
.long("tls-cert-path")
.takes_value(true)
.help("Path to a PEM-encoded certificates (only required for built-in TLS)"),
.num_args(1)
.help(
"Path to the PEM/PKCS#8-encoded certificates (only required for built-in TLS)",
),
)
.arg(
Arg::with_name("tls_cert_key_path")
.short("I")
Arg::new("tls_cert_key_path")
.short('I')
.long("tls-cert-key-path")
.takes_value(true)
.num_args(1)
.help("Path to the PEM-encoded secret keys (only required for built-in TLS)"),
);
let matches = options.get_matches();
globals.listen_address = matches.value_of("listen_address").unwrap().parse().unwrap();
globals.listen_address = matches
.get_one::<String>("listen_address")
.unwrap()
.parse()
.unwrap();
globals.server_address = matches
.value_of("server_address")
.get_one::<String>("server_address")
.unwrap()
.to_socket_addrs()
.unwrap()
.next()
.unwrap();
globals.local_bind_address = match matches.value_of("local_bind_address") {
globals.local_bind_address = match matches.get_one::<String>("local_bind_address") {
Some(address) => address.parse().unwrap(),
None => match globals.server_address {
SocketAddr::V4(_) => SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, 0)),
@ -146,21 +189,93 @@ pub fn parse_opts(globals: &mut Globals) {
)),
},
};
globals.path = matches.value_of("path").unwrap().to_string();
globals.path = matches.get_one::<String>("path").unwrap().to_string();
if !globals.path.starts_with('/') {
globals.path = format!("/{}", globals.path);
}
globals.max_clients = matches.value_of("max_clients").unwrap().parse().unwrap();
globals.timeout = Duration::from_secs(matches.value_of("timeout").unwrap().parse().unwrap());
globals.min_ttl = matches.value_of("min_ttl").unwrap().parse().unwrap();
globals.max_ttl = matches.value_of("max_ttl").unwrap().parse().unwrap();
globals.err_ttl = matches.value_of("err_ttl").unwrap().parse().unwrap();
globals.keepalive = !matches.is_present("disable_keepalive");
globals.disable_post = matches.is_present("disable_post");
globals.max_clients = matches
.get_one::<String>("max_clients")
.unwrap()
.parse()
.unwrap();
globals.timeout = Duration::from_secs(
matches
.get_one::<String>("timeout")
.unwrap()
.parse()
.unwrap(),
);
globals.max_concurrent_streams = matches
.get_one::<String>("max_concurrent")
.unwrap()
.parse()
.unwrap();
globals.min_ttl = matches
.get_one::<String>("min_ttl")
.unwrap()
.parse()
.unwrap();
globals.max_ttl = matches
.get_one::<String>("max_ttl")
.unwrap()
.parse()
.unwrap();
globals.err_ttl = matches
.get_one::<String>("err_ttl")
.unwrap()
.parse()
.unwrap();
globals.keepalive = !matches.get_flag("disable_keepalive");
globals.disable_post = matches.get_flag("disable_post");
globals.allow_odoh_post = matches.get_flag("allow_odoh_post");
#[cfg(feature = "tls")]
{
globals.tls_cert_path = matches.value_of("tls_cert_key_path").map(PathBuf::from);
globals.tls_cert_key_path = matches.value_of("tls_cert_key_path").map(PathBuf::from);
globals.tls_cert_path = matches
.get_one::<String>("tls_cert_path")
.map(PathBuf::from);
globals.tls_cert_key_path = matches
.get_one::<String>("tls_cert_key_path")
.map(PathBuf::from)
.or_else(|| globals.tls_cert_path.clone());
}
match matches.get_one::<String>("hostname") {
Some(hostname) => {
let mut builder =
dnsstamps::DoHBuilder::new(hostname.to_string(), globals.path.to_string());
if let Some(public_address) = matches.get_one::<String>("public_address") {
builder = builder.with_address(public_address.to_string());
}
if let Some(public_port) = matches.get_one::<String>("public_port") {
let public_port = public_port.parse().expect("Invalid public port");
builder = builder.with_port(public_port);
}
println!(
"Test DNS stamp to reach [{}] over DoH: [{}]\n",
hostname,
builder.serialize().unwrap()
);
let mut builder =
dnsstamps::ODoHTargetBuilder::new(hostname.to_string(), globals.path.to_string());
if let Some(public_port) = matches.get_one::<String>("public_port") {
let public_port = public_port.parse().expect("Invalid public port");
builder = builder.with_port(public_port);
}
println!(
"Test DNS stamp to reach [{}] over Oblivious DoH: [{}]\n",
hostname,
builder.serialize().unwrap()
);
println!("Check out https://dnscrypt.info/stamps/ to compute the actual stamps.\n")
}
_ => {
println!(
"Please provide a fully qualified hostname (-H <hostname> command-line option) to get \
test DNS stamps for your server.\n"
);
}
}
}

View file

@ -1,6 +1,8 @@
// Default address:port the server listens on (overridable with `-l`).
pub const LISTEN_ADDRESS: &str = "127.0.0.1:3000";
// Default cap on the number of simultaneous clients (`-c`).
pub const MAX_CLIENTS: usize = 512;
// Default cap on concurrent requests per client connection (`-C`).
pub const MAX_CONCURRENT_STREAMS: u32 = 16;
// Default URI path on which DoH queries are served (`-p`).
pub const PATH: &str = "/dns-query";
// Well-known path where the ODoH configurations are published.
pub const ODOH_CONFIGS_PATH: &str = "/.well-known/odohconfigs";
// Default upstream DNS resolver address (`-u`).
pub const SERVER_ADDRESS: &str = "9.9.9.9:53";
// Default upstream query timeout, in seconds (`-t`).
pub const TIMEOUT_SEC: u64 = 10;
// Default maximum TTL clamp, in seconds (one week) (`-X`).
pub const MAX_TTL: u32 = 86400 * 7;

View file

@ -1,27 +1,45 @@
[package]
name = "libdoh"
version = "0.3.1"
version = "0.9.11"
authors = ["Frank Denis <github@pureftpd.org>"]
description = "DoH library for the rust-doh app"
keywords = ["dns","https","doh","proxy"]
description = "DoH and Oblivious DoH library for the rust-doh app"
keywords = ["dns", "https", "doh", "odoh", "proxy"]
license = "MIT"
homepage = "https://github.com/jedisct1/rust-doh"
repository = "https://github.com/jedisct1/rust-doh"
categories = ["asynchronous", "network-programming","command-line-utilities"]
categories = ["asynchronous", "network-programming", "command-line-utilities"]
edition = "2018"
[features]
default = []
default = ["tls"]
tls = ["tokio-rustls"]
[dependencies]
anyhow = "1.0"
byteorder = "1.3"
base64 = "0.12"
futures = "0.3"
hyper = { version = "0.13", default-features = false, features = ["stream"] }
tokio = { version = "0.2.11", features = ["rt-threaded", "time", "tcp", "udp", "stream", "parking_lot"] }
tokio-rustls = { version = "0.13", optional = true }
anyhow = "1.0.97"
arc-swap = "1.7.1"
base64 = "0.22.1"
byteorder = "1.5.0"
bytes = "1.10.1"
futures = "0.3.31"
hyper = { version = "^0.14.32", default-features = false, features = [
"server",
"http1",
"http2",
"stream",
"runtime",
] }
odoh-rs = "1.0.3"
rand = "^0.8.5"
tokio = { version = "1.44.1", features = [
"net",
"rt-multi-thread",
"time",
"sync",
] }
tokio-rustls = { version = "^0.24.1", features = [
"early-data",
], optional = true }
rustls-pemfile = "^1.0.4"
[profile.release]
codegen-units = 1

21
src/libdoh/LICENSE Normal file
View file

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2018-2025 Frank Denis
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View file

@ -2,3 +2,8 @@ pub const DNS_QUERY_PARAM: &str = "dns";
// Upper bound on the length of an incoming DNS question.
pub const MAX_DNS_QUESTION_LEN: usize = 512;
// Upper bound on the length of a DNS response that will be relayed.
pub const MAX_DNS_RESPONSE_LEN: usize = 4096;
// Smallest packet length that can possibly be a valid DNS query.
pub const MIN_DNS_PACKET_LEN: usize = 17;
// NOTE(review): presumably a `stale-if-error` cache-control hint (seconds) — confirm usage.
pub const STALE_IF_ERROR_SECS: u32 = 86400;
// NOTE(review): presumably a `stale-while-revalidate` cache-control hint (seconds) — confirm usage.
pub const STALE_WHILE_REVALIDATE_SECS: u32 = 60;
// NOTE(review): presumably the poll interval for reloading TLS certificates — confirm.
pub const CERTS_WATCH_DELAY_SECS: u32 = 10;
// ODoH key rotation period (seconds); also the TTL used when serving ODoH configs.
pub const ODOH_KEY_ROTATION_SECS: u32 = 86400;
// At most 1/UDP_TCP_RATIO of the client slots may be used for TCP retries
// of truncated UDP answers.
pub const UDP_TCP_RATIO: usize = 8;

View file

@ -2,9 +2,13 @@ use anyhow::{ensure, Error};
use byteorder::{BigEndian, ByteOrder};
const DNS_HEADER_SIZE: usize = 12;
pub const DNS_OFFSET_FLAGS: usize = 2;
const DNS_MAX_HOSTNAME_SIZE: usize = 256;
const DNS_MAX_PACKET_SIZE: usize = 4096;
const DNS_OFFSET_QUESTION: usize = DNS_HEADER_SIZE;
const DNS_FLAGS_TC: u16 = 1u16 << 9;
const DNS_TYPE_OPT: u16 = 41;
const DNS_PTYPE_PADDING: u16 = 12;
@ -51,6 +55,11 @@ pub fn is_recoverable_error(packet: &[u8]) -> bool {
rcode == DNS_RCODE_SERVFAIL || rcode == DNS_RCODE_REFUSED
}
#[inline]
pub fn is_truncated(packet: &[u8]) -> bool {
    // A response is truncated when the TC bit is set in the DNS header flags.
    let flags = BigEndian::read_u16(&packet[DNS_OFFSET_FLAGS..]);
    (flags & DNS_FLAGS_TC) != 0
}
fn skip_name(packet: &[u8], offset: usize) -> Result<usize, Error> {
let packet_len = packet.len();
ensure!(offset < packet_len - 1, "Short packet");
@ -171,7 +180,7 @@ fn add_edns_section(packet: &mut Vec<u8>, max_payload_size: u16) -> Result<(), E
"Packet would be too large to add a new record"
);
arcount_inc(packet)?;
packet.extend(&opt_rr);
packet.extend(opt_rr);
Ok(())
}

View file

@ -1,7 +1,7 @@
use hyper::StatusCode;
use std::io;
#[allow(dead_code)]
use hyper::StatusCode;
#[derive(Debug)]
pub enum DoHError {
Incomplete,
@ -9,8 +9,11 @@ pub enum DoHError {
TooLarge,
UpstreamIssue,
UpstreamTimeout,
StaleKey,
Hyper(hyper::Error),
Io(io::Error),
ODoHConfigError(anyhow::Error),
TooManyTcpSessions,
}
impl std::error::Error for DoHError {}
@ -23,8 +26,11 @@ impl std::fmt::Display for DoHError {
DoHError::TooLarge => write!(fmt, "Too large"),
DoHError::UpstreamIssue => write!(fmt, "Upstream error"),
DoHError::UpstreamTimeout => write!(fmt, "Upstream timeout"),
DoHError::Hyper(e) => write!(fmt, "HTTP error: {}", e),
DoHError::Io(e) => write!(fmt, "IO error: {}", e),
DoHError::StaleKey => write!(fmt, "Stale key material"),
DoHError::Hyper(e) => write!(fmt, "HTTP error: {e}"),
DoHError::Io(e) => write!(fmt, "IO error: {e}"),
DoHError::ODoHConfigError(e) => write!(fmt, "ODoH config error: {e}"),
DoHError::TooManyTcpSessions => write!(fmt, "Too many TCP sessions"),
}
}
}
@ -37,8 +43,11 @@ impl From<DoHError> for StatusCode {
DoHError::TooLarge => StatusCode::PAYLOAD_TOO_LARGE,
DoHError::UpstreamIssue => StatusCode::BAD_GATEWAY,
DoHError::UpstreamTimeout => StatusCode::BAD_GATEWAY,
DoHError::StaleKey => StatusCode::UNAUTHORIZED,
DoHError::Hyper(_) => StatusCode::SERVICE_UNAVAILABLE,
DoHError::Io(_) => StatusCode::INTERNAL_SERVER_ERROR,
DoHError::ODoHConfigError(_) => StatusCode::INTERNAL_SERVER_ERROR,
DoHError::TooManyTcpSessions => StatusCode::SERVICE_UNAVAILABLE,
}
}
}

View file

@ -1,11 +1,13 @@
use std::net::SocketAddr;
#[cfg(feature = "tls")]
use std::path::PathBuf;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::time::Duration;
use tokio::runtime;
#[cfg(feature = "tls")]
use std::path::PathBuf;
use crate::odoh::ODoHRotator;
#[derive(Debug)]
pub struct Globals {
@ -22,11 +24,15 @@ pub struct Globals {
pub max_clients: usize,
pub timeout: Duration,
pub clients_count: ClientsCount,
pub max_concurrent_streams: u32,
pub min_ttl: u32,
pub max_ttl: u32,
pub err_ttl: u32,
pub keepalive: bool,
pub disable_post: bool,
pub allow_odoh_post: bool,
pub odoh_configs_path: String,
pub odoh_rotator: Arc<ODoHRotator>,
pub runtime_handle: runtime::Handle,
}
@ -35,6 +41,10 @@ pub struct Globals {
// Atomic counter of active client sessions, shared across tasks via an `Arc`.
pub struct ClientsCount(Arc<AtomicUsize>);
impl ClientsCount {
// Returns the current number of active clients (relaxed atomic load).
pub fn current(&self) -> usize {
    self.0.load(Ordering::Relaxed)
}
// Increments the counter and returns the PREVIOUS value (`fetch_add` semantics).
pub fn increment(&self) -> usize {
    self.0.fetch_add(1, Ordering::Relaxed)
}
@ -43,7 +53,11 @@ impl ClientsCount {
let mut count;
while {
count = self.0.load(Ordering::Relaxed);
count > 0 && self.0.compare_and_swap(count, count - 1, Ordering::Relaxed) != count
count > 0
&& self
.0
.compare_exchange(count, count - 1, Ordering::Relaxed, Ordering::Relaxed)
!= Ok(count)
} {}
count
}

View file

@ -2,33 +2,69 @@ mod constants;
pub mod dns;
mod errors;
mod globals;
pub mod odoh;
#[cfg(feature = "tls")]
mod tls;
use std::net::SocketAddr;
use std::pin::Pin;
use std::sync::Arc;
use std::time::Duration;
use base64::engine::Engine;
use byteorder::{BigEndian, ByteOrder};
use futures::prelude::*;
use futures::task::{Context, Poll};
use hyper::http;
use hyper::server::conn::Http;
use hyper::{Body, HeaderMap, Method, Request, Response, StatusCode};
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
use tokio::net::{TcpListener, TcpSocket, UdpSocket};
use tokio::runtime;
use crate::constants::*;
pub use crate::errors::*;
pub use crate::globals::*;
#[cfg(feature = "tls")]
use crate::tls::*;
pub mod reexports {
pub use tokio;
}
use futures::prelude::*;
use futures::task::{Context, Poll};
use hyper::http;
use hyper::server::conn::Http;
use hyper::{Body, Method, Request, Response, StatusCode};
use std::pin::Pin;
use std::sync::Arc;
use std::time::Duration;
use tokio::io::{AsyncRead, AsyncWrite};
use tokio::net::{TcpListener, UdpSocket};
use tokio::runtime;
// URL-safe base64 engine used for the `dns` query parameter: encodes without
// padding, and tolerates both padded and unpadded input when decoding.
const BASE64_URL_SAFE_NO_PAD: base64::engine::GeneralPurpose =
    base64::engine::general_purpose::GeneralPurpose::new(
        &base64::alphabet::URL_SAFE,
        base64::engine::general_purpose::GeneralPurposeConfig::new()
            .with_encode_padding(false)
            .with_decode_padding_mode(base64::engine::DecodePaddingMode::Indifferent),
    );
// A raw DNS answer from the upstream resolver, paired with the TTL (seconds)
// that `build_response` will advertise for it.
#[derive(Clone, Debug)]
struct DnsResponse {
    packet: Vec<u8>,
    ttl: u32,
}
/// The two flavors of query this server handles: plain DoH and Oblivious DoH.
#[derive(Clone, Debug)]
enum DoHType {
    Standard,
    Oblivious,
}

impl DoHType {
    /// Returns the MIME content type associated with this flavor, as an owned string.
    fn as_str(&self) -> String {
        let content_type = match self {
            DoHType::Standard => "application/dns-message",
            DoHType::Oblivious => "application/oblivious-dns-message",
        };
        content_type.to_string()
    }
}
// The request handler. Cheap to clone: all shared configuration and state
// lives behind the `Arc<Globals>`.
#[derive(Clone, Debug)]
pub struct DoH {
    pub globals: Arc<Globals>,
}
#[allow(clippy::unnecessary_wraps)]
fn http_error(status_code: StatusCode) -> Result<Response<Body>, http::Error> {
let response = Response::builder()
.status(status_code)
@ -58,10 +94,11 @@ where
}
}
#[allow(clippy::type_complexity)]
impl hyper::service::Service<http::Request<Body>> for DoH {
type Response = Response<Body>;
type Error = http::Error;
type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send>>;
type Response = Response<Body>;
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
@ -69,36 +106,58 @@ impl hyper::service::Service<http::Request<Body>> for DoH {
fn call(&mut self, req: Request<Body>) -> Self::Future {
let globals = &self.globals;
if req.uri().path() != globals.path {
return Box::pin(async { http_error(StatusCode::NOT_FOUND) });
}
let self_inner = self.clone();
match *req.method() {
Method::POST => Box::pin(async move { self_inner.serve_post(req).await }),
Method::GET => Box::pin(async move { self_inner.serve_get(req).await }),
_ => Box::pin(async { http_error(StatusCode::METHOD_NOT_ALLOWED) }),
if req.uri().path() == globals.path {
match *req.method() {
Method::POST => Box::pin(async move { self_inner.serve_post(req).await }),
Method::GET => Box::pin(async move { self_inner.serve_get(req).await }),
_ => Box::pin(async { http_error(StatusCode::METHOD_NOT_ALLOWED) }),
}
} else if req.uri().path() == globals.odoh_configs_path {
match *req.method() {
Method::GET => Box::pin(async move { self_inner.serve_odoh_configs().await }),
_ => Box::pin(async { http_error(StatusCode::METHOD_NOT_ALLOWED) }),
}
} else {
Box::pin(async { http_error(StatusCode::NOT_FOUND) })
}
}
}
impl DoH {
async fn serve_post(&self, req: Request<Body>) -> Result<Response<Body>, http::Error> {
if self.globals.disable_post {
return http_error(StatusCode::METHOD_NOT_ALLOWED);
}
if let Err(response) = Self::check_content_type(&req) {
return Ok(response);
}
match self.read_body_and_proxy(req.into_body()).await {
Err(e) => http_error(StatusCode::from(e)),
Ok(res) => Ok(res),
async fn serve_get(&self, req: Request<Body>) -> Result<Response<Body>, http::Error> {
match Self::parse_content_type(&req) {
Ok(DoHType::Standard) => self.serve_doh_get(req).await,
Ok(DoHType::Oblivious) => self.serve_odoh_get(req).await,
Err(response) => Ok(response),
}
}
async fn serve_get(&self, req: Request<Body>) -> Result<Response<Body>, http::Error> {
let query = req.uri().query().unwrap_or("");
async fn serve_post(&self, req: Request<Body>) -> Result<Response<Body>, http::Error> {
match Self::parse_content_type(&req) {
Ok(DoHType::Standard) => self.serve_doh_post(req).await,
Ok(DoHType::Oblivious) => self.serve_odoh_post(req).await,
Err(response) => Ok(response),
}
}
/// Resolves a decoded DNS query upstream and wraps the answer in a
/// cacheable DoH HTTP response; failures map to the matching status code.
async fn serve_doh_query(&self, query: Vec<u8>) -> Result<Response<Body>, http::Error> {
    let dns_response = match self.proxy(query).await {
        Err(e) => return http_error(StatusCode::from(e)),
        Ok(dns_response) => dns_response,
    };
    let built = self.build_response(
        dns_response.packet,
        dns_response.ttl,
        DoHType::Standard.as_str(),
        true,
    );
    match built {
        Err(e) => http_error(StatusCode::from(e)),
        Ok(resp) => Ok(resp),
    }
}
fn query_from_query_string(&self, req: Request<Body>) -> Option<Vec<u8>> {
let http_query = req.uri().query().unwrap_or("");
let mut question_str = None;
for parts in query.split('&') {
for parts in http_query.split('&') {
let mut kv = parts.split('=');
if let Some(k) = kv.next() {
if k == DNS_QUERY_PARAM {
@ -106,53 +165,162 @@ impl DoH {
}
}
}
let question = match question_str.and_then(|question_str| {
base64::decode_config(question_str, base64::URL_SAFE_NO_PAD).ok()
}) {
Some(question) => question,
_ => {
return http_error(StatusCode::BAD_REQUEST);
if let Some(question_str) = question_str {
if question_str.len() > MAX_DNS_QUESTION_LEN * 4 / 3 {
return None;
}
}
let query = match question_str
.and_then(|question_str| BASE64_URL_SAFE_NO_PAD.decode(question_str).ok())
{
Some(query) => query,
_ => return None,
};
match self.proxy(question).await {
Some(query)
}
/// Handles a plain-DoH GET request: the query must arrive base64url-encoded
/// in the query string; anything else is a 400.
async fn serve_doh_get(&self, req: Request<Body>) -> Result<Response<Body>, http::Error> {
    match self.query_from_query_string(req) {
        None => http_error(StatusCode::BAD_REQUEST),
        Some(query) => self.serve_doh_query(query).await,
    }
}
/// Handles a plain-DoH POST request; rejected when POST support has been
/// disabled globally.
async fn serve_doh_post(&self, req: Request<Body>) -> Result<Response<Body>, http::Error> {
    if self.globals.disable_post {
        return http_error(StatusCode::METHOD_NOT_ALLOWED);
    }
    match self.read_body(req.into_body()).await {
        Err(e) => http_error(StatusCode::from(e)),
        Ok(query) => self.serve_doh_query(query).await,
    }
}
async fn serve_odoh(&self, encrypted_query: Vec<u8>) -> Result<Response<Body>, http::Error> {
let odoh_public_key = (*self.globals.odoh_rotator).clone().current_public_key();
let (query, context) = match (*odoh_public_key).clone().decrypt_query(encrypted_query) {
Ok((q, context)) => (q.to_vec(), context),
Err(e) => return http_error(StatusCode::from(e)),
};
let resp = match self.proxy(query).await {
Ok(resp) => resp,
Err(e) => return http_error(StatusCode::from(e)),
};
let encrypted_resp = match context.encrypt_response(resp.packet) {
Ok(resp) => self.build_response(resp, 0u32, DoHType::Oblivious.as_str(), false),
Err(e) => return http_error(StatusCode::from(e)),
};
match encrypted_resp {
Ok(resp) => Ok(resp),
Err(e) => http_error(StatusCode::from(e)),
Ok(res) => Ok(res),
}
}
fn check_content_type(req: &Request<Body>) -> Result<(), Response<Body>> {
/// Handles an ODoH GET request: the encrypted message travels in the same
/// `dns` query parameter used by plain DoH.
async fn serve_odoh_get(&self, req: Request<Body>) -> Result<Response<Body>, http::Error> {
    match self.query_from_query_string(req) {
        None => http_error(StatusCode::BAD_REQUEST),
        Some(encrypted_query) => self.serve_odoh(encrypted_query).await,
    }
}
/// Handles an ODoH POST request. `allow_odoh_post` lets relays keep using
/// POST even when POST has been disabled for plain DoH.
async fn serve_odoh_post(&self, req: Request<Body>) -> Result<Response<Body>, http::Error> {
    if self.globals.disable_post && !self.globals.allow_odoh_post {
        return http_error(StatusCode::METHOD_NOT_ALLOWED);
    }
    match self.read_body(req.into_body()).await {
        Err(e) => http_error(StatusCode::from(e)),
        Ok(encrypted_query) => self.serve_odoh(encrypted_query).await,
    }
}
/// Serves the current ODoH configuration so clients can fetch fresh key
/// material; cached for the key rotation period.
async fn serve_odoh_configs(&self) -> Result<Response<Body>, http::Error> {
    let odoh_public_key = (*self.globals.odoh_rotator).clone().current_public_key();
    let configs = (*odoh_public_key).clone().into_config();
    let response = self.build_response(
        configs,
        ODOH_KEY_ROTATION_SECS,
        "application/octet-stream".to_string(),
        true,
    );
    match response {
        Err(e) => http_error(StatusCode::from(e)),
        Ok(resp) => Ok(resp),
    }
}
/// Scans the `Accept` header, media range by media range, and returns the
/// first supported content type. Parameters after `;` (e.g. `;q=0.5`) are
/// ignored; returns `None` when the header is absent or nothing matches.
fn acceptable_content_type(
    headers: &HeaderMap,
    content_types: &[&'static str],
) -> Option<&'static str> {
    let accept = headers.get(hyper::header::ACCEPT)?;
    let accept_str = accept.to_str().unwrap_or("");
    for part in accept_str.split(',') {
        let media_type = match part.trim().split(';').next() {
            None => continue,
            Some(token) => token.trim().to_ascii_lowercase(),
        };
        for &content_type in content_types {
            if content_type == media_type {
                return Some(content_type);
            }
        }
    }
    None
}
fn parse_content_type(req: &Request<Body>) -> Result<DoHType, Response<Body>> {
const CT_DOH: &str = "application/dns-message";
const CT_ODOH: &str = "application/oblivious-dns-message";
let headers = req.headers();
let content_type = match headers.get(hyper::header::CONTENT_TYPE) {
None => {
let acceptable_content_type =
Self::acceptable_content_type(headers, &[CT_DOH, CT_ODOH]);
match acceptable_content_type {
None => {
let response = Response::builder()
.status(StatusCode::NOT_ACCEPTABLE)
.body(Body::empty())
.unwrap();
return Err(response);
}
Some(content_type) => content_type,
}
}
Some(content_type) => match content_type.to_str() {
Err(_) => {
let response = Response::builder()
.status(StatusCode::BAD_REQUEST)
.body(Body::empty())
.unwrap();
return Err(response);
}
Ok(content_type) => content_type,
},
};
match content_type.to_ascii_lowercase().as_str() {
CT_DOH => Ok(DoHType::Standard),
CT_ODOH => Ok(DoHType::Oblivious),
_ => {
let response = Response::builder()
.status(StatusCode::NOT_ACCEPTABLE)
.status(StatusCode::UNSUPPORTED_MEDIA_TYPE)
.body(Body::empty())
.unwrap();
return Err(response);
Err(response)
}
Some(content_type) => content_type.to_str(),
};
let content_type = match content_type {
Err(_) => {
let response = Response::builder()
.status(StatusCode::BAD_REQUEST)
.body(Body::empty())
.unwrap();
return Err(response);
}
Ok(content_type) => content_type.to_lowercase(),
};
if content_type != "application/dns-message" {
let response = Response::builder()
.status(StatusCode::UNSUPPORTED_MEDIA_TYPE)
.body(Body::empty())
.unwrap();
return Err(response);
}
Ok(())
}
async fn read_body_and_proxy(&self, mut body: Body) -> Result<Response<Body>, DoHError> {
async fn read_body(&self, mut body: Body) -> Result<Vec<u8>, DoHError> {
let mut sum_size = 0;
let mut query = vec![];
while let Some(chunk) = body.next().await {
@ -163,38 +331,80 @@ impl DoH {
}
query.extend(chunk);
}
let response = self.proxy(query).await?;
Ok(response)
Ok(query)
}
async fn proxy(&self, query: Vec<u8>) -> Result<Response<Body>, DoHError> {
async fn proxy(&self, query: Vec<u8>) -> Result<DnsResponse, DoHError> {
let proxy_timeout = self.globals.timeout;
let timeout_res = tokio::time::timeout(proxy_timeout, self._proxy(query)).await;
timeout_res.map_err(|_| DoHError::UpstreamTimeout)?
}
// NOTE(review): this span is a rendered diff — pre-change and post-change
// lines of `_proxy` are interleaved (e.g. the two signatures below and the
// duplicated `(min_ttl, max_ttl, err_ttl)` binding), and one interior hunk is
// elided at the `@ -206,16 ...` marker. It will not compile as-is. The
// post-change version resolves over UDP first and retries over TCP when the
// answer is truncated.
async fn _proxy(&self, mut query: Vec<u8>) -> Result<Response<Body>, DoHError> {
async fn _proxy(&self, mut query: Vec<u8>) -> Result<DnsResponse, DoHError> {
// Reject anything shorter than a minimal DNS packet before touching the network.
if query.len() < MIN_DNS_PACKET_LEN {
return Err(DoHError::Incomplete);
}
// Advertise our maximum response size via EDNS; failure is deliberately ignored.
let _ = dns::set_edns_max_payload_size(&mut query, MAX_DNS_RESPONSE_LEN as _);
let globals = &self.globals;
// NOTE(review): the following run of lines mixes removed (UDP-only) code
// with kept lines such as the `packet` buffer allocation.
let mut socket = UdpSocket::bind(&globals.local_bind_address)
.await
.map_err(DoHError::Io)?;
let expected_server_address = globals.server_address;
let (min_ttl, max_ttl, err_ttl) = (globals.min_ttl, globals.max_ttl, globals.err_ttl);
socket
.send_to(&query, &globals.server_address)
.map_err(DoHError::Io)
.await?;
let mut packet = vec![0; MAX_DNS_RESPONSE_LEN];
let (len, response_server_address) =
socket.recv_from(&mut packet).map_err(DoHError::Io).await?;
if len < MIN_DNS_PACKET_LEN || expected_server_address != response_server_address {
return Err(DoHError::UpstreamIssue);
let (min_ttl, max_ttl, err_ttl) = (globals.min_ttl, globals.max_ttl, globals.err_ttl);
// UDP
{
// Bind a fresh local socket for this query and send it upstream.
let socket = UdpSocket::bind(&globals.local_bind_address)
.await
.map_err(DoHError::Io)?;
let expected_server_address = globals.server_address;
socket
.send_to(&query, &globals.server_address)
.map_err(DoHError::Io)
.await?;
let (len, response_server_address) =
socket.recv_from(&mut packet).map_err(DoHError::Io).await?;
// Guard against spoofed replies: the response must come from the queried
// server address and be at least a minimal DNS packet.
if len < MIN_DNS_PACKET_LEN || expected_server_address != response_server_address {
return Err(DoHError::UpstreamIssue);
}
packet.truncate(len);
}
packet.truncate(len);
// TCP
// Retry over TCP when the UDP answer has the truncation (TC) bit set.
if dns::is_truncated(&packet) {
// Cap concurrent TCP fallbacks to max_clients / UDP_TCP_RATIO sessions.
let clients_count = self.globals.clients_count.current();
if self.globals.max_clients >= UDP_TCP_RATIO
&& clients_count >= self.globals.max_clients / UDP_TCP_RATIO
{
return Err(DoHError::TooManyTcpSessions);
}
let socket = match globals.server_address {
SocketAddr::V4(_) => TcpSocket::new_v4(),
SocketAddr::V6(_) => TcpSocket::new_v6(),
}
.map_err(DoHError::Io)?;
let mut ext_socket = socket
.connect(globals.server_address)
.await
.map_err(DoHError::Io)?;
ext_socket.set_nodelay(true).map_err(DoHError::Io)?;
// DNS-over-TCP frames each message with a 2-byte big-endian length prefix.
// `[0u8, 0]` is a two-element array literal, i.e. the same as `[0u8; 2]`.
let mut binlen = [0u8, 0];
BigEndian::write_u16(&mut binlen, query.len() as u16);
ext_socket.write_all(&binlen).await.map_err(DoHError::Io)?;
ext_socket.write_all(&query).await.map_err(DoHError::Io)?;
ext_socket.flush().await.map_err(DoHError::Io)?;
ext_socket
.read_exact(&mut binlen)
.await
.map_err(DoHError::Io)?;
let packet_len = BigEndian::read_u16(&binlen) as usize;
// Sanity-check the announced response length before allocating for it.
if !(MIN_DNS_PACKET_LEN..=MAX_DNS_RESPONSE_LEN).contains(&packet_len) {
return Err(DoHError::UpstreamIssue);
}
packet = vec![0u8; packet_len];
ext_socket
.read_exact(&mut packet)
.await
.map_err(DoHError::Io)?;
}
// Use the shorter error TTL for recoverable DNS errors.
let ttl = if dns::is_recoverable_error(&packet) {
err_ttl
} else {
// (hunk boundary — the min_ttl/max_ttl clamping branch is elided here)
@ -206,16 +416,35 @@ impl DoH {
// Pad the response (presumably EDNS padding per RFC 8467 — TODO confirm);
// packets too large to pad just skip it rather than fail the query.
dns::add_edns_padding(&mut packet)
.map_err(|_| DoHError::TooLarge)
.ok();
Ok(DnsResponse { packet, ttl })
}
fn build_response(
&self,
packet: Vec<u8>,
ttl: u32,
content_type: String,
cors: bool,
) -> Result<Response<Body>, DoHError> {
let packet_len = packet.len();
let response = Response::builder()
let mut response_builder = Response::builder()
.header(hyper::header::CONTENT_LENGTH, packet_len)
.header(hyper::header::CONTENT_TYPE, "application/dns-message")
.header(hyper::header::CONTENT_TYPE, content_type.as_str())
.header(
hyper::header::CACHE_CONTROL,
format!("max-age={}", ttl).as_str(),
)
format!(
"max-age={ttl}, stale-if-error={STALE_IF_ERROR_SECS}, \
stale-while-revalidate={STALE_WHILE_REVALIDATE_SECS}"
)
.as_str(),
);
if cors {
response_builder =
response_builder.header(hyper::header::ACCESS_CONTROL_ALLOW_ORIGIN, "*");
}
let response = response_builder
.body(Body::from(packet))
.unwrap();
.map_err(|_| DoHError::InvalidData)?;
Ok(response)
}
@ -241,15 +470,11 @@ impl DoH {
async fn start_without_tls(
self,
mut listener: TcpListener,
listener: TcpListener,
server: Http<LocalExecutor>,
) -> Result<(), DoHError> {
let listener_service = async {
while let Some(stream) = listener.incoming().next().await {
let stream = match stream {
Ok(stream) => stream,
Err(_) => continue,
};
while let Ok((stream, _client_addr)) = listener.accept().await {
self.clone().client_serve(stream, server.clone()).await;
}
Ok(()) as Result<(), DoHError>
@ -265,32 +490,33 @@ impl DoH {
.map_err(DoHError::Io)?;
let path = &self.globals.path;
#[cfg(feature = "tls")]
let tls_acceptor = match (&self.globals.tls_cert_path, &self.globals.tls_cert_key_path) {
(Some(tls_cert_path), Some(tls_cert_key_path)) => {
Some(create_tls_acceptor(tls_cert_path, tls_cert_key_path).unwrap())
}
_ => None,
};
let tls_enabled: bool;
#[cfg(not(feature = "tls"))]
let tls_acceptor: Option<()> = None;
if tls_acceptor.is_some() {
println!("Listening on https://{}{}", listen_address, path);
{
tls_enabled = false;
}
#[cfg(feature = "tls")]
{
tls_enabled =
self.globals.tls_cert_path.is_some() && self.globals.tls_cert_key_path.is_some();
}
if tls_enabled {
println!("Listening on https://{listen_address}{path}");
} else {
println!("Listening on http://{}{}", listen_address, path);
println!("Listening on http://{listen_address}{path}");
}
let mut server = Http::new();
server.keep_alive(self.globals.keepalive);
server.http1_keep_alive(self.globals.keepalive);
server.http2_max_concurrent_streams(self.globals.max_concurrent_streams);
server.pipeline_flush(true);
let executor = LocalExecutor::new(self.globals.runtime_handle.clone());
let server = server.with_executor(executor);
#[cfg(feature = "tls")]
{
if let Some(tls_acceptor) = tls_acceptor {
self.start_with_tls(tls_acceptor, listener, server).await?;
if tls_enabled {
self.start_with_tls(listener, server).await?;
return Ok(());
}
}

132
src/libdoh/src/odoh.rs Normal file
View file

@ -0,0 +1,132 @@
use std::fmt;
use std::sync::Arc;
use std::time::Duration;
use arc_swap::ArcSwap;
use odoh_rs::{
Deserialize, ObliviousDoHConfig, ObliviousDoHConfigs, ObliviousDoHKeyPair, ObliviousDoHMessage,
ObliviousDoHMessagePlaintext, OdohSecret, ResponseNonce, Serialize,
};
use rand::Rng;
use tokio::runtime;
use crate::constants::ODOH_KEY_ROTATION_SECS;
use crate::errors::DoHError;
/// An Oblivious DoH server key: the key pair itself plus the serialized
/// client-facing configuration advertising it.
#[derive(Clone)]
pub struct ODoHPublicKey {
/// Key pair used to decrypt incoming oblivious queries.
key_pair: ObliviousDoHKeyPair,
/// Pre-serialized `ObliviousDoHConfigs` blob served to clients.
serialized_configs: Vec<u8>,
}
/// Opaque `Debug`: prints only the type name — presumably so key material
/// never ends up in logs.
impl fmt::Debug for ODoHPublicKey {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // A zero-field `debug_struct(...).finish()` renders as just the name,
        // so writing the name directly is output-identical.
        f.write_str("ODoHPublicKey")
    }
}
/// Per-query state required to encrypt the DNS response back to the client
/// that sent the corresponding oblivious query.
#[derive(Clone, Debug)]
pub struct ODoHQueryContext {
/// Plaintext form of the decrypted query message.
query: ObliviousDoHMessagePlaintext,
/// Secret returned by `odoh_rs::decrypt_query`; consumed when encrypting
/// the response.
server_secret: OdohSecret,
}
impl ODoHPublicKey {
    /// Generates a fresh ODoH key pair together with its serialized
    /// `ObliviousDoHConfigs`, ready to be handed out to clients.
    pub fn new() -> Result<ODoHPublicKey, DoHError> {
        let key_pair = ObliviousDoHKeyPair::new(&mut rand::thread_rng());
        let config = ObliviousDoHConfig::from(key_pair.public().clone());
        let mut serialized_configs = Vec::new();
        ObliviousDoHConfigs::from(vec![config])
            .serialize(&mut serialized_configs)
            .map_err(|e| DoHError::ODoHConfigError(e.into()))?;
        Ok(ODoHPublicKey {
            key_pair,
            serialized_configs,
        })
    }

    /// Consumes the key and yields the serialized configuration blob.
    pub fn into_config(self) -> Vec<u8> {
        self.serialized_configs
    }

    /// Decrypts an encrypted oblivious query.
    ///
    /// Fails with `StaleKey` when the query targets a different key
    /// identifier, and with `InvalidData` on any parse or decryption failure.
    /// On success, returns the plaintext DNS message along with the context
    /// needed to encrypt the matching response.
    pub fn decrypt_query(
        self,
        encrypted_query: Vec<u8>,
    ) -> Result<(Vec<u8>, ODoHQueryContext), DoHError> {
        let mut encrypted_query_bytes = bytes::Bytes::from(encrypted_query);
        let odoh_query = ObliviousDoHMessage::deserialize(&mut encrypted_query_bytes)
            .map_err(|_| DoHError::InvalidData)?;
        // The query must have been encrypted under our current key.
        let key_id = self
            .key_pair
            .public()
            .identifier()
            .map_err(|_| DoHError::InvalidData)?;
        if key_id != odoh_query.key_id() {
            return Err(DoHError::StaleKey);
        }
        let (query, server_secret) = odoh_rs::decrypt_query(&odoh_query, &self.key_pair)
            .map_err(|_| DoHError::InvalidData)?;
        let context = ODoHQueryContext {
            query: query.clone(),
            server_secret,
        };
        Ok((query.into_msg().to_vec(), context))
    }
}
impl ODoHQueryContext {
    /// Encrypts a plaintext DNS response for the client whose query produced
    /// this context, using a freshly drawn random nonce, and returns the
    /// serialized encrypted message.
    pub fn encrypt_response(self, response_body: Vec<u8>) -> Result<Vec<u8>, DoHError> {
        let response_nonce = rand::thread_rng().r#gen::<ResponseNonce>();
        let response_plaintext = ObliviousDoHMessagePlaintext::new(response_body, 0);
        let encrypted_response = odoh_rs::encrypt_response(
            &self.query,
            &response_plaintext,
            self.server_secret,
            response_nonce,
        )
        .map_err(|_| DoHError::InvalidData)?;
        // Serialize the encrypted message into its wire format.
        let mut serialized_response = Vec::new();
        encrypted_response
            .serialize(&mut serialized_response)
            .map_err(|_| DoHError::InvalidData)?;
        Ok(serialized_response)
    }
}
/// Holder for the periodically rotated ODoH key, shared through an
/// atomically swappable pointer so readers never block.
#[derive(Clone, Debug)]
pub struct ODoHRotator {
/// Current key; replaced in place by the background rotation task.
key: Arc<ArcSwap<ODoHPublicKey>>,
}
impl ODoHRotator {
    /// Creates a rotator seeded with a fresh ODoH key and spawns a background
    /// task on `runtime_handle` that generates a replacement key every
    /// `ODOH_KEY_ROTATION_SECS` seconds.
    ///
    /// # Errors
    /// Returns the `DoHError` from initial key generation. (Previously this
    /// panicked despite the `Result` return type; the error is now propagated
    /// so callers can decide how to react.)
    pub fn new(runtime_handle: runtime::Handle) -> Result<ODoHRotator, DoHError> {
        let public_key = Arc::new(ArcSwap::from_pointee(ODoHPublicKey::new()?));
        let current_key = Arc::clone(&public_key);
        runtime_handle.spawn(async move {
            loop {
                tokio::time::sleep(Duration::from_secs(ODOH_KEY_ROTATION_SECS.into())).await;
                match ODoHPublicKey::new() {
                    Ok(key) => current_key.store(Arc::new(key)),
                    // Rotation failure is non-fatal: keep serving the old key.
                    Err(e) => eprintln!("ODoH key rotation error: {e}"),
                }
            }
        });
        Ok(ODoHRotator { key: public_key })
    }

    /// Returns the key currently in use, loaded atomically.
    pub fn current_public_key(&self) -> Arc<ODoHPublicKey> {
        // `load_full` yields an owned Arc directly, avoiding the redundant
        // clone of the outer Arc the previous version performed.
        self.key.load_full()
    }
}

View file

@ -1,105 +1,165 @@
use crate::errors::*;
use crate::{DoH, LocalExecutor};
use hyper::server::conn::Http;
use std::fs::File;
use std::io::{self, BufReader};
use std::io::{self, BufReader, Cursor, Read};
use std::path::Path;
use std::sync::Arc;
use tokio::net::TcpListener;
use tokio::stream::StreamExt;
use std::time::Duration;
use futures::{future::FutureExt, join, select};
use hyper::server::conn::Http;
use tokio::{
net::TcpListener,
sync::mpsc::{self, Receiver},
};
use tokio_rustls::{
rustls::{internal::pemfile, NoClientAuth, ServerConfig},
rustls::{Certificate, PrivateKey, ServerConfig},
TlsAcceptor,
};
use crate::constants::CERTS_WATCH_DELAY_SECS;
use crate::errors::*;
use crate::{DoH, LocalExecutor};
pub fn create_tls_acceptor<P, P2>(certs_path: P, certs_keys_path: P2) -> io::Result<TlsAcceptor>
where
P: AsRef<Path>,
P2: AsRef<Path>,
{
let certs = {
let certs: Vec<_> = {
let certs_path_str = certs_path.as_ref().display().to_string();
let mut reader = BufReader::new(File::open(certs_path).map_err(|e| {
io::Error::new(
e.kind(),
format!(
"Unable to load the certificates [{}]: {}",
certs_path_str,
e.to_string()
),
format!("Unable to load the certificates [{certs_path_str}]: {e}"),
)
})?);
pemfile::certs(&mut reader).map_err(|_| {
rustls_pemfile::certs(&mut reader).map_err(|_| {
io::Error::new(
io::ErrorKind::InvalidInput,
"Unable to parse the certificates",
)
})?
};
let certs_keys = {
}
.drain(..)
.map(Certificate)
.collect();
let certs_keys: Vec<_> = {
let certs_keys_path_str = certs_keys_path.as_ref().display().to_string();
let mut reader = BufReader::new(File::open(certs_keys_path).map_err(|e| {
io::Error::new(
e.kind(),
format!(
"Unable to load the certificate keys [{}]: {}",
certs_keys_path_str,
e.to_string()
),
)
})?);
let keys = pemfile::pkcs8_private_keys(&mut reader).map_err(|_| {
let encoded_keys = {
let mut encoded_keys = vec![];
File::open(certs_keys_path)
.map_err(|e| {
io::Error::new(
e.kind(),
format!("Unable to load the certificate keys [{certs_keys_path_str}]: {e}"),
)
})?
.read_to_end(&mut encoded_keys)?;
encoded_keys
};
let mut reader = Cursor::new(encoded_keys);
let pkcs8_keys = rustls_pemfile::pkcs8_private_keys(&mut reader).map_err(|_| {
io::Error::new(
io::ErrorKind::InvalidInput,
"Unable to parse the certificates private keys",
"Unable to parse the certificates private keys (PKCS8)",
)
})?;
reader.set_position(0);
let mut rsa_keys = rustls_pemfile::rsa_private_keys(&mut reader).map_err(|_| {
io::Error::new(
io::ErrorKind::InvalidInput,
"Unable to parse the certificates private keys (RSA)",
)
})?;
let mut keys = pkcs8_keys;
keys.append(&mut rsa_keys);
if keys.is_empty() {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"No private keys found",
"No private keys found - Make sure that they are in PKCS#8/PEM format",
));
}
keys
keys.drain(..).map(PrivateKey).collect()
};
let mut server_config = ServerConfig::new(NoClientAuth::new());
let has_valid_cert_and_key = certs_keys.into_iter().any(|certs_key| {
server_config
.set_single_cert(certs.clone(), certs_key)
.is_ok()
});
if !has_valid_cert_and_key {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid private key for the given certificate",
));
}
let mut server_config = certs_keys
.into_iter()
.find_map(|certs_key| {
let server_config_builder = ServerConfig::builder()
.with_safe_defaults()
.with_no_client_auth();
match server_config_builder.with_single_cert(certs.clone(), certs_key) {
Ok(found_config) => Some(found_config),
_ => None,
}
})
.ok_or_else(|| {
io::Error::new(
io::ErrorKind::InvalidInput,
"Unable to find a valid certificate and key",
)
})?;
server_config.alpn_protocols = vec![b"h2".to_vec(), b"http/1.1".to_vec()];
Ok(TlsAcceptor::from(Arc::new(server_config)))
}
impl DoH {
pub async fn start_with_tls(
async fn start_https_service(
self,
tls_acceptor: TlsAcceptor,
mut listener: TcpListener,
mut tls_acceptor_receiver: Receiver<TlsAcceptor>,
listener: TcpListener,
server: Http<LocalExecutor>,
) -> Result<(), DoHError> {
let mut tls_acceptor: Option<TlsAcceptor> = None;
let listener_service = async {
while let Some(raw_stream) = listener.incoming().next().await {
let raw_stream = match raw_stream {
Ok(raw_stream) => raw_stream,
Err(_) => continue,
};
let stream = match tls_acceptor.accept(raw_stream).await {
Ok(stream) => stream,
Err(_) => continue,
};
self.clone().client_serve(stream, server.clone()).await;
loop {
select! {
tcp_cnx = listener.accept().fuse() => {
if tls_acceptor.is_none() || tcp_cnx.is_err() {
continue;
}
let (raw_stream, _client_addr) = tcp_cnx.unwrap();
if let Ok(stream) = tls_acceptor.as_ref().unwrap().accept(raw_stream).await {
self.clone().client_serve(stream, server.clone()).await
}
}
new_tls_acceptor = tls_acceptor_receiver.recv().fuse() => {
if new_tls_acceptor.is_none() {
break;
}
tls_acceptor = new_tls_acceptor;
}
complete => break
}
}
Ok(()) as Result<(), DoHError>
};
listener_service.await?;
Ok(())
}
pub async fn start_with_tls(
self,
listener: TcpListener,
server: Http<LocalExecutor>,
) -> Result<(), DoHError> {
let certs_path = self.globals.tls_cert_path.as_ref().unwrap().clone();
let certs_keys_path = self.globals.tls_cert_key_path.as_ref().unwrap().clone();
let (tls_acceptor_sender, tls_acceptor_receiver) = mpsc::channel(1);
let https_service = self.start_https_service(tls_acceptor_receiver, listener, server);
let cert_service = async {
loop {
match create_tls_acceptor(&certs_path, &certs_keys_path) {
Ok(tls_acceptor) => {
if tls_acceptor_sender.send(tls_acceptor).await.is_err() {
break;
}
}
Err(e) => eprintln!("TLS certificates error: {e}"),
}
tokio::time::sleep(Duration::from_secs(CERTS_WATCH_DELAY_SECS.into())).await;
}
Ok::<_, DoHError>(())
};
join!(https_service, cert_service).0
}
}

View file

@ -1,5 +1,5 @@
#[global_allocator]
static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;
static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc;
#[macro_use]
extern crate clap;
@ -8,21 +8,27 @@ mod config;
mod constants;
mod utils;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::sync::Arc;
use std::time::Duration;
use libdoh::odoh::ODoHRotator;
use libdoh::reexports::tokio;
use libdoh::*;
use crate::config::*;
use crate::constants::*;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::sync::Arc;
use std::time::Duration;
fn main() {
let mut runtime_builder = tokio::runtime::Builder::new();
let mut runtime_builder = tokio::runtime::Builder::new_multi_thread();
runtime_builder.enable_all();
runtime_builder.threaded_scheduler();
runtime_builder.thread_name("doh-proxy");
let mut runtime = runtime_builder.build().unwrap();
let runtime = runtime_builder.build().unwrap();
let rotator = match ODoHRotator::new(runtime.handle().clone()) {
Ok(r) => r,
Err(_) => panic!("Failed to create ODoHRotator"),
};
let mut globals = Globals {
#[cfg(feature = "tls")]
@ -37,11 +43,15 @@ fn main() {
max_clients: MAX_CLIENTS,
timeout: Duration::from_secs(TIMEOUT_SEC),
clients_count: Default::default(),
max_concurrent_streams: MAX_CONCURRENT_STREAMS,
min_ttl: MIN_TTL,
max_ttl: MAX_TTL,
err_ttl: ERR_TTL,
keepalive: true,
disable_post: false,
allow_odoh_post: false,
odoh_configs_path: ODOH_CONFIGS_PATH.to_string(),
odoh_rotator: Arc::new(rotator),
runtime_handle: runtime.handle().clone(),
};

View file

@ -1,25 +1,24 @@
// functions to verify the startup arguments as correct
use std::net::{SocketAddr, ToSocketAddrs};
// functions to verify the startup arguments as correct
/// Validates a startup argument as a socket address (IP plus port).
///
/// Returns the original string on success, matching clap's value-validator
/// contract. (The diff rendering interleaved the old
/// `(String) -> Result<(), String>` version here; this is the reconstructed
/// current version.)
pub(crate) fn verify_sock_addr(arg_val: &str) -> Result<String, String> {
    match arg_val.parse::<SocketAddr>() {
        Ok(_addr) => Ok(arg_val.to_string()),
        Err(_) => Err(format!(
            "Could not parse \"{arg_val}\" as a valid socket address (with port)."
        )),
    }
}
/// Validates a startup argument as a reachable upstream address: it must
/// resolve (possibly via DNS for host names) to at least one socket address.
///
/// Returns the original string on success, matching clap's value-validator
/// contract. (The diff rendering interleaved the old
/// `(String) -> Result<(), String>` version here; this is the reconstructed
/// current version.)
pub(crate) fn verify_remote_server(arg_val: &str) -> Result<String, String> {
    match arg_val.to_socket_addrs() {
        Ok(mut addr_iter) => match addr_iter.next() {
            Some(_) => Ok(arg_val.to_string()),
            None => Err(format!(
                "Could not parse \"{arg_val}\" as a valid remote uri"
            )),
        },
        Err(err) => Err(format!("{err}")),
    }
}