mirror of
https://github.com/DNSCrypt/doh-server.git
synced 2025-04-04 13:37:39 +03:00
Compare commits
312 commits
Author | SHA1 | Date | |
---|---|---|---|
|
f0242354d3 | ||
|
25fa6946e6 | ||
|
2254632d33 | ||
|
672d1a11f1 | ||
|
9e4a931bce | ||
|
40b0b02972 | ||
|
bf443c33b9 | ||
|
1a0a0566c4 | ||
|
890a74276f | ||
|
34f614e938 | ||
|
d6635eebb7 | ||
|
c79501aea3 | ||
|
e73964fa1d | ||
|
bafbdc0926 | ||
|
30a55a0f2f | ||
|
7bb8293c28 | ||
|
a6517472d5 | ||
|
3511672d49 | ||
|
bd85572368 | ||
|
02b3a67a00 | ||
|
66c66c7a28 | ||
|
1165fab90c | ||
|
c92308ccbb | ||
|
78c47830ff | ||
|
9e2853da86 | ||
|
e5f6f2a5d6 | ||
|
e8df0458ac | ||
|
19040f1e88 | ||
|
6f9f63e754 | ||
|
678bd04bed | ||
|
ffa0828515 | ||
|
6580f6ffb5 | ||
|
f64770bdd7 | ||
|
18297228c7 | ||
|
908e7d64db | ||
|
c54b3303fc | ||
|
1c5c83803a | ||
|
1386b7d13a | ||
|
920d31b502 | ||
|
651224d900 | ||
|
b5d525abcd | ||
|
11d8f4cb31 | ||
|
47330ebcad | ||
|
d5fd8231ff | ||
|
8cba04338e | ||
|
85280f4525 | ||
|
1c28a28b78 | ||
|
fbf82068d1 | ||
|
c9e084b2b4 | ||
|
37dc663b6e | ||
|
b81cc3e5d2 | ||
|
3f1bbcd8dc | ||
|
e92fddb165 | ||
|
d573a20c86 | ||
|
f5c07a205b | ||
|
d277c0a806 | ||
|
fc61c79a9f | ||
|
a92f4a77ae | ||
|
a373957045 | ||
|
6f5213838b | ||
|
eede3f4ab3 | ||
|
fdcc797fcb | ||
|
3e59f42558 | ||
|
a1fc5bbffc | ||
|
4b887d6705 | ||
|
6818fbe8a1 | ||
|
c82fb339ed | ||
|
06a3fa0499 | ||
|
8b9f9377b3 | ||
|
767b3e17b1 | ||
|
a60ced8782 | ||
|
25d1261730 | ||
|
ff62b6a24b | ||
|
fd65582aa6 | ||
|
d12b9deb35 | ||
|
965bca7fde | ||
|
5b11bc520e | ||
|
ab4c27ef86 | ||
|
db9c8634e3 | ||
|
533c29ec1e | ||
|
e27ab7dee9 | ||
|
511b0b4388 | ||
|
74939bdc6c | ||
|
054beb390c | ||
|
16ab626cc2 | ||
|
115938f90f | ||
|
c6c9d64681 | ||
|
d586c50019 | ||
|
46be8b9662 | ||
|
e6fe51647d | ||
|
379a7abc7e | ||
|
5770f9da33 | ||
|
b77f10cd9d | ||
|
63eac2a622 | ||
|
a727c4b9fa | ||
|
2918061786 | ||
|
7657d5a2b2 | ||
|
f9d2a0fc94 | ||
|
4f1e0f2abe | ||
|
a988eb42a2 | ||
|
a19c523cf2 | ||
|
b637bb1ec9 | ||
|
f4a1dee971 | ||
|
f4cc9bb0f9 | ||
|
485afd5976 | ||
|
0f268055b7 | ||
|
324bbcde60 | ||
|
474701ec1e | ||
|
3b77ff2e34 | ||
|
ece8a445cb | ||
|
eebd6b8356 | ||
|
fd1081e0b0 | ||
|
5c369fc610 | ||
|
3bc0d22f69 | ||
|
a746e2822a | ||
|
9be0d1ed74 | ||
|
62744d5390 | ||
|
21fc7441b3 | ||
|
6edccca03e | ||
|
90c30c8905 | ||
|
1389c82872 | ||
|
9445e95014 | ||
|
338d6436c0 | ||
|
4e54008b10 | ||
|
822d3d9a51 | ||
|
4cb88417ba | ||
|
e34f60e2eb | ||
|
25a9c285db | ||
|
05a60818ce | ||
|
42211d4f5e | ||
|
0403de66f1 | ||
|
00cc43e2bb | ||
|
63d672895f | ||
|
4de5310430 | ||
|
4d685d8948 | ||
|
6f40f792e3 | ||
|
eb8ea3dc84 | ||
|
ecacd6eca9 | ||
|
623328d37f | ||
|
0404b8f8a7 | ||
|
de0e8a39c3 | ||
|
30abc95e48 | ||
|
03581234b5 | ||
|
dbc5dc702f | ||
|
ba663ef4d9 | ||
|
518341df37 | ||
|
39124df9fc | ||
|
b4d4eaae50 | ||
|
a2f342379e | ||
|
0a99d0d212 | ||
|
4326f1afa7 | ||
|
c6c5c71458 | ||
|
2179ceae67 | ||
|
9f092224cd | ||
|
c3f724118c | ||
|
dbe14da43e | ||
|
d7fa144671 | ||
|
2e95a50f9f | ||
|
b281555860 | ||
|
741d28557d | ||
|
c176eeff5f | ||
|
226d8fe52a | ||
|
b544ca3daa | ||
|
3ee8477ffa | ||
|
548adf7810 | ||
|
1174d2c5b7 | ||
|
2b706345a4 | ||
|
62226b12e4 | ||
|
5fabdbf2d1 | ||
|
cf8ba631e6 | ||
|
d535650ed4 | ||
|
bf589911de | ||
|
64cd83a440 | ||
|
b8c8dacb5d | ||
|
1a0acbea44 | ||
|
1812880562 | ||
|
5058aeaf07 | ||
|
61b5c694c6 | ||
|
e204f18609 | ||
|
cf246f929c | ||
|
8e4d66f68a | ||
|
8d72413eaf | ||
|
f2215aa52f | ||
|
87954b5012 | ||
|
868c41b9b8 | ||
|
657005b427 | ||
|
88eb1114eb | ||
|
4bdad2e075 | ||
|
73e2a5e345 | ||
|
13229624a7 | ||
|
cb8c324def | ||
|
75b54ab57b | ||
|
16cb57c1e1 | ||
|
4914572894 | ||
|
a91241afed | ||
|
072a6c0672 | ||
|
76c28cf48e | ||
|
071af94eba | ||
|
ccca660359 | ||
|
65607f6dc7 | ||
|
a9397bc33e | ||
|
aaf0d50726 | ||
|
a08dfba08a | ||
|
04375a035d | ||
|
9e5db2a218 | ||
|
595f809432 | ||
|
07088fd0a7 | ||
|
c75ebff959 | ||
|
4f2846966e | ||
|
c27e2059bb | ||
|
58b64ce077 | ||
|
c66c13568a | ||
|
06b91af009 | ||
|
bf42e95368 | ||
|
31953b2605 | ||
|
4e4ddcf8e5 | ||
|
a4938aa962 | ||
|
f2e5f13e85 | ||
|
02ce4c9e9b | ||
|
f7770951da | ||
|
ad36120e02 | ||
|
0d55bf73c6 | ||
|
1b850b2f41 | ||
|
1cb4a11a7b | ||
|
49c5ebd9fb | ||
|
c52fa0b8d3 | ||
|
a44974d73c | ||
|
740e23dd93 | ||
|
7102a173fb | ||
|
57d4c6de9f | ||
|
cb31469004 | ||
|
3b3bb6f0e7 | ||
|
0301b25d1c | ||
|
91ba886ee5 | ||
|
d3b92d782d | ||
|
37d40e88a3 | ||
|
7bd2bf2131 | ||
|
51adc024ee | ||
|
eb2e2afb75 | ||
|
a3bb77fa61 | ||
|
8c96ecdc10 | ||
|
c65ce1210d | ||
|
10a99040e1 | ||
|
46cdc9168d | ||
|
abaa8e4da0 | ||
|
48ddb237b4 | ||
|
d12defaf2c | ||
|
98d1a8c8d7 | ||
|
524168795f | ||
|
8fbcdfc2a7 | ||
|
7ff594de3b | ||
|
cc00e62f82 | ||
|
224609ea9b | ||
|
217bb90320 | ||
|
de99e6a8b2 | ||
|
dbef23142a | ||
|
30f2a7e86a | ||
|
d02fa3d393 | ||
|
7612e9a57f | ||
|
764d7c3a64 | ||
|
ed47ab8a1b | ||
|
1706ec0dcb | ||
|
badcb6104d | ||
|
58a0ef3347 | ||
|
dc7480c3f9 | ||
|
d691562f60 | ||
|
ed9c50777a | ||
|
b723245bec | ||
|
4b7956bc78 | ||
|
a65e1883ee | ||
|
08675c6243 | ||
|
4cdb23d9fa | ||
|
0fc3a23d04 | ||
|
587c725ca5 | ||
|
4e4f9aeea8 | ||
|
b3587f9694 | ||
|
d17e6d767f | ||
|
5a07001129 | ||
|
d22331d106 | ||
|
8ad7edbf0e | ||
|
2f85b58f33 | ||
|
4ae4c5d974 | ||
|
9ecdb3dcd7 | ||
|
37c0024e10 | ||
|
a8cd28fff8 | ||
|
98a0c59047 | ||
|
2723bd3ac6 | ||
|
9dcbc328d4 | ||
|
737c5b4d24 | ||
|
34c043288a | ||
|
82587db5f6 | ||
|
81518fe385 | ||
|
2d03cf4b55 | ||
|
93d90acd32 | ||
|
61d1e7cb0e | ||
|
22b51a4c20 | ||
|
a31c9d225a | ||
|
12ce22060c | ||
|
b480b85e84 | ||
|
d14dd35ab0 | ||
|
82630f4a31 | ||
|
bc925cc2d5 | ||
|
eddb36b541 | ||
|
a3ee3f3198 | ||
|
1e2f123a35 | ||
|
d042aa0f5a | ||
|
eebca570ed | ||
|
b66fde914f | ||
|
c45da71704 | ||
|
dff63386f7 | ||
|
4ca54eb71b | ||
|
5571b6c405 |
27 changed files with 2186 additions and 430 deletions
1
.github/FUNDING.yml
vendored
Normal file
1
.github/FUNDING.yml
vendored
Normal file
|
@ -0,0 +1 @@
|
|||
open_collective: dnscrypt
|
8
.github/dependabot.yml
vendored
Normal file
8
.github/dependabot.yml
vendored
Normal file
|
@ -0,0 +1,8 @@
|
|||
version: 2
|
||||
updates:
|
||||
- package-ecosystem: cargo
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: daily
|
||||
time: "04:00"
|
||||
open-pull-requests-limit: 10
|
17
.github/workflows/issues.yml
vendored
Normal file
17
.github/workflows/issues.yml
vendored
Normal file
|
@ -0,0 +1,17 @@
|
|||
name: Close inactive issues
|
||||
on:
|
||||
schedule:
|
||||
- cron: "30 1 * * *"
|
||||
|
||||
jobs:
|
||||
close-issues:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
issues: write
|
||||
pull-requests: write
|
||||
steps:
|
||||
- uses: actions/stale@v9
|
||||
with:
|
||||
stale-issue-message: "This issue is stale because it has been open for 30 days with no activity."
|
||||
close-issue-message: "This issue was closed because it has been inactive for 14 days since being marked as stale."
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
164
.github/workflows/release.yml
vendored
Normal file
164
.github/workflows/release.yml
vendored
Normal file
|
@ -0,0 +1,164 @@
|
|||
name: Release
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- "*"
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Get the version
|
||||
id: get_version
|
||||
run: echo ::set-output name=VERSION::${GITHUB_REF#refs/tags/}
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- uses: mlugg/setup-zig@v1
|
||||
with:
|
||||
version: 0.10.1
|
||||
|
||||
- uses: hecrj/setup-rust-action@master
|
||||
with:
|
||||
rust-version: stable
|
||||
|
||||
- name: Check Cargo availability
|
||||
run: cargo --version
|
||||
|
||||
- name: Check Rustup default toolchain
|
||||
run: rustup default | grep stable
|
||||
|
||||
- name: Install cargo-deb
|
||||
run: cargo install cargo-deb
|
||||
|
||||
- name: Install cargo-generate-rpm
|
||||
run: cargo install cargo-generate-rpm
|
||||
|
||||
- name: Install cargo-zigbuild
|
||||
run: cargo install cargo-zigbuild
|
||||
|
||||
- name: Release build Linux-x86-64
|
||||
run: |
|
||||
rustup target add x86_64-unknown-linux-musl
|
||||
env RUSTFLAGS="-C strip=symbols" cargo zigbuild --release --target x86_64-unknown-linux-musl
|
||||
mkdir doh-proxy
|
||||
mv target/x86_64-unknown-linux-musl/release/doh-proxy doh-proxy/
|
||||
cp README.md localhost.pem doh-proxy/
|
||||
tar cjpf doh-proxy_${{ steps.get_version.outputs.VERSION }}_linux-x86_64.tar.bz2 doh-proxy
|
||||
rm -fr doh-proxy
|
||||
|
||||
- name: Release build Linux-aarch64
|
||||
run: |
|
||||
rustup target add aarch64-unknown-linux-musl
|
||||
env RUSTFLAGS="-C strip=symbols" cargo zigbuild --release --target aarch64-unknown-linux-musl
|
||||
mkdir doh-proxy
|
||||
mv target/aarch64-unknown-linux-musl/release/doh-proxy doh-proxy/
|
||||
cp README.md localhost.pem doh-proxy/
|
||||
tar cjpf doh-proxy_${{ steps.get_version.outputs.VERSION }}_linux-aarch64.tar.bz2 doh-proxy
|
||||
rm -fr doh-proxy
|
||||
|
||||
- name: Release build Windows-x86_64
|
||||
run: |
|
||||
rustup target add x86_64-pc-windows-gnu
|
||||
env RUSTFLAGS="-C strip=symbols" cargo zigbuild --release --target x86_64-pc-windows-gnu
|
||||
mkdir doh-proxy
|
||||
mv target/x86_64-pc-windows-gnu/release/doh-proxy.exe doh-proxy/
|
||||
cp README.md localhost.pem doh-proxy/
|
||||
zip -9 -r doh-proxy_${{ steps.get_version.outputs.VERSION }}_windows-x86_64.zip doh-proxy
|
||||
rm -fr doh-proxy
|
||||
|
||||
- name: Debian packages
|
||||
run: |
|
||||
rustup target add x86_64-unknown-linux-musl
|
||||
env RUSTFLAGS="-C strip=symbols" cargo deb --no-strip --cargo-build=zigbuild --target=x86_64-unknown-linux-musl
|
||||
rustup target add aarch64-unknown-linux-musl
|
||||
env RUSTFLAGS="-C strip=symbols" cargo deb --no-strip --cargo-build=zigbuild --target=aarch64-unknown-linux-musl
|
||||
|
||||
- name: RPM packages
|
||||
run: |
|
||||
rustup target add x86_64-unknown-linux-gnu
|
||||
env RUSTFLAGS="-C strip=symbols" cargo-zigbuild build --target=x86_64-unknown-linux-gnu.2.17 --release
|
||||
mv target/x86_64-unknown-linux-musl/release/doh-proxy target/release/
|
||||
cargo generate-rpm --target x86_64-unknown-linux-gnu
|
||||
rustup target add aarch64-unknown-linux-gnu
|
||||
env RUSTFLAGS="-C strip=symbols" cargo-zigbuild build --target=aarch64-unknown-linux-gnu.2.17 --release
|
||||
cargo generate-rpm --target aarch64-unknown-linux-gnu
|
||||
|
||||
- name: Create release
|
||||
id: create_release
|
||||
uses: actions/create-release@v1
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
tag_name: ${{ github.ref }}
|
||||
release_name: Release ${{ github.ref }}
|
||||
draft: true
|
||||
prerelease: false
|
||||
|
||||
- name: Upload Debian package for x86_64
|
||||
id: upload-release-asset-debian-x86_64
|
||||
uses: actions/upload-release-asset@v1
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
upload_url: ${{ steps.create_release.outputs.upload_url }}
|
||||
asset_name: "doh-proxy_${{ steps.get_version.outputs.VERSION }}-1_amd64.deb"
|
||||
asset_path: "target/x86_64-unknown-linux-musl/debian/doh-proxy_${{ steps.get_version.outputs.VERSION }}-1_amd64.deb"
|
||||
asset_content_type: application/x-debian-package
|
||||
|
||||
- name: Upload RPM package for x86_64
|
||||
id: upload-release-asset-rpm-x86_64
|
||||
uses: actions/upload-release-asset@v1
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
upload_url: ${{ steps.create_release.outputs.upload_url }}
|
||||
asset_name: "doh-proxy-${{ steps.get_version.outputs.VERSION }}-1.x86_64.rpm"
|
||||
asset_path: "target/x86_64-unknown-linux-gnu/generate-rpm/doh-proxy-${{ steps.get_version.outputs.VERSION }}-1.x86_64.rpm"
|
||||
asset_content_type: application/x-redhat-package-manager
|
||||
|
||||
- name: Upload RPM package for aarch64
|
||||
id: upload-release-asset-rpm-aarch64
|
||||
uses: actions/upload-release-asset@v1
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
upload_url: ${{ steps.create_release.outputs.upload_url }}
|
||||
asset_name: "doh-proxy-${{ steps.get_version.outputs.VERSION }}-1.aarch64.rpm"
|
||||
asset_path: "target/aarch64-unknown-linux-gnu/generate-rpm/doh-proxy-${{ steps.get_version.outputs.VERSION }}-1.aarch64.rpm"
|
||||
asset_content_type: application/x-redhat-package-manager
|
||||
|
||||
- name: Upload tarball for linux-x86_64
|
||||
id: upload-release-asset-tarball-linux-x86_64
|
||||
uses: actions/upload-release-asset@v1
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
upload_url: ${{ steps.create_release.outputs.upload_url }}
|
||||
asset_name: "doh-proxy_${{ steps.get_version.outputs.VERSION }}_linux-x86_64.tar.bz2"
|
||||
asset_path: "doh-proxy_${{ steps.get_version.outputs.VERSION }}_linux-x86_64.tar.bz2"
|
||||
asset_content_type: application/x-tar
|
||||
|
||||
- name: Upload tarball for linux-aarch64
|
||||
id: upload-release-asset-tarball-linux-aarch64
|
||||
uses: actions/upload-release-asset@v1
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
upload_url: ${{ steps.create_release.outputs.upload_url }}
|
||||
asset_name: "doh-proxy_${{ steps.get_version.outputs.VERSION }}_linux-aarch64.tar.bz2"
|
||||
asset_path: "doh-proxy_${{ steps.get_version.outputs.VERSION }}_linux-aarch64.tar.bz2"
|
||||
asset_content_type: application/x-tar
|
||||
|
||||
- name: Upload tarball for windows-x86_64
|
||||
id: upload-release-asset-tarball-windows-x86_64
|
||||
uses: actions/upload-release-asset@v1
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
upload_url: ${{ steps.create_release.outputs.upload_url }}
|
||||
asset_name: "doh-proxy_${{ steps.get_version.outputs.VERSION }}_windows-x86_64.zip"
|
||||
asset_path: "doh-proxy_${{ steps.get_version.outputs.VERSION }}_windows-x86_64.zip"
|
||||
asset_content_type: application/zip
|
17
.github/workflows/shiftleft-analysis.yml
vendored
Normal file
17
.github/workflows/shiftleft-analysis.yml
vendored
Normal file
|
@ -0,0 +1,17 @@
|
|||
name: ShiftLeft Scan
|
||||
|
||||
on: push
|
||||
|
||||
jobs:
|
||||
Scan-Build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
|
||||
- name: Perform ShiftLeft Scan
|
||||
uses: ShiftLeftSecurity/scan-action@master
|
||||
env:
|
||||
WORKSPACE: ""
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
output: reports
|
23
.github/workflows/test.yml
vendored
Normal file
23
.github/workflows/test.yml
vendored
Normal file
|
@ -0,0 +1,23 @@
|
|||
name: Rust
|
||||
|
||||
on: [push]
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@master
|
||||
- uses: hecrj/setup-rust-action@master
|
||||
with:
|
||||
rust-version: stable
|
||||
- name: Check Cargo availability
|
||||
run: cargo --version
|
||||
- name: Check Rustup default toolchain
|
||||
run: rustup default | grep stable
|
||||
- name: Build without default features
|
||||
run: |
|
||||
env RUSTFLAGS="-C link-arg=-s" cargo check --no-default-features
|
||||
- name: Build with default features
|
||||
run: |
|
||||
env RUSTFLAGS="-C link-arg=-s" cargo check
|
4
.gitignore
vendored
4
.gitignore
vendored
|
@ -1,5 +1,7 @@
|
|||
#*#
|
||||
**/*.rs.bk
|
||||
*~
|
||||
/target/
|
||||
Cargo.lock
|
||||
/target/
|
||||
/src/libdoh/target/
|
||||
|
||||
|
|
|
@ -1,4 +0,0 @@
|
|||
language: rust
|
||||
rust:
|
||||
- nightly
|
||||
- stable
|
58
Cargo.toml
58
Cargo.toml
|
@ -1,20 +1,54 @@
|
|||
[package]
|
||||
name = "doh-proxy"
|
||||
version = "0.1.7"
|
||||
version = "0.9.11"
|
||||
authors = ["Frank Denis <github@pureftpd.org>"]
|
||||
description = "A DNS-over-HTTPS (DoH) proxy"
|
||||
keywords = ["dns","https","doh","proxy"]
|
||||
description = "A DNS-over-HTTPS (DoH) and ODoH (Oblivious DoH) proxy"
|
||||
keywords = ["dns", "https", "doh", "odoh", "proxy"]
|
||||
license = "MIT"
|
||||
homepage = "https://github.com/jedisct1/rust-doh"
|
||||
repository = "https://github.com/jedisct1/rust-doh"
|
||||
categories = ["asynchronous", "network-programming","command-line-utilities"]
|
||||
categories = ["asynchronous", "network-programming", "command-line-utilities"]
|
||||
edition = "2018"
|
||||
readme = "README.md"
|
||||
|
||||
[features]
|
||||
default = ["tls"]
|
||||
tls = ["libdoh/tls"]
|
||||
|
||||
[dependencies]
|
||||
base64 = "~0.9"
|
||||
clap = "~2"
|
||||
futures = "~0.1"
|
||||
hyper = "~0.11"
|
||||
tokio = "~0.1"
|
||||
tokio-io = "~0.1"
|
||||
tokio-timer = "~0.1"
|
||||
clippy = {version = ">=0", optional = true}
|
||||
libdoh = { path = "src/libdoh", version = "0.9.9", default-features = false }
|
||||
clap = { version = "4", features = ["std", "cargo", "wrap_help", "string"] }
|
||||
dnsstamps = "0.1.10"
|
||||
mimalloc = { version = "0.1.44", default-features = false }
|
||||
|
||||
[package.metadata.generate-rpm]
|
||||
assets = [
|
||||
{ source = "target/release/doh-proxy", dest = "/usr/bin/doh-proxy", mode = "755" },
|
||||
{ source = "README.md", dest = "/usr/share/doc/doh-proxy/README.md", mode = "644", doc = true },
|
||||
]
|
||||
|
||||
[package.metadata.deb]
|
||||
extended-description = """\
|
||||
A fast and secure DoH (DNS-over-HTTPS) and ODoH server written in Rust."""
|
||||
assets = [
|
||||
[
|
||||
"target/release/doh-proxy",
|
||||
"usr/bin/",
|
||||
"755",
|
||||
],
|
||||
[
|
||||
"README.md",
|
||||
"usr/share/doc/doh-proxy/README.md",
|
||||
"644",
|
||||
],
|
||||
]
|
||||
section = "network"
|
||||
depends = "$auto"
|
||||
priority = "optional"
|
||||
|
||||
[profile.release]
|
||||
codegen-units = 1
|
||||
incremental = false
|
||||
lto = "fat"
|
||||
opt-level = 3
|
||||
panic = "abort"
|
||||
|
|
2
LICENSE
2
LICENSE
|
@ -1,6 +1,6 @@
|
|||
MIT License
|
||||
|
||||
Copyright (c) 2018 Frank Denis
|
||||
Copyright (c) 2018-2025 Frank Denis
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
|
|
205
README.md
205
README.md
|
@ -1,44 +1,213 @@
|
|||
[](https://deps.rs/repo/github/jedisct1/rust-doh)
|
||||
# 
|
||||
|
||||
# doh-proxy
|
||||
A fast and secure DoH (DNS-over-HTTPS) and ODoH (Oblivious DoH) server.
|
||||
|
||||
A DNS-over-HTTP server proxy in Rust. Add a webserver and you get DNS-over-HTTPS, which is actually DNS-over-HTTP/2.
|
||||
`doh-proxy` is written in Rust, and has been battle-tested in production since February 2018. It doesn't do DNS resolution on its own, but can sit in front of any DNS resolver in order to augment it with DoH support.
|
||||
|
||||
## Installation
|
||||
|
||||
### Option 1: precompiled binaries for Linux
|
||||
|
||||
Precompiled tarballs and Debian packages for Linux/x86_64 [can be downloaded here](https://github.com/jedisct1/doh-server/releases/latest).
|
||||
|
||||
### Option 2: from source code
|
||||
|
||||
This requires the [`rust`](https://rustup.rs) compiler to be installed.
|
||||
|
||||
* With built-in support for HTTPS (default):
|
||||
|
||||
```sh
|
||||
cargo install doh-proxy
|
||||
```
|
||||
|
||||
* Without built-in support for HTTPS:
|
||||
|
||||
```sh
|
||||
cargo install doh-proxy --no-default-features
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
```text
|
||||
doh-proxy
|
||||
A DNS-over-HTTP server proxy
|
||||
|
||||
USAGE:
|
||||
doh-proxy [OPTIONS]
|
||||
doh-proxy [FLAGS] [OPTIONS]
|
||||
|
||||
FLAGS:
|
||||
-h, --help Prints help information
|
||||
-V, --version Prints version information
|
||||
-O, --allow-odoh-post Allow POST queries over ODoH even if they have been disabed for DoH
|
||||
-K, --disable-keepalive Disable keepalive
|
||||
-P, --disable-post Disable POST queries
|
||||
-h, --help Prints help information
|
||||
-V, --version Prints version information
|
||||
|
||||
OPTIONS:
|
||||
-l, --listen_address <listen_address> Address to listen to [default: 127.0.0.1:3000]
|
||||
-b, --local_bind_address <local_bind_address> Address to connect from [default: 0.0.0.0:0]
|
||||
-c, --max_clients <max_clients> Maximum number of simultaneous clients [default: 512]
|
||||
-E, --err-ttl <err_ttl> TTL for errors, in seconds [default: 2]
|
||||
-H, --hostname <hostname> Host name (not IP address) DoH clients will use to connect
|
||||
-l, --listen-address <listen_address> Address to listen to [default: 127.0.0.1:3000]
|
||||
-b, --local-bind-address <local_bind_address> Address to connect from
|
||||
-c, --max-clients <max_clients> Maximum number of simultaneous clients [default: 512]
|
||||
-C, --max-concurrent <max_concurrent> Maximum number of concurrent requests per client [default: 16]
|
||||
-X, --max-ttl <max_ttl> Maximum TTL, in seconds [default: 604800]
|
||||
-T, --min-ttl <min_ttl> Minimum TTL, in seconds [default: 10]
|
||||
-p, --path <path> URI path [default: /dns-query]
|
||||
-u, --server_address <server_address> Address to connect to [default: 9.9.9.9:53]
|
||||
-g, --public-address <public_address> External IP address DoH clients will connect to
|
||||
-j, --public-port <public_port> External port DoH clients will connect to, if not 443
|
||||
-u, --server-address <server_address> Address to connect to [default: 9.9.9.9:53]
|
||||
-t, --timeout <timeout> Timeout, in seconds [default: 10]
|
||||
-I, --tls-cert-key-path <tls_cert_key_path>
|
||||
Path to the PEM-encoded secret keys (only required for built-in TLS)
|
||||
|
||||
-i, --tls-cert-path <tls_cert_path>
|
||||
Path to the PEM/PKCS#8-encoded certificates (only required for built-in TLS)
|
||||
```
|
||||
|
||||
Serves HTTP requests only. DoH is mostly useful to leverage an existing webserver, so just configure your webserver or CDN to proxy connections to this.
|
||||
Example command-line:
|
||||
|
||||
```sh
|
||||
doh-proxy -H 'doh.example.com' -u 127.0.0.1:53 -g 233.252.0.5
|
||||
```
|
||||
|
||||
Here, `doh.example.com` is the host name (which should match a name included in the TLS certificate), `127.0.0.1:53` is the address of the DNS resolver, and `233.252.0.5` is the public IP address of the DoH server.
|
||||
|
||||
## HTTP/2 and HTTP/3 termination
|
||||
|
||||
The recommended way to use `doh-proxy` is to use a TLS termination proxy (such as [hitch](https://github.com/varnish/hitch) or [relayd](https://man.openbsd.org/relayd.8)), a CDN or a web server with proxying abilities as a front-end.
|
||||
|
||||
That way, the DoH service can be exposed as a virtual host, sharing the same IP addresses as existing websites.
|
||||
|
||||
If `doh-proxy` and the HTTP/2 (/ HTTP/3) front-end run on the same host, using the HTTP protocol to communicate between both is fine.
|
||||
|
||||
If both are on distinct networks, such as when using a CDN, `doh-proxy` can handle HTTPS requests, provided that it was compiled with the `tls` feature.
|
||||
|
||||
The certificates and private keys must be encoded in PEM/PKCS#8 format. They can be stored in the same file.
|
||||
|
||||
If you are using ECDSA certificates and ECDSA private keys start with `-----BEGIN EC PRIVATE KEY-----` and not `-----BEGIN PRIVATE KEY-----`, convert them to PKCS#8 with (in this example, `example.key` is the original file):
|
||||
|
||||
```sh
|
||||
openssl pkcs8 -topk8 -nocrypt -in example.key -out example.pkcs8.pem
|
||||
```
|
||||
|
||||
In order to enable built-in HTTPS support, add the `--tls-cert-path` option to specify the location of the certificates file, as well as the private keys file using `--tls-cert-key-path`.
|
||||
|
||||
Once HTTPS is enabled, HTTP connections will not be accepted.
|
||||
|
||||
A sample self-signed certificate [`localhost.pem`](https://github.com/jedisct1/doh-server/raw/master/localhost.pem) can be used for testing.
|
||||
The file also includes the private key.
|
||||
|
||||
[`acme.sh`](https://github.com/acmesh-official/acme.sh) can be used to create and update TLS certificates using Let's Encrypt and other ACME-compliant providers. If you are using it to create ECDSA keys, see above for converting the secret key into PKCS#8.
|
||||
|
||||
The certificates path must be set to the full certificates chain (`fullchain.cer`) and the key path to the secret keys (the `.key` file):
|
||||
|
||||
```sh
|
||||
doh-proxy -i /path/to/fullchain.cer -I /path/to/domain.key ...
|
||||
```
|
||||
|
||||
Once started, `doh-proxy` automatically reloads the certificates as they change; there is no need to restart the server.
|
||||
|
||||
If clients are getting the `x509: certificate signed by unknown authority` error, double check that the certificate file is the full chain, not the other `.cer` file.
|
||||
|
||||
## Accepting both DNSCrypt and DoH connections on port 443
|
||||
|
||||
DNSCrypt is an alternative encrypted DNS protocol that is faster and more lightweight than DoH.
|
||||
|
||||
Both DNSCrypt and DoH connections can be accepted on the same TCP port using [Encrypted DNS Server](https://github.com/jedisct1/encrypted-dns-server).
|
||||
|
||||
Encrypted DNS Server forwards DoH queries to Nginx or `doh-proxy` when a TLS connection is detected, or directly responds to DNSCrypt queries.
|
||||
|
||||
It also provides DNS caching, server-side filtering, metrics, and TCP connection reuse in order to mitigate exhaustion attacks.
|
||||
|
||||
Unless the front-end is a CDN, an ideal setup is to use `doh-proxy` behind `Encrypted DNS Server`.
|
||||
|
||||
## Oblivious DoH (ODoH)
|
||||
|
||||
Oblivious DoH is similar to Anonymized DNSCrypt, but for DoH. It requires relays, but also upstream DoH servers that support the protocol.
|
||||
|
||||
This proxy supports ODoH termination (not relaying) out of the box.
|
||||
|
||||
However, ephemeral keys are currently only stored in memory. In a load-balanced configuration, sticky sessions must be used.
|
||||
|
||||
Currently available ODoH relays only use `POST` queries.
|
||||
So, `POST` queries have been disabled for regular DoH queries, accepting them is required to be compatible with ODoH relays.
|
||||
|
||||
This can be achieved with the `--allow-odoh-post` command-line switch.
|
||||
|
||||
## Operational recommendations
|
||||
|
||||
* DoH can be easily detected and blocked using SNI inspection. As a mitigation, DoH endpoints should preferably share the same virtual host as existing, popular websites, rather than being on dedicated virtual hosts.
|
||||
* When using DoH, DNS stamps should include a resolver IP address in order to remove a dependency on non-encrypted, non-authenticated, easy-to-block resolvers.
|
||||
* Unlike DNSCrypt where users must explicitly trust a DNS server's public key, the security of DoH relies on traditional public Certificate Authorities. Additional root certificates (required by governments, security software, enterprise gateways) installed on a client immediately make DoH vulnerable to MITM. In order to prevent this, DNS stamps should include the hash of the parent certificate.
|
||||
* TLS certificates are tied to host names. But domains expire, get reassigned and switch hands all the time. If a domain originally used for a DoH service gets a new, possibly malicious owner, clients still configured to use the service will blindly keep trusting it if the CA is the same. As a mitigation, the CA should sign an intermediate certificate (the only one present in the stamp), itself used to sign the name used by the DoH server. While commercial CAs offer this, Let's Encrypt currently doesn't.
|
||||
* Make sure that the front-end supports at least HTTP/2 and TLS 1.3.
|
||||
* Internal DoH servers still require TLS certificates. So, if you are planning to deploy an internal server, you need to set up an internal CA, or add self-signed certificates to every single client.
|
||||
|
||||
## Example usage with `encrypted-dns-server`
|
||||
|
||||
Add the following section to the configuration file:
|
||||
|
||||
```toml
|
||||
[tls]
|
||||
upstream_addr = "127.0.0.1:3000"
|
||||
```
|
||||
|
||||
## Example usage with `nginx`
|
||||
|
||||
In an existing `server`, a `/dns-query` endpoint can be exposed that way:
|
||||
|
||||
```text
|
||||
location /dns-query {
|
||||
proxy_pass http://127.0.0.1:3000;
|
||||
}
|
||||
```
|
||||
|
||||
This example assumes that the DoH proxy is listening locally to port `3000`.
|
||||
|
||||
HTTP caching can be added (see the `proxy_cache_path` and `proxy_cache` directives in the Nginx documentation), but be aware that a DoH server will quickly create a gigantic amount of files.
|
||||
|
||||
## DNS Stamp and certificate hashes
|
||||
|
||||
Use the online [DNS stamp calculator](https://dnscrypt.info/stamps/) to compute the stamp for your server.
|
||||
|
||||
Add it to the `[static]` section of [`dnscrypt-proxy`](https://github.com/DNSCrypt/dnscrypt-proxy) and check that everything works as expected.
|
||||
|
||||
Then, start `dnscrypt-proxy` with the `-show-certs` command-line flag to print the hashes for your certificate chain.
|
||||
|
||||
Here is an example output:
|
||||
|
||||
```text
|
||||
[NOTICE] Advertised cert: [CN=dohtrial.att.net,O=AT&T Services\, Inc.,L=Dallas,ST=Texas,C=US] [f679e8451940f06141854dc94e1eb79fa5e04463c15b88f3b392da793c16c353]
|
||||
[NOTICE] Advertised cert: [CN=DigiCert Global CA G2,O=DigiCert Inc,C=US] [f61e576877da9650294cccb5f96c75fcb71bda1bbc4646367c4ebeda89d7318f]
|
||||
```
|
||||
|
||||
The first printed certificate is the certificate of the server itself. The next line is the one that signed that certificate. As you keep going down, you are getting closer to the certificate authority.
|
||||
|
||||
Unless you are using intermediate certificates, your safest option is probably to include the last printed hash certificate in your DNS stamp.
|
||||
|
||||
Go back to the online DNS stamp calculator, and copy&paste the hash (in this example: `f61e576877da9650294cccb5f96c75fcb71bda1bbc4646367c4ebeda89d7318f`).
|
||||
|
||||
If you are using Let's Encrypt, the last line is likely to be:
|
||||
|
||||
```text
|
||||
Advertised cert: [CN=Let's Encrypt Authority R3,O=Let's Encrypt,C=US] [444ebd67bb83f8807b3921e938ac9178b882bd50aadb11231f044cf5f08df7ce]
|
||||
```
|
||||
|
||||
There you have it. Your certificate hash is `444ebd67bb83f8807b3921e938ac9178b882bd50aadb11231f044cf5f08df7ce`.
|
||||
|
||||
This [Go code snippet](https://gist.github.com/d6cb41742a1ceb54d48cc286f3d5c5fa) can also compute the hash of certificates given a `.der` file.
|
||||
|
||||
### Common certificate hashes
|
||||
|
||||
* Let's Encrypt E1:
|
||||
* `cc1060d39c8329b62b6fbc7d0d6df9309869b981e7e6392d5cd8fa408f4d80e6`
|
||||
* Let's Encrypt R3:
|
||||
* `444ebd67bb83f8807b3921e938ac9178b882bd50aadb11231f044cf5f08df7ce`
|
||||
* Let's Encrypt R10:
|
||||
* `e644ba6963e335fe765cb9976b12b10eb54294b42477764ccb3a3acca3acb2fc`
|
||||
* ZeroSSL:
|
||||
* `9a3a34f727deb9bca51003d9ce9c39f8f27dd9c5242901c2bab1a44e635a0219`
|
||||
|
||||
## Clients
|
||||
|
||||
`doh-proxy` can be used with [dnscrypt-proxy](https://github.com/jedisct1/dnscrypt-proxy)
|
||||
as a client.
|
||||
`doh-proxy` can be used with [dnscrypt-proxy](https://github.com/DNSCrypt/dnscrypt-proxy) as a client.
|
||||
|
||||
`doh-proxy` is currently being used by the `doh.crypto.sx` public DNS resolver.
|
||||
`doh-proxy` is used in production for the `doh.crypto.sx` public DNS resolver and many others.
|
||||
|
||||
Other public DoH servers can be found here: [public encrypted DNS servers](https://dnscrypt.info/public-servers).
|
||||
An extensive list of public DoH servers can be found here: [public encrypted DNS servers](https://github.com/DNSCrypt/dnscrypt-resolvers/blob/master/v3/public-resolvers.md).
|
||||
|
|
47
localhost.pem
Normal file
47
localhost.pem
Normal file
|
@ -0,0 +1,47 @@
|
|||
-----BEGIN PRIVATE KEY-----
|
||||
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDb7g6EQhbfby97
|
||||
k4oMbZTzdi2TWFBs7qK/QwgOu+L6EhNHPO1ZEU29v0APFBFJO5zyyAk9bZ9k9tPB
|
||||
bCuVVI9jEUfLH3UCjEQPG6XI2w++uVh0yALvc/uurCvRHVlle/V7cAoikndc2SjE
|
||||
RQUALbACIqwD5g0F77BYwcsreB4GH253/R6Q2/CJZ4jNHPjkocOJiVr3ejA0kkoN
|
||||
MXpGUXWcrVVk20M2A1CeO7HAulLRcklEdoHE3v46pjp0iZK0F9LyZX1U1ql+4QL3
|
||||
iQttoZ4tMg83lFHSt4G9PrpIhzXr9W4NW822faSvrIwwN/JbItUmRa7n/3+MkuJQ
|
||||
IGGNDayXAgMBAAECggEBANs0fmGSocuXvYL1Pi4+9qxnCOwIpTi97Zam0BwnZwcL
|
||||
Bw4FCyiwV4UdX1LoFIailT9i49rHLYzre4oZL6OKgdQjQCSTuQOOHLPWQbpdpWba
|
||||
w/C5/jr+pkemMZIfJ6BAGiArPt7Qj4oKpFhj1qUj5H9sYXkNTcOx8Fm25rLv6TT9
|
||||
O7wg0oCpyG+iBSbCYBp9mDMz8pfo4P3BhcFiyKCKeiAC6KuHU81dvuKeFB4XQK+X
|
||||
no2NqDqe6MBkmTqjNNy+wi1COR7lu34LPiWU5Hq5PdIEqBBUMjlMI6oYlhlgNTdx
|
||||
SvsqFz3Xs6kpAhJTrSiAqscPYosgaMQxo+LI26PJnikCgYEA9n0OERkm0wSBHnHY
|
||||
Kx8jaxNYg93jEzVnEgI/MBTJZqEyCs9fF6Imv737VawEN/BhesZZX7bGZQfDo8AT
|
||||
aiSa5upkkSGXEqTu5ytyoKFTb+dJ/qmx3+zP6dPVzDnc8WPYMoUg7vvjZkXXJgZX
|
||||
+oMlMUW1wWiDNI3wP19W9Is6xssCgYEA5GqkUBEns6eTFJV0JKqbEORJJ7lx5NZe
|
||||
cIx+jPpLkILG4mOKOg1TBx0wkxa9cELtsNsM+bPtu9OqRMhsfPBmsXDHhJwg0Z6G
|
||||
eDTfYYPkpRhwZvl6jBZn9sLVR9wfg2hE+n0lfV3mceg336KOkwAehDU84SWZ2e0S
|
||||
esqkpbHJa+UCgYA7PY0O8POSzcdWkNf6bS5vAqRIdSCpMjGGc4HKRYSuJNnJHVPm
|
||||
czNK7Bcm3QPaiexzvI4oYd5G09niVjyUSx3rl7P56Y/MjFVau+d90agjAfyXtyMo
|
||||
BVtnAGGnBtUiMvP4GGT06xcZMnnmCqpEbBaZQ/7N8Bdwnxh5sqlMdtX2hwKBgAhL
|
||||
hyQRO2vezgyVUN50A6WdZLq4lVZGIq/bqkzcWhopZaebDc4F5doASV9OGBsXkyI1
|
||||
EkePLTcA/NH6pVX0NQaEnfpG4To7k46R/PrBm3ATbyGONdEYjzX65VvytoJDKx4d
|
||||
pVrkKhZA5KaOdLcJ7hHHDSrv/qJXZbBn44rQ5guxAoGBAJ6oeUsUUETakxlmIhmK
|
||||
xuQmWqLf97BKt8r6Z8CqHKWK7vpG2OmgFYCQGaR7angQ8hmAOv6jM56XhoagDBoc
|
||||
UoaoEyo9/uCk6NRUkUMj7Tk/5UQSiWLceVH27w+icMFhf1b7EmmNfk+APsiathO5
|
||||
j4edf1AinVCPwRVVu1dtLL5P
|
||||
-----END PRIVATE KEY-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDAjCCAeoCCQCptj0+TjjIJjANBgkqhkiG9w0BAQsFADBDMREwDwYDVQQKDAhE
|
||||
TlNDcnlwdDEaMBgGA1UECwwRTG9jYWwgdGVzdCBzZXJ2ZXIxEjAQBgNVBAMMCWxv
|
||||
Y2FsaG9zdDAeFw0xOTExMTgxNDA2MzBaFw0zMzA3MjcxNDA2MzBaMEMxETAPBgNV
|
||||
BAoMCEROU0NyeXB0MRowGAYDVQQLDBFMb2NhbCB0ZXN0IHNlcnZlcjESMBAGA1UE
|
||||
AwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2+4O
|
||||
hEIW328ve5OKDG2U83Ytk1hQbO6iv0MIDrvi+hITRzztWRFNvb9ADxQRSTuc8sgJ
|
||||
PW2fZPbTwWwrlVSPYxFHyx91AoxEDxulyNsPvrlYdMgC73P7rqwr0R1ZZXv1e3AK
|
||||
IpJ3XNkoxEUFAC2wAiKsA+YNBe+wWMHLK3geBh9ud/0ekNvwiWeIzRz45KHDiYla
|
||||
93owNJJKDTF6RlF1nK1VZNtDNgNQnjuxwLpS0XJJRHaBxN7+OqY6dImStBfS8mV9
|
||||
VNapfuEC94kLbaGeLTIPN5RR0reBvT66SIc16/VuDVvNtn2kr6yMMDfyWyLVJkWu
|
||||
5/9/jJLiUCBhjQ2slwIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQA6Vz5HnGuy8jZz
|
||||
5i8ipbcDMCZNdpYYnxgD53hEKOfoSv7LaF0ztD8Kmg3s5LHv9EHlkK3+G6FWRGiP
|
||||
9f6IbtRITaiVQP3M13T78hpN5Qq5jgsqjR7ZcN7Etr6ZFd7G/0+mzqbyBuW/3szt
|
||||
RdX/YLy1csvjbZoNNuXGWRohXjg0Mjko2tRLmARvxA/gZV5zWycv3BD2BPzyCdS9
|
||||
MDMYSF0RPiL8+alfwLNqLcqMA5liHlmZa85uapQyoUI3ksKJkEgU53aD8cYhH9Yn
|
||||
6mVpsrvrcRLBiHlbi24QBolhFkCSRK8bXes8XDIPuD8iYRwlrVBwOakMFQWMqNfI
|
||||
IMOKJomU
|
||||
-----END CERTIFICATE-----
|
BIN
logo.png
Normal file
BIN
logo.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 58 KiB |
281
src/config.rs
Normal file
281
src/config.rs
Normal file
|
@ -0,0 +1,281 @@
|
|||
use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6, ToSocketAddrs};
|
||||
#[cfg(feature = "tls")]
|
||||
use std::path::PathBuf;
|
||||
use std::time::Duration;
|
||||
|
||||
use clap::{Arg, ArgAction::SetTrue};
|
||||
use libdoh::*;
|
||||
|
||||
use crate::constants::*;
|
||||
|
||||
pub fn parse_opts(globals: &mut Globals) {
|
||||
use crate::utils::{verify_remote_server, verify_sock_addr};
|
||||
|
||||
let max_clients = MAX_CLIENTS.to_string();
|
||||
let timeout_sec = TIMEOUT_SEC.to_string();
|
||||
let max_concurrent_streams = MAX_CONCURRENT_STREAMS.to_string();
|
||||
let min_ttl = MIN_TTL.to_string();
|
||||
let max_ttl = MAX_TTL.to_string();
|
||||
let err_ttl = ERR_TTL.to_string();
|
||||
|
||||
let _ = include_str!("../Cargo.toml");
|
||||
let options = command!()
|
||||
.arg(
|
||||
Arg::new("hostname")
|
||||
.short('H')
|
||||
.long("hostname")
|
||||
.num_args(1)
|
||||
.help("Host name (not IP address) DoH clients will use to connect"),
|
||||
)
|
||||
.arg(
|
||||
Arg::new("public_address")
|
||||
.short('g')
|
||||
.long("public-address")
|
||||
.num_args(1)
|
||||
.help("External IP address DoH clients will connect to"),
|
||||
)
|
||||
.arg(
|
||||
Arg::new("public_port")
|
||||
.short('j')
|
||||
.long("public-port")
|
||||
.num_args(1)
|
||||
.help("External port DoH clients will connect to, if not 443"),
|
||||
)
|
||||
.arg(
|
||||
Arg::new("listen_address")
|
||||
.short('l')
|
||||
.long("listen-address")
|
||||
.num_args(1)
|
||||
.default_value(LISTEN_ADDRESS)
|
||||
.value_parser(verify_sock_addr)
|
||||
.help("Address to listen to"),
|
||||
)
|
||||
.arg(
|
||||
Arg::new("server_address")
|
||||
.short('u')
|
||||
.long("server-address")
|
||||
.num_args(1)
|
||||
.default_value(SERVER_ADDRESS)
|
||||
.value_parser(verify_remote_server)
|
||||
.help("Address to connect to"),
|
||||
)
|
||||
.arg(
|
||||
Arg::new("local_bind_address")
|
||||
.short('b')
|
||||
.long("local-bind-address")
|
||||
.num_args(1)
|
||||
.value_parser(verify_sock_addr)
|
||||
.help("Address to connect from"),
|
||||
)
|
||||
.arg(
|
||||
Arg::new("path")
|
||||
.short('p')
|
||||
.long("path")
|
||||
.num_args(1)
|
||||
.default_value(PATH)
|
||||
.help("URI path"),
|
||||
)
|
||||
.arg(
|
||||
Arg::new("max_clients")
|
||||
.short('c')
|
||||
.long("max-clients")
|
||||
.num_args(1)
|
||||
.default_value(max_clients)
|
||||
.help("Maximum number of simultaneous clients"),
|
||||
)
|
||||
.arg(
|
||||
Arg::new("max_concurrent")
|
||||
.short('C')
|
||||
.long("max-concurrent")
|
||||
.num_args(1)
|
||||
.default_value(max_concurrent_streams)
|
||||
.help("Maximum number of concurrent requests per client"),
|
||||
)
|
||||
.arg(
|
||||
Arg::new("timeout")
|
||||
.short('t')
|
||||
.long("timeout")
|
||||
.num_args(1)
|
||||
.default_value(timeout_sec)
|
||||
.help("Timeout, in seconds"),
|
||||
)
|
||||
.arg(
|
||||
Arg::new("min_ttl")
|
||||
.short('T')
|
||||
.long("min-ttl")
|
||||
.num_args(1)
|
||||
.default_value(min_ttl)
|
||||
.help("Minimum TTL, in seconds"),
|
||||
)
|
||||
.arg(
|
||||
Arg::new("max_ttl")
|
||||
.short('X')
|
||||
.long("max-ttl")
|
||||
.num_args(1)
|
||||
.default_value(max_ttl)
|
||||
.help("Maximum TTL, in seconds"),
|
||||
)
|
||||
.arg(
|
||||
Arg::new("err_ttl")
|
||||
.short('E')
|
||||
.long("err-ttl")
|
||||
.num_args(1)
|
||||
.default_value(err_ttl)
|
||||
.help("TTL for errors, in seconds"),
|
||||
)
|
||||
.arg(
|
||||
Arg::new("disable_keepalive")
|
||||
.short('K')
|
||||
.action(SetTrue)
|
||||
.long("disable-keepalive")
|
||||
.help("Disable keepalive"),
|
||||
)
|
||||
.arg(
|
||||
Arg::new("disable_post")
|
||||
.short('P')
|
||||
.action(SetTrue)
|
||||
.long("disable-post")
|
||||
.help("Disable POST queries"),
|
||||
)
|
||||
.arg(
|
||||
Arg::new("allow_odoh_post")
|
||||
.short('O')
|
||||
.action(SetTrue)
|
||||
.long("allow-odoh-post")
|
||||
.help("Allow POST queries over ODoH even if they have been disabed for DoH"),
|
||||
);
|
||||
|
||||
#[cfg(feature = "tls")]
|
||||
let options = options
|
||||
.arg(
|
||||
Arg::new("tls_cert_path")
|
||||
.short('i')
|
||||
.long("tls-cert-path")
|
||||
.num_args(1)
|
||||
.help(
|
||||
"Path to the PEM/PKCS#8-encoded certificates (only required for built-in TLS)",
|
||||
),
|
||||
)
|
||||
.arg(
|
||||
Arg::new("tls_cert_key_path")
|
||||
.short('I')
|
||||
.long("tls-cert-key-path")
|
||||
.num_args(1)
|
||||
.help("Path to the PEM-encoded secret keys (only required for built-in TLS)"),
|
||||
);
|
||||
|
||||
let matches = options.get_matches();
|
||||
globals.listen_address = matches
|
||||
.get_one::<String>("listen_address")
|
||||
.unwrap()
|
||||
.parse()
|
||||
.unwrap();
|
||||
globals.server_address = matches
|
||||
.get_one::<String>("server_address")
|
||||
.unwrap()
|
||||
.to_socket_addrs()
|
||||
.unwrap()
|
||||
.next()
|
||||
.unwrap();
|
||||
globals.local_bind_address = match matches.get_one::<String>("local_bind_address") {
|
||||
Some(address) => address.parse().unwrap(),
|
||||
None => match globals.server_address {
|
||||
SocketAddr::V4(_) => SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, 0)),
|
||||
SocketAddr::V6(s) => SocketAddr::V6(SocketAddrV6::new(
|
||||
Ipv6Addr::UNSPECIFIED,
|
||||
0,
|
||||
s.flowinfo(),
|
||||
s.scope_id(),
|
||||
)),
|
||||
},
|
||||
};
|
||||
globals.path = matches.get_one::<String>("path").unwrap().to_string();
|
||||
if !globals.path.starts_with('/') {
|
||||
globals.path = format!("/{}", globals.path);
|
||||
}
|
||||
globals.max_clients = matches
|
||||
.get_one::<String>("max_clients")
|
||||
.unwrap()
|
||||
.parse()
|
||||
.unwrap();
|
||||
globals.timeout = Duration::from_secs(
|
||||
matches
|
||||
.get_one::<String>("timeout")
|
||||
.unwrap()
|
||||
.parse()
|
||||
.unwrap(),
|
||||
);
|
||||
globals.max_concurrent_streams = matches
|
||||
.get_one::<String>("max_concurrent")
|
||||
.unwrap()
|
||||
.parse()
|
||||
.unwrap();
|
||||
globals.min_ttl = matches
|
||||
.get_one::<String>("min_ttl")
|
||||
.unwrap()
|
||||
.parse()
|
||||
.unwrap();
|
||||
globals.max_ttl = matches
|
||||
.get_one::<String>("max_ttl")
|
||||
.unwrap()
|
||||
.parse()
|
||||
.unwrap();
|
||||
globals.err_ttl = matches
|
||||
.get_one::<String>("err_ttl")
|
||||
.unwrap()
|
||||
.parse()
|
||||
.unwrap();
|
||||
globals.keepalive = !matches.get_flag("disable_keepalive");
|
||||
globals.disable_post = matches.get_flag("disable_post");
|
||||
globals.allow_odoh_post = matches.get_flag("allow_odoh_post");
|
||||
|
||||
#[cfg(feature = "tls")]
|
||||
{
|
||||
globals.tls_cert_path = matches
|
||||
.get_one::<String>("tls_cert_path")
|
||||
.map(PathBuf::from);
|
||||
globals.tls_cert_key_path = matches
|
||||
.get_one::<String>("tls_cert_key_path")
|
||||
.map(PathBuf::from)
|
||||
.or_else(|| globals.tls_cert_path.clone());
|
||||
}
|
||||
|
||||
match matches.get_one::<String>("hostname") {
|
||||
Some(hostname) => {
|
||||
let mut builder =
|
||||
dnsstamps::DoHBuilder::new(hostname.to_string(), globals.path.to_string());
|
||||
if let Some(public_address) = matches.get_one::<String>("public_address") {
|
||||
builder = builder.with_address(public_address.to_string());
|
||||
}
|
||||
if let Some(public_port) = matches.get_one::<String>("public_port") {
|
||||
let public_port = public_port.parse().expect("Invalid public port");
|
||||
builder = builder.with_port(public_port);
|
||||
}
|
||||
println!(
|
||||
"Test DNS stamp to reach [{}] over DoH: [{}]\n",
|
||||
hostname,
|
||||
builder.serialize().unwrap()
|
||||
);
|
||||
|
||||
let mut builder =
|
||||
dnsstamps::ODoHTargetBuilder::new(hostname.to_string(), globals.path.to_string());
|
||||
if let Some(public_port) = matches.get_one::<String>("public_port") {
|
||||
let public_port = public_port.parse().expect("Invalid public port");
|
||||
builder = builder.with_port(public_port);
|
||||
}
|
||||
println!(
|
||||
"Test DNS stamp to reach [{}] over Oblivious DoH: [{}]\n",
|
||||
hostname,
|
||||
builder.serialize().unwrap()
|
||||
);
|
||||
|
||||
println!("Check out https://dnscrypt.info/stamps/ to compute the actual stamps.\n")
|
||||
}
|
||||
_ => {
|
||||
println!(
|
||||
"Please provide a fully qualified hostname (-H <hostname> command-line option) to get \
|
||||
test DNS stamps for your server.\n"
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
10
src/constants.rs
Normal file
10
src/constants.rs
Normal file
|
@ -0,0 +1,10 @@
|
|||
// Default socket address the proxy's HTTP(S) listener binds to.
pub const LISTEN_ADDRESS: &str = "127.0.0.1:3000";
// Default cap on simultaneous client connections.
pub const MAX_CLIENTS: usize = 512;
// Default HTTP/2 concurrent-stream limit per client.
pub const MAX_CONCURRENT_STREAMS: u32 = 16;
// Default URI path serving DoH queries.
pub const PATH: &str = "/dns-query";
// Well-known path where ODoH configs are published.
pub const ODOH_CONFIGS_PATH: &str = "/.well-known/odohconfigs";
// Default upstream DNS resolver (Quad9).
pub const SERVER_ADDRESS: &str = "9.9.9.9:53";
// Default upstream query timeout, in seconds.
pub const TIMEOUT_SEC: u64 = 10;
// Response TTLs are clamped into [MIN_TTL, MAX_TTL]; error responses
// use ERR_TTL. All values are in seconds.
pub const MAX_TTL: u32 = 86400 * 7;
pub const MIN_TTL: u32 = 10;
pub const ERR_TTL: u32 = 2;
|
127
src/dns.rs
127
src/dns.rs
|
@ -1,127 +0,0 @@
|
|||
const DNS_CLASS_IN: u16 = 1;
|
||||
const DNS_HEADER_SIZE: usize = 12;
|
||||
const DNS_MAX_HOSTNAME_LEN: usize = 256;
|
||||
const DNS_MAX_PACKET_SIZE: usize = 65_535;
|
||||
const DNS_OFFSET_QUESTION: usize = DNS_HEADER_SIZE;
|
||||
const DNS_TYPE_OPT: u16 = 41;
|
||||
|
||||
#[inline]
|
||||
fn qdcount(packet: &[u8]) -> u16 {
|
||||
(u16::from(packet[4]) << 8) | u16::from(packet[5])
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn ancount(packet: &[u8]) -> u16 {
|
||||
(u16::from(packet[6]) << 8) | u16::from(packet[7])
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn nscount(packet: &[u8]) -> u16 {
|
||||
(u16::from(packet[8]) << 8) | u16::from(packet[9])
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn arcount(packet: &[u8]) -> u16 {
|
||||
(u16::from(packet[10]) << 8) | u16::from(packet[11])
|
||||
}
|
||||
|
||||
fn skip_name(packet: &[u8], offset: usize) -> Result<(usize, u16), &'static str> {
|
||||
let packet_len = packet.len();
|
||||
if offset >= packet_len - 1 {
|
||||
return Err("Short packet");
|
||||
}
|
||||
let mut name_len: usize = 0;
|
||||
let mut offset = offset;
|
||||
let mut labels_count = 0u16;
|
||||
loop {
|
||||
let label_len = match packet[offset] {
|
||||
len if len & 0xc0 == 0xc0 => {
|
||||
if 2 > packet_len - offset {
|
||||
return Err("Incomplete offset");
|
||||
}
|
||||
offset += 2;
|
||||
break;
|
||||
}
|
||||
len if len > 0x3f => return Err("Label too long"),
|
||||
len => len,
|
||||
} as usize;
|
||||
if label_len >= packet_len - offset - 1 {
|
||||
return Err("Malformed packet with an out-of-bounds name");
|
||||
}
|
||||
name_len += label_len + 1;
|
||||
if name_len > DNS_MAX_HOSTNAME_LEN {
|
||||
return Err("Name too long");
|
||||
}
|
||||
offset += label_len + 1;
|
||||
if label_len == 0 {
|
||||
break;
|
||||
}
|
||||
labels_count += 1;
|
||||
}
|
||||
Ok((offset, labels_count))
|
||||
}
|
||||
|
||||
pub fn min_ttl(
|
||||
packet: &[u8],
|
||||
min_ttl: u32,
|
||||
max_ttl: u32,
|
||||
failure_ttl: u32,
|
||||
) -> Result<u32, &'static str> {
|
||||
if qdcount(packet) != 1 {
|
||||
return Err("Unsupported number of questions");
|
||||
}
|
||||
let packet_len = packet.len();
|
||||
if packet_len <= DNS_OFFSET_QUESTION {
|
||||
return Err("Short packet");
|
||||
}
|
||||
if packet_len >= DNS_MAX_PACKET_SIZE {
|
||||
return Err("Large packet");
|
||||
}
|
||||
let mut offset = match skip_name(packet, DNS_OFFSET_QUESTION) {
|
||||
Ok(offset) => offset.0,
|
||||
Err(e) => return Err(e),
|
||||
};
|
||||
assert!(offset > DNS_OFFSET_QUESTION);
|
||||
if 4 > packet_len - offset {
|
||||
return Err("Short packet");
|
||||
}
|
||||
offset += 4;
|
||||
let ancount = ancount(packet);
|
||||
let nscount = nscount(packet);
|
||||
let arcount = arcount(packet);
|
||||
let rrcount = ancount + nscount + arcount;
|
||||
let mut found_min_ttl = if rrcount > 0 { max_ttl } else { failure_ttl };
|
||||
for _ in 0..rrcount {
|
||||
offset = match skip_name(packet, offset) {
|
||||
Ok(offset) => offset.0,
|
||||
Err(e) => return Err(e),
|
||||
};
|
||||
if 10 > packet_len - offset {
|
||||
return Err("Short packet");
|
||||
}
|
||||
let qtype = u16::from(packet[offset]) << 8 | u16::from(packet[offset + 1]);
|
||||
let qclass = u16::from(packet[offset + 2]) << 8 | u16::from(packet[offset + 3]);
|
||||
let ttl = u32::from(packet[offset + 4]) << 24
|
||||
| u32::from(packet[offset + 5]) << 16
|
||||
| u32::from(packet[offset + 6]) << 8
|
||||
| u32::from(packet[offset + 7]);
|
||||
let rdlen = (u16::from(packet[offset + 8]) << 8 | u16::from(packet[offset + 9])) as usize;
|
||||
offset += 10;
|
||||
if !(qtype == DNS_TYPE_OPT && qclass == DNS_CLASS_IN) {
|
||||
if ttl < found_min_ttl {
|
||||
found_min_ttl = ttl;
|
||||
}
|
||||
}
|
||||
if rdlen > packet_len - offset {
|
||||
return Err("Record length would exceed packet length");
|
||||
}
|
||||
offset += rdlen;
|
||||
}
|
||||
if found_min_ttl < min_ttl {
|
||||
found_min_ttl = min_ttl;
|
||||
}
|
||||
if offset != packet_len {
|
||||
return Err("Garbage after packet");
|
||||
}
|
||||
Ok(found_min_ttl)
|
||||
}
|
49
src/libdoh/Cargo.toml
Normal file
49
src/libdoh/Cargo.toml
Normal file
|
@ -0,0 +1,49 @@
|
|||
[package]
|
||||
name = "libdoh"
|
||||
version = "0.9.11"
|
||||
authors = ["Frank Denis <github@pureftpd.org>"]
|
||||
description = "DoH and Oblivious DoH library for the rust-doh app"
|
||||
keywords = ["dns", "https", "doh", "odoh", "proxy"]
|
||||
license = "MIT"
|
||||
homepage = "https://github.com/jedisct1/rust-doh"
|
||||
repository = "https://github.com/jedisct1/rust-doh"
|
||||
categories = ["asynchronous", "network-programming", "command-line-utilities"]
|
||||
edition = "2018"
|
||||
|
||||
[features]
|
||||
default = ["tls"]
|
||||
tls = ["tokio-rustls"]
|
||||
|
||||
[dependencies]
|
||||
anyhow = "1.0.97"
|
||||
arc-swap = "1.7.1"
|
||||
base64 = "0.22.1"
|
||||
byteorder = "1.5.0"
|
||||
bytes = "1.10.1"
|
||||
futures = "0.3.31"
|
||||
hyper = { version = "^0.14.32", default-features = false, features = [
|
||||
"server",
|
||||
"http1",
|
||||
"http2",
|
||||
"stream",
|
||||
"runtime",
|
||||
] }
|
||||
odoh-rs = "1.0.3"
|
||||
rand = "^0.8.5"
|
||||
tokio = { version = "1.44.1", features = [
|
||||
"net",
|
||||
"rt-multi-thread",
|
||||
"time",
|
||||
"sync",
|
||||
] }
|
||||
tokio-rustls = { version = "^0.24.1", features = [
|
||||
"early-data",
|
||||
], optional = true }
|
||||
rustls-pemfile = "^1.0.4"
|
||||
|
||||
[profile.release]
|
||||
codegen-units = 1
|
||||
incremental = false
|
||||
lto = "fat"
|
||||
opt-level = 3
|
||||
panic = "abort"
|
21
src/libdoh/LICENSE
Normal file
21
src/libdoh/LICENSE
Normal file
|
@ -0,0 +1,21 @@
|
|||
MIT License
|
||||
|
||||
Copyright (c) 2018-2025 Frank Denis
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
9
src/libdoh/src/constants.rs
Normal file
9
src/libdoh/src/constants.rs
Normal file
|
@ -0,0 +1,9 @@
|
|||
// Query-string parameter name carrying the DNS query on GET requests.
pub const DNS_QUERY_PARAM: &str = "dns";
// Maximum accepted size of an incoming DNS query, in bytes.
pub const MAX_DNS_QUESTION_LEN: usize = 512;
// Maximum accepted size of an upstream DNS response, in bytes.
pub const MAX_DNS_RESPONSE_LEN: usize = 4096;
// Smallest packet this server treats as a plausible DNS message.
pub const MIN_DNS_PACKET_LEN: usize = 17;
// Cache directives advertised to HTTP clients (RFC 5861 semantics).
pub const STALE_IF_ERROR_SECS: u32 = 86400;
pub const STALE_WHILE_REVALIDATE_SECS: u32 = 60;
// Interval between checks of the TLS certificate files for changes.
pub const CERTS_WATCH_DELAY_SECS: u32 = 10;
// Interval between rotations of the ODoH key material.
pub const ODOH_KEY_ROTATION_SECS: u32 = 86400;
// NOTE(review): presumably the ratio balancing UDP vs TCP upstream
// queries — confirm against the transport code before relying on it.
pub const UDP_TCP_RATIO: usize = 8;
|
291
src/libdoh/src/dns.rs
Normal file
291
src/libdoh/src/dns.rs
Normal file
|
@ -0,0 +1,291 @@
|
|||
use anyhow::{ensure, Error};
|
||||
use byteorder::{BigEndian, ByteOrder};
|
||||
|
||||
const DNS_HEADER_SIZE: usize = 12;
|
||||
pub const DNS_OFFSET_FLAGS: usize = 2;
|
||||
const DNS_MAX_HOSTNAME_SIZE: usize = 256;
|
||||
const DNS_MAX_PACKET_SIZE: usize = 4096;
|
||||
const DNS_OFFSET_QUESTION: usize = DNS_HEADER_SIZE;
|
||||
|
||||
const DNS_FLAGS_TC: u16 = 1u16 << 9;
|
||||
|
||||
const DNS_TYPE_OPT: u16 = 41;
|
||||
|
||||
const DNS_PTYPE_PADDING: u16 = 12;
|
||||
|
||||
const DNS_RCODE_SERVFAIL: u8 = 2;
|
||||
const DNS_RCODE_REFUSED: u8 = 5;
|
||||
|
||||
/// Extracts the response code from a raw DNS packet header.
#[inline]
pub fn rcode(packet: &[u8]) -> u8 {
    // The RCODE occupies the low nibble of the fourth header byte.
    let flags_low_byte = packet[3];
    flags_low_byte & 0x0f
}
|
||||
|
||||
/// Number of entries in the question section (QDCOUNT, header bytes 4-5).
#[inline]
pub fn qdcount(packet: &[u8]) -> u16 {
    BigEndian::read_u16(&packet[4..])
}
|
||||
|
||||
/// Number of records in the answer section (ANCOUNT, header bytes 6-7).
#[inline]
pub fn ancount(packet: &[u8]) -> u16 {
    BigEndian::read_u16(&packet[6..])
}
|
||||
|
||||
/// Number of records in the additional section (ARCOUNT, header bytes 10-11).
#[inline]
pub fn arcount(packet: &[u8]) -> u16 {
    BigEndian::read_u16(&packet[10..])
}
|
||||
|
||||
/// Increments the ARCOUNT header field in place, failing if the
/// 16-bit counter would overflow.
fn arcount_inc(packet: &mut [u8]) -> Result<(), Error> {
    let mut arcount = arcount(packet);
    ensure!(arcount < 0xffff, "Too many additional records");
    arcount += 1;
    BigEndian::write_u16(&mut packet[10..], arcount);
    Ok(())
}
|
||||
|
||||
/// Number of records in the authority section (NSCOUNT, header bytes 8-9).
#[inline]
fn nscount(packet: &[u8]) -> u16 {
    BigEndian::read_u16(&packet[8..])
}
|
||||
|
||||
/// Returns true when the response RCODE is SERVFAIL or REFUSED.
#[inline]
pub fn is_recoverable_error(packet: &[u8]) -> bool {
    let rcode = rcode(packet);
    rcode == DNS_RCODE_SERVFAIL || rcode == DNS_RCODE_REFUSED
}
|
||||
|
||||
/// Returns true when the TC (truncated) flag is set in the header.
#[inline]
pub fn is_truncated(packet: &[u8]) -> bool {
    BigEndian::read_u16(&packet[DNS_OFFSET_FLAGS..]) & DNS_FLAGS_TC == DNS_FLAGS_TC
}
|
||||
|
||||
/// Advances past a (possibly compressed) DNS name starting at `offset`
/// and returns the offset of the first byte after the name.
///
/// Errors if the name would run past the end of the packet, a label
/// exceeds 63 bytes, or the name exceeds `DNS_MAX_HOSTNAME_SIZE`.
fn skip_name(packet: &[u8], offset: usize) -> Result<usize, Error> {
    let packet_len = packet.len();
    ensure!(offset < packet_len - 1, "Short packet");
    // Running total of name bytes seen, used to bound the name length.
    let mut qname_len: usize = 0;
    let mut offset = offset;
    loop {
        let label_len = match packet[offset] as usize {
            // The two high bits set mark a 2-byte compression pointer;
            // the name ends here (the target is not followed).
            label_len if label_len & 0xc0 == 0xc0 => {
                ensure!(packet_len - offset >= 2, "Incomplete offset");
                offset += 2;
                break;
            }
            label_len => label_len,
        } as usize;
        ensure!(label_len < 0x40, "Long label");
        ensure!(
            packet_len - offset - 1 > label_len,
            "Malformed packet with an out-of-bounds name"
        );
        qname_len += label_len + 1;
        ensure!(qname_len <= DNS_MAX_HOSTNAME_SIZE, "Name too long");
        offset += label_len + 1;
        // A zero-length label (root) terminates the name.
        if label_len == 0 {
            break;
        }
    }
    Ok(offset)
}
|
||||
|
||||
/// Walks `rrcount` resource records starting at `offset`, invoking
/// `cb` with the offset of each record's fixed fields (just past its
/// name). Returns the offset of the first byte after the last record.
fn traverse_rrs<F: FnMut(usize) -> Result<(), Error>>(
    packet: &[u8],
    mut offset: usize,
    rrcount: usize,
    mut cb: F,
) -> Result<usize, Error> {
    let packet_len = packet.len();
    for _ in 0..rrcount {
        offset = skip_name(packet, offset)?;
        // TYPE(2) + CLASS(2) + TTL(4) + RDLENGTH(2) = 10 fixed bytes.
        ensure!(packet_len - offset >= 10, "Short packet");
        cb(offset)?;
        let rdlen = BigEndian::read_u16(&packet[offset + 8..]) as usize;
        offset += 10;
        ensure!(
            packet_len - offset >= rdlen,
            "Record length would exceed packet length"
        );
        offset += rdlen;
    }
    Ok(offset)
}
|
||||
|
||||
/// Mutable twin of `traverse_rrs`: walks `rrcount` resource records,
/// handing `cb` the whole packet plus the offset of each record's
/// fixed fields so the callback may patch the record in place.
fn traverse_rrs_mut<F: FnMut(&mut [u8], usize) -> Result<(), Error>>(
    packet: &mut [u8],
    mut offset: usize,
    rrcount: usize,
    mut cb: F,
) -> Result<usize, Error> {
    let packet_len = packet.len();
    for _ in 0..rrcount {
        offset = skip_name(packet, offset)?;
        // TYPE(2) + CLASS(2) + TTL(4) + RDLENGTH(2) = 10 fixed bytes.
        ensure!(packet_len - offset >= 10, "Short packet");
        cb(packet, offset)?;
        let rdlen = BigEndian::read_u16(&packet[offset + 8..]) as usize;
        offset += 10;
        ensure!(
            packet_len - offset >= rdlen,
            "Record length would exceed packet length"
        );
        offset += rdlen;
    }
    Ok(offset)
}
|
||||
|
||||
/// Returns the smallest TTL across all resource records (OPT
/// pseudo-records excluded), clamped to at least `min_ttl`. A
/// response with records starts from `max_ttl`; one with no records
/// yields `failure_ttl` (also subject to the `min_ttl` clamp).
pub fn min_ttl(packet: &[u8], min_ttl: u32, max_ttl: u32, failure_ttl: u32) -> Result<u32, Error> {
    let packet_len = packet.len();
    ensure!(packet_len > DNS_OFFSET_QUESTION, "Short packet");
    ensure!(packet_len <= DNS_MAX_PACKET_SIZE, "Large packet");
    ensure!(qdcount(packet) == 1, "No question");
    let mut offset = skip_name(packet, DNS_OFFSET_QUESTION)?;
    assert!(offset > DNS_OFFSET_QUESTION);
    ensure!(packet_len - offset > 4, "Short packet");
    // Skip the question's TYPE and CLASS fields.
    offset += 4;
    let (ancount, nscount, arcount) = (ancount(packet), nscount(packet), arcount(packet));
    let rrcount = ancount as usize + nscount as usize + arcount as usize;
    let mut found_min_ttl = if rrcount > 0 { max_ttl } else { failure_ttl };

    offset = traverse_rrs(packet, offset, rrcount, |offset| {
        let qtype = BigEndian::read_u16(&packet[offset..]);
        let ttl = BigEndian::read_u32(&packet[offset + 4..]);
        // OPT records repurpose the TTL field for EDNS data, so they
        // must not participate in the minimum.
        if qtype != DNS_TYPE_OPT && ttl < found_min_ttl {
            found_min_ttl = ttl;
        }
        Ok(())
    })?;
    if found_min_ttl < min_ttl {
        found_min_ttl = min_ttl;
    }
    ensure!(packet_len == offset, "Garbage after packet");
    Ok(found_min_ttl)
}
|
||||
|
||||
/// Appends a minimal OPT pseudo-record advertising `max_payload_size`
/// as the EDNS maximum UDP payload size, and bumps ARCOUNT.
fn add_edns_section(packet: &mut Vec<u8>, max_payload_size: u16) -> Result<(), Error> {
    // Layout: root name (1) + TYPE (2) + CLASS/payload size (2) +
    // extended RCODE/version/flags (4) + RDLENGTH=0 (2) = 11 bytes.
    let opt_rr: [u8; 11] = [
        0,
        (DNS_TYPE_OPT >> 8) as u8,
        DNS_TYPE_OPT as u8,
        (max_payload_size >> 8) as u8,
        max_payload_size as u8,
        0,
        0,
        0,
        0,
        0,
        0,
    ];
    ensure!(
        DNS_MAX_PACKET_SIZE - packet.len() >= opt_rr.len(),
        "Packet would be too large to add a new record"
    );
    arcount_inc(packet)?;
    packet.extend(opt_rr);
    Ok(())
}
|
||||
|
||||
/// Sets the EDNS maximum UDP payload size on the packet's OPT record,
/// appending a fresh OPT record when none is present.
pub fn set_edns_max_payload_size(packet: &mut Vec<u8>, max_payload_size: u16) -> Result<(), Error> {
    let packet_len = packet.len();
    ensure!(packet_len > DNS_OFFSET_QUESTION, "Short packet");
    ensure!(packet_len <= DNS_MAX_PACKET_SIZE, "Large packet");
    ensure!(qdcount(packet) == 1, "No question");
    // Skip the question name plus its TYPE/CLASS fields.
    let mut offset = skip_name(packet, DNS_OFFSET_QUESTION)?;
    assert!(offset > DNS_OFFSET_QUESTION);
    ensure!(packet_len - offset >= 4, "Short packet");
    offset += 4;
    let (ancount, nscount, arcount) = (ancount(packet), nscount(packet), arcount(packet));
    // Skip over the answer and authority sections without inspecting them.
    offset = traverse_rrs(
        packet,
        offset,
        ancount as usize + nscount as usize,
        |_offset| Ok(()),
    )?;
    let mut edns_payload_set = false;
    // The OPT record lives in the additional section; patch its CLASS
    // field, which EDNS repurposes as the max UDP payload size.
    traverse_rrs_mut(packet, offset, arcount as _, |packet, offset| {
        let qtype = BigEndian::read_u16(&packet[offset..]);
        if qtype == DNS_TYPE_OPT {
            ensure!(!edns_payload_set, "Duplicate OPT RR found");
            BigEndian::write_u16(&mut packet[offset + 2..], max_payload_size);
            edns_payload_set = true;
        }
        Ok(())
    })?;
    if edns_payload_set {
        return Ok(());
    }
    // No OPT record found: append a new EDNS section instead.
    add_edns_section(packet, max_payload_size)?;
    Ok(())
}
|
||||
|
||||
fn padded_len(unpadded_len: usize) -> usize {
|
||||
const BOUNDARIES: [usize; 16] = [
|
||||
64, 128, 192, 256, 320, 384, 512, 704, 768, 896, 960, 1024, 1088, 1152, 2688, 4080,
|
||||
];
|
||||
BOUNDARIES
|
||||
.iter()
|
||||
.find(|&&boundary| boundary >= unpadded_len)
|
||||
.copied()
|
||||
.unwrap_or(DNS_MAX_PACKET_SIZE)
|
||||
}
|
||||
|
||||
/// Pads the packet to the next size boundary with an EDNS Padding
/// option (RFC 7830), appending an OPT record first when necessary.
pub fn add_edns_padding(packet: &mut Vec<u8>) -> Result<(), Error> {
    let mut packet_len = packet.len();
    ensure!(packet_len > DNS_OFFSET_QUESTION, "Short packet");
    ensure!(packet_len <= DNS_MAX_PACKET_SIZE, "Large packet");
    ensure!(qdcount(packet) == 1, "No question");
    // Skip the question name plus its TYPE/CLASS fields.
    let mut offset = skip_name(packet, DNS_OFFSET_QUESTION)?;
    assert!(offset > DNS_OFFSET_QUESTION);
    ensure!(packet_len - offset >= 4, "Short packet");
    offset += 4;
    let (ancount, nscount, arcount) = (ancount(packet), nscount(packet), arcount(packet));
    // Skip over the answer and authority sections.
    offset = traverse_rrs(
        packet,
        offset,
        ancount as usize + nscount as usize,
        |_offset| Ok(()),
    )?;
    // Locate the OPT record (offset of its fixed fields), if present.
    let mut edns_offset = None;
    traverse_rrs_mut(packet, offset, arcount as _, |packet, offset| {
        let qtype = BigEndian::read_u16(&packet[offset..]);
        if qtype == DNS_TYPE_OPT {
            ensure!(edns_offset.is_none(), "Duplicate OPT RR found");
            edns_offset = Some(offset)
        }
        Ok(())
    })?;
    let edns_offset = match edns_offset {
        Some(edns_offset) => edns_offset,
        None => {
            // The appended OPT record has a 1-byte root name, so its
            // fixed fields start one byte past the current end.
            let edns_offset = packet.len() + 1;
            add_edns_section(packet, DNS_MAX_PACKET_SIZE as _)?;
            packet_len = packet.len();
            edns_offset
        }
    };
    // Build the Padding option: OPTION-CODE(2) + OPTION-LENGTH(2) +
    // `padding_len` filler bytes.
    let padding_len = padded_len(packet_len) - packet_len;
    let mut edns_padding_prr = vec![b'X'; 4 + padding_len];
    BigEndian::write_u16(&mut edns_padding_prr[0..], DNS_PTYPE_PADDING);
    BigEndian::write_u16(&mut edns_padding_prr[2..], padding_len as u16);
    let edns_padding_prr_len = edns_padding_prr.len();
    // Grow the OPT record's RDLENGTH to cover the new option, with
    // bounds checks before touching anything.
    let edns_rdlen_offset: usize = edns_offset + 8;
    ensure!(packet_len - edns_rdlen_offset >= 2, "Short packet");
    let edns_rdlen = BigEndian::read_u16(&packet[edns_rdlen_offset..]);
    ensure!(
        edns_offset + edns_rdlen as usize <= packet_len,
        "Out of range EDNS size"
    );
    ensure!(
        0xffff - edns_rdlen as usize >= edns_padding_prr_len,
        "EDNS section too large for padding"
    );
    ensure!(
        DNS_MAX_PACKET_SIZE - packet_len >= edns_padding_prr_len,
        "Large packet"
    );
    BigEndian::write_u16(
        &mut packet[edns_rdlen_offset..],
        edns_rdlen + edns_padding_prr_len as u16,
    );
    packet.extend(&edns_padding_prr);
    Ok(())
}
|
53
src/libdoh/src/errors.rs
Normal file
53
src/libdoh/src/errors.rs
Normal file
|
@ -0,0 +1,53 @@
|
|||
use std::io;
|
||||
|
||||
use hyper::StatusCode;
|
||||
|
||||
/// Errors surfaced while serving DoH/ODoH requests; each variant maps
/// to an HTTP status code when returned to a client.
#[derive(Debug)]
pub enum DoHError {
    Incomplete,
    InvalidData,
    TooLarge,
    UpstreamIssue,
    UpstreamTimeout,
    StaleKey,
    Hyper(hyper::Error),
    Io(io::Error),
    ODoHConfigError(anyhow::Error),
    TooManyTcpSessions,
}
|
||||
|
||||
impl std::error::Error for DoHError {}
|
||||
|
||||
impl std::fmt::Display for DoHError {
    /// Human-readable message, suitable for logs and error bodies.
    fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
        match self {
            DoHError::Incomplete => write!(fmt, "Incomplete"),
            DoHError::InvalidData => write!(fmt, "Invalid data"),
            DoHError::TooLarge => write!(fmt, "Too large"),
            DoHError::UpstreamIssue => write!(fmt, "Upstream error"),
            DoHError::UpstreamTimeout => write!(fmt, "Upstream timeout"),
            DoHError::StaleKey => write!(fmt, "Stale key material"),
            DoHError::Hyper(e) => write!(fmt, "HTTP error: {e}"),
            DoHError::Io(e) => write!(fmt, "IO error: {e}"),
            DoHError::ODoHConfigError(e) => write!(fmt, "ODoH config error: {e}"),
            DoHError::TooManyTcpSessions => write!(fmt, "Too many TCP sessions"),
        }
    }
}
|
||||
|
||||
/// Maps each server-side error to the HTTP status code sent to the client.
impl From<DoHError> for StatusCode {
    fn from(e: DoHError) -> StatusCode {
        match e {
            DoHError::Incomplete => StatusCode::UNPROCESSABLE_ENTITY,
            DoHError::InvalidData => StatusCode::BAD_REQUEST,
            DoHError::TooLarge => StatusCode::PAYLOAD_TOO_LARGE,
            DoHError::UpstreamIssue => StatusCode::BAD_GATEWAY,
            DoHError::UpstreamTimeout => StatusCode::BAD_GATEWAY,
            DoHError::StaleKey => StatusCode::UNAUTHORIZED,
            DoHError::Hyper(_) => StatusCode::SERVICE_UNAVAILABLE,
            DoHError::Io(_) => StatusCode::INTERNAL_SERVER_ERROR,
            DoHError::ODoHConfigError(_) => StatusCode::INTERNAL_SERVER_ERROR,
            DoHError::TooManyTcpSessions => StatusCode::SERVICE_UNAVAILABLE,
        }
    }
}
|
64
src/libdoh/src/globals.rs
Normal file
64
src/libdoh/src/globals.rs
Normal file
|
@ -0,0 +1,64 @@
|
|||
use std::net::SocketAddr;
|
||||
#[cfg(feature = "tls")]
|
||||
use std::path::PathBuf;
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use tokio::runtime;
|
||||
|
||||
use crate::odoh::ODoHRotator;
|
||||
|
||||
/// Read-mostly server configuration and shared state, built once at startup
/// and passed around behind an `Arc`.
#[derive(Debug)]
pub struct Globals {
    // PEM certificate chain path; TLS is only enabled when both paths are set.
    #[cfg(feature = "tls")]
    pub tls_cert_path: Option<PathBuf>,

    // PEM private key path; TLS is only enabled when both paths are set.
    #[cfg(feature = "tls")]
    pub tls_cert_key_path: Option<PathBuf>,

    // Address the HTTP(S) server listens on.
    pub listen_address: SocketAddr,
    // Local address the upstream UDP sockets are bound to.
    pub local_bind_address: SocketAddr,
    // Address of the upstream DNS resolver.
    pub server_address: SocketAddr,
    // URI path serving DoH queries (e.g. "/dns-query").
    pub path: String,
    // Maximum number of simultaneous clients.
    pub max_clients: usize,
    // Timeout applied to upstream DNS exchanges.
    pub timeout: Duration,
    // Live counter of connected clients (shared across clones).
    pub clients_count: ClientsCount,
    // HTTP/2 maximum concurrent streams per connection.
    pub max_concurrent_streams: u32,
    // Lower clamp for TTLs advertised via Cache-Control.
    pub min_ttl: u32,
    // Upper clamp for TTLs advertised via Cache-Control.
    pub max_ttl: u32,
    // TTL used for recoverable DNS error responses.
    pub err_ttl: u32,
    // Whether HTTP/1.x keep-alive is enabled.
    pub keepalive: bool,
    // Reject plain DoH POST requests when true.
    pub disable_post: bool,
    // Allow ODoH POST even when plain POST is disabled.
    pub allow_odoh_post: bool,
    // URI path serving the serialized ODoH configs.
    pub odoh_configs_path: String,
    // Rotating ODoH key material.
    pub odoh_rotator: Arc<ODoHRotator>,

    // Handle to the Tokio runtime used to spawn per-connection tasks.
    pub runtime_handle: runtime::Handle,
}
|
||||
|
||||
/// Thread-safe counter of currently-connected clients. Cloning is cheap and
/// all clones share the same underlying atomic.
#[derive(Debug, Clone, Default)]
pub struct ClientsCount(Arc<AtomicUsize>);

impl ClientsCount {
    /// Returns the current number of clients.
    pub fn current(&self) -> usize {
        self.0.load(Ordering::Relaxed)
    }

    /// Increments the counter and returns the *previous* value.
    pub fn increment(&self) -> usize {
        self.0.fetch_add(1, Ordering::Relaxed)
    }

    /// Decrements the counter without going below zero, returning the
    /// *previous* value (0 when the counter was already 0).
    pub fn decrement(&self) -> usize {
        // `fetch_update` runs the CAS retry loop for us; `checked_sub`
        // makes the decrement saturate at zero instead of wrapping.
        match self
            .0
            .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |count| {
                count.checked_sub(1)
            }) {
            Ok(prev) => prev,
            // The closure returned `None`, i.e. the counter was 0 and
            // was left unchanged.
            Err(prev) => prev,
        }
    }
}
|
526
src/libdoh/src/lib.rs
Normal file
526
src/libdoh/src/lib.rs
Normal file
|
@ -0,0 +1,526 @@
|
|||
mod constants;
|
||||
pub mod dns;
|
||||
mod errors;
|
||||
mod globals;
|
||||
pub mod odoh;
|
||||
#[cfg(feature = "tls")]
|
||||
mod tls;
|
||||
|
||||
use std::net::SocketAddr;
|
||||
use std::pin::Pin;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use base64::engine::Engine;
|
||||
use byteorder::{BigEndian, ByteOrder};
|
||||
use futures::prelude::*;
|
||||
use futures::task::{Context, Poll};
|
||||
use hyper::http;
|
||||
use hyper::server::conn::Http;
|
||||
use hyper::{Body, HeaderMap, Method, Request, Response, StatusCode};
|
||||
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
|
||||
use tokio::net::{TcpListener, TcpSocket, UdpSocket};
|
||||
use tokio::runtime;
|
||||
|
||||
use crate::constants::*;
|
||||
pub use crate::errors::*;
|
||||
pub use crate::globals::*;
|
||||
|
||||
/// Re-exports for binaries embedding this library, so they can use the exact
/// same dependency versions without declaring them separately.
pub mod reexports {
    pub use tokio;
}
|
||||
|
||||
// URL-safe base64 engine that encodes without padding; `Indifferent` decode
// mode accepts both padded and unpadded input from clients.
const BASE64_URL_SAFE_NO_PAD: base64::engine::GeneralPurpose =
    base64::engine::general_purpose::GeneralPurpose::new(
        &base64::alphabet::URL_SAFE,
        base64::engine::general_purpose::GeneralPurposeConfig::new()
            .with_encode_padding(false)
            .with_decode_padding_mode(base64::engine::DecodePaddingMode::Indifferent),
    );
|
||||
|
||||
/// A wire-format DNS response from the upstream resolver, plus the TTL used
/// for the HTTP `Cache-Control: max-age` header.
#[derive(Clone, Debug)]
struct DnsResponse {
    // Raw DNS message bytes.
    packet: Vec<u8>,
    // Cache lifetime in seconds, derived from the response records in `_proxy`.
    ttl: u32,
}
|
||||
|
||||
/// Which flavor of DNS-over-HTTPS a request uses, negotiated through its
/// MIME content type.
#[derive(Clone, Debug)]
enum DoHType {
    Standard,
    Oblivious,
}

impl DoHType {
    /// Returns the MIME content type associated with this flavor.
    fn as_str(&self) -> String {
        let mime = match self {
            DoHType::Standard => "application/dns-message",
            DoHType::Oblivious => "application/oblivious-dns-message",
        };
        mime.to_string()
    }
}
|
||||
|
||||
/// The DoH service. Cloned once per connection; cheap to clone since it only
/// holds a shared pointer to the configuration.
#[derive(Clone, Debug)]
pub struct DoH {
    pub globals: Arc<Globals>,
}
|
||||
|
||||
#[allow(clippy::unnecessary_wraps)]
|
||||
fn http_error(status_code: StatusCode) -> Result<Response<Body>, http::Error> {
|
||||
let response = Response::builder()
|
||||
.status(status_code)
|
||||
.body(Body::empty())
|
||||
.unwrap();
|
||||
Ok(response)
|
||||
}
|
||||
|
||||
/// Hyper executor that spawns futures onto a caller-provided Tokio runtime
/// rather than the ambient default.
#[derive(Clone, Debug)]
pub struct LocalExecutor {
    runtime_handle: runtime::Handle,
}
|
||||
|
||||
impl LocalExecutor {
|
||||
fn new(runtime_handle: runtime::Handle) -> Self {
|
||||
LocalExecutor { runtime_handle }
|
||||
}
|
||||
}
|
||||
|
||||
impl<F> hyper::rt::Executor<F> for LocalExecutor
|
||||
where
|
||||
F: std::future::Future + Send + 'static,
|
||||
F::Output: Send,
|
||||
{
|
||||
fn execute(&self, fut: F) {
|
||||
self.runtime_handle.spawn(fut);
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(clippy::type_complexity)]
|
||||
impl hyper::service::Service<http::Request<Body>> for DoH {
|
||||
type Error = http::Error;
|
||||
type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send>>;
|
||||
type Response = Response<Body>;
|
||||
|
||||
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
Poll::Ready(Ok(()))
|
||||
}
|
||||
|
||||
fn call(&mut self, req: Request<Body>) -> Self::Future {
|
||||
let globals = &self.globals;
|
||||
let self_inner = self.clone();
|
||||
if req.uri().path() == globals.path {
|
||||
match *req.method() {
|
||||
Method::POST => Box::pin(async move { self_inner.serve_post(req).await }),
|
||||
Method::GET => Box::pin(async move { self_inner.serve_get(req).await }),
|
||||
_ => Box::pin(async { http_error(StatusCode::METHOD_NOT_ALLOWED) }),
|
||||
}
|
||||
} else if req.uri().path() == globals.odoh_configs_path {
|
||||
match *req.method() {
|
||||
Method::GET => Box::pin(async move { self_inner.serve_odoh_configs().await }),
|
||||
_ => Box::pin(async { http_error(StatusCode::METHOD_NOT_ALLOWED) }),
|
||||
}
|
||||
} else {
|
||||
Box::pin(async { http_error(StatusCode::NOT_FOUND) })
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl DoH {
|
||||
async fn serve_get(&self, req: Request<Body>) -> Result<Response<Body>, http::Error> {
|
||||
match Self::parse_content_type(&req) {
|
||||
Ok(DoHType::Standard) => self.serve_doh_get(req).await,
|
||||
Ok(DoHType::Oblivious) => self.serve_odoh_get(req).await,
|
||||
Err(response) => Ok(response),
|
||||
}
|
||||
}
|
||||
|
||||
async fn serve_post(&self, req: Request<Body>) -> Result<Response<Body>, http::Error> {
|
||||
match Self::parse_content_type(&req) {
|
||||
Ok(DoHType::Standard) => self.serve_doh_post(req).await,
|
||||
Ok(DoHType::Oblivious) => self.serve_odoh_post(req).await,
|
||||
Err(response) => Ok(response),
|
||||
}
|
||||
}
|
||||
|
||||
async fn serve_doh_query(&self, query: Vec<u8>) -> Result<Response<Body>, http::Error> {
|
||||
let resp = match self.proxy(query).await {
|
||||
Ok(resp) => {
|
||||
self.build_response(resp.packet, resp.ttl, DoHType::Standard.as_str(), true)
|
||||
}
|
||||
Err(e) => return http_error(StatusCode::from(e)),
|
||||
};
|
||||
match resp {
|
||||
Ok(resp) => Ok(resp),
|
||||
Err(e) => http_error(StatusCode::from(e)),
|
||||
}
|
||||
}
|
||||
|
||||
fn query_from_query_string(&self, req: Request<Body>) -> Option<Vec<u8>> {
|
||||
let http_query = req.uri().query().unwrap_or("");
|
||||
let mut question_str = None;
|
||||
for parts in http_query.split('&') {
|
||||
let mut kv = parts.split('=');
|
||||
if let Some(k) = kv.next() {
|
||||
if k == DNS_QUERY_PARAM {
|
||||
question_str = kv.next();
|
||||
}
|
||||
}
|
||||
}
|
||||
if let Some(question_str) = question_str {
|
||||
if question_str.len() > MAX_DNS_QUESTION_LEN * 4 / 3 {
|
||||
return None;
|
||||
}
|
||||
}
|
||||
let query = match question_str
|
||||
.and_then(|question_str| BASE64_URL_SAFE_NO_PAD.decode(question_str).ok())
|
||||
{
|
||||
Some(query) => query,
|
||||
_ => return None,
|
||||
};
|
||||
Some(query)
|
||||
}
|
||||
|
||||
async fn serve_doh_get(&self, req: Request<Body>) -> Result<Response<Body>, http::Error> {
|
||||
let query = match self.query_from_query_string(req) {
|
||||
Some(query) => query,
|
||||
_ => return http_error(StatusCode::BAD_REQUEST),
|
||||
};
|
||||
self.serve_doh_query(query).await
|
||||
}
|
||||
|
||||
async fn serve_doh_post(&self, req: Request<Body>) -> Result<Response<Body>, http::Error> {
|
||||
if self.globals.disable_post {
|
||||
return http_error(StatusCode::METHOD_NOT_ALLOWED);
|
||||
}
|
||||
let query = match self.read_body(req.into_body()).await {
|
||||
Ok(q) => q,
|
||||
Err(e) => return http_error(StatusCode::from(e)),
|
||||
};
|
||||
self.serve_doh_query(query).await
|
||||
}
|
||||
|
||||
    /// Handles an Oblivious DoH query end to end: decrypts it with the
    /// current server key, proxies the inner DNS query upstream, then
    /// encrypts the answer back using the per-query context.
    async fn serve_odoh(&self, encrypted_query: Vec<u8>) -> Result<Response<Body>, http::Error> {
        // Snapshot the current key; rotation may swap it at any time.
        let odoh_public_key = (*self.globals.odoh_rotator).clone().current_public_key();
        let (query, context) = match (*odoh_public_key).clone().decrypt_query(encrypted_query) {
            Ok((q, context)) => (q.to_vec(), context),
            Err(e) => return http_error(StatusCode::from(e)),
        };
        let resp = match self.proxy(query).await {
            Ok(resp) => resp,
            Err(e) => return http_error(StatusCode::from(e)),
        };
        // Encrypted responses use TTL 0 and no CORS header.
        let encrypted_resp = match context.encrypt_response(resp.packet) {
            Ok(resp) => self.build_response(resp, 0u32, DoHType::Oblivious.as_str(), false),
            Err(e) => return http_error(StatusCode::from(e)),
        };

        match encrypted_resp {
            Ok(resp) => Ok(resp),
            Err(e) => http_error(StatusCode::from(e)),
        }
    }
|
||||
|
||||
async fn serve_odoh_get(&self, req: Request<Body>) -> Result<Response<Body>, http::Error> {
|
||||
let encrypted_query = match self.query_from_query_string(req) {
|
||||
Some(encrypted_query) => encrypted_query,
|
||||
_ => return http_error(StatusCode::BAD_REQUEST),
|
||||
};
|
||||
self.serve_odoh(encrypted_query).await
|
||||
}
|
||||
|
||||
async fn serve_odoh_post(&self, req: Request<Body>) -> Result<Response<Body>, http::Error> {
|
||||
if self.globals.disable_post && !self.globals.allow_odoh_post {
|
||||
return http_error(StatusCode::METHOD_NOT_ALLOWED);
|
||||
}
|
||||
let encrypted_query = match self.read_body(req.into_body()).await {
|
||||
Ok(q) => q,
|
||||
Err(e) => return http_error(StatusCode::from(e)),
|
||||
};
|
||||
self.serve_odoh(encrypted_query).await
|
||||
}
|
||||
|
||||
async fn serve_odoh_configs(&self) -> Result<Response<Body>, http::Error> {
|
||||
let odoh_public_key = (*self.globals.odoh_rotator).clone().current_public_key();
|
||||
let configs = (*odoh_public_key).clone().into_config();
|
||||
match self.build_response(
|
||||
configs,
|
||||
ODOH_KEY_ROTATION_SECS,
|
||||
"application/octet-stream".to_string(),
|
||||
true,
|
||||
) {
|
||||
Ok(resp) => Ok(resp),
|
||||
Err(e) => http_error(StatusCode::from(e)),
|
||||
}
|
||||
}
|
||||
|
||||
fn acceptable_content_type(
|
||||
headers: &HeaderMap,
|
||||
content_types: &[&'static str],
|
||||
) -> Option<&'static str> {
|
||||
let accept = headers.get(hyper::header::ACCEPT);
|
||||
let accept = accept?;
|
||||
for part in accept.to_str().unwrap_or("").split(',').map(|s| s.trim()) {
|
||||
if let Some(found) = part
|
||||
.split(';')
|
||||
.next()
|
||||
.map(|s| s.trim().to_ascii_lowercase())
|
||||
{
|
||||
if let Some(&content_type) = content_types
|
||||
.iter()
|
||||
.find(|&&content_type| content_type == found)
|
||||
{
|
||||
return Some(content_type);
|
||||
}
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
    /// Determines whether a request is plain DoH or Oblivious DoH from its
    /// `Content-Type` header, falling back to `Accept` negotiation when no
    /// `Content-Type` is present (typical for GET).
    ///
    /// On failure, the `Err` carries a ready-to-send error response:
    /// 406 when nothing acceptable was offered, 400 for a non-ASCII header
    /// value, 415 for an unsupported media type.
    fn parse_content_type(req: &Request<Body>) -> Result<DoHType, Response<Body>> {
        const CT_DOH: &str = "application/dns-message";
        const CT_ODOH: &str = "application/oblivious-dns-message";

        let headers = req.headers();
        let content_type = match headers.get(hyper::header::CONTENT_TYPE) {
            // No Content-Type: negotiate via the Accept header.
            None => {
                let acceptable_content_type =
                    Self::acceptable_content_type(headers, &[CT_DOH, CT_ODOH]);
                match acceptable_content_type {
                    None => {
                        let response = Response::builder()
                            .status(StatusCode::NOT_ACCEPTABLE)
                            .body(Body::empty())
                            .unwrap();
                        return Err(response);
                    }
                    Some(content_type) => content_type,
                }
            }
            // Content-Type present: it must at least be valid ASCII.
            Some(content_type) => match content_type.to_str() {
                Err(_) => {
                    let response = Response::builder()
                        .status(StatusCode::BAD_REQUEST)
                        .body(Body::empty())
                        .unwrap();
                    return Err(response);
                }
                Ok(content_type) => content_type,
            },
        };

        // Media types are case-insensitive; the consts above double as
        // match patterns here.
        match content_type.to_ascii_lowercase().as_str() {
            CT_DOH => Ok(DoHType::Standard),
            CT_ODOH => Ok(DoHType::Oblivious),
            _ => {
                let response = Response::builder()
                    .status(StatusCode::UNSUPPORTED_MEDIA_TYPE)
                    .body(Body::empty())
                    .unwrap();
                Err(response)
            }
        }
    }
|
||||
|
||||
async fn read_body(&self, mut body: Body) -> Result<Vec<u8>, DoHError> {
|
||||
let mut sum_size = 0;
|
||||
let mut query = vec![];
|
||||
while let Some(chunk) = body.next().await {
|
||||
let chunk = chunk.map_err(|_| DoHError::TooLarge)?;
|
||||
sum_size += chunk.len();
|
||||
if sum_size >= MAX_DNS_QUESTION_LEN {
|
||||
return Err(DoHError::TooLarge);
|
||||
}
|
||||
query.extend(chunk);
|
||||
}
|
||||
Ok(query)
|
||||
}
|
||||
|
||||
async fn proxy(&self, query: Vec<u8>) -> Result<DnsResponse, DoHError> {
|
||||
let proxy_timeout = self.globals.timeout;
|
||||
let timeout_res = tokio::time::timeout(proxy_timeout, self._proxy(query)).await;
|
||||
timeout_res.map_err(|_| DoHError::UpstreamTimeout)?
|
||||
}
|
||||
|
||||
    /// Forwards a raw DNS query to the configured upstream resolver: first
    /// over UDP, then retrying over TCP when the UDP answer is truncated.
    /// Returns the response packet plus the TTL to advertise to clients.
    async fn _proxy(&self, mut query: Vec<u8>) -> Result<DnsResponse, DoHError> {
        if query.len() < MIN_DNS_PACKET_LEN {
            return Err(DoHError::Incomplete);
        }
        // Best effort: advertise our maximum response size via EDNS.
        let _ = dns::set_edns_max_payload_size(&mut query, MAX_DNS_RESPONSE_LEN as _);
        let globals = &self.globals;
        let mut packet = vec![0; MAX_DNS_RESPONSE_LEN];
        let (min_ttl, max_ttl, err_ttl) = (globals.min_ttl, globals.max_ttl, globals.err_ttl);

        // UDP
        {
            let socket = UdpSocket::bind(&globals.local_bind_address)
                .await
                .map_err(DoHError::Io)?;
            let expected_server_address = globals.server_address;
            socket
                .send_to(&query, &globals.server_address)
                .map_err(DoHError::Io)
                .await?;
            let (len, response_server_address) =
                socket.recv_from(&mut packet).map_err(DoHError::Io).await?;
            // Drop undersized replies and replies from unexpected peers.
            if len < MIN_DNS_PACKET_LEN || expected_server_address != response_server_address {
                return Err(DoHError::UpstreamIssue);
            }
            packet.truncate(len);
        }

        // TCP retry when the UDP answer carries the TC bit.
        if dns::is_truncated(&packet) {
            // Cap the share of clients allowed to hold TCP sessions.
            let clients_count = self.globals.clients_count.current();
            if self.globals.max_clients >= UDP_TCP_RATIO
                && clients_count >= self.globals.max_clients / UDP_TCP_RATIO
            {
                return Err(DoHError::TooManyTcpSessions);
            }
            let socket = match globals.server_address {
                SocketAddr::V4(_) => TcpSocket::new_v4(),
                SocketAddr::V6(_) => TcpSocket::new_v6(),
            }
            .map_err(DoHError::Io)?;
            let mut ext_socket = socket
                .connect(globals.server_address)
                .await
                .map_err(DoHError::Io)?;
            ext_socket.set_nodelay(true).map_err(DoHError::Io)?;
            // DNS-over-TCP frames messages with a 2-byte big-endian length.
            let mut binlen = [0u8, 0];
            BigEndian::write_u16(&mut binlen, query.len() as u16);
            ext_socket.write_all(&binlen).await.map_err(DoHError::Io)?;
            ext_socket.write_all(&query).await.map_err(DoHError::Io)?;
            ext_socket.flush().await.map_err(DoHError::Io)?;
            ext_socket
                .read_exact(&mut binlen)
                .await
                .map_err(DoHError::Io)?;
            let packet_len = BigEndian::read_u16(&binlen) as usize;
            if !(MIN_DNS_PACKET_LEN..=MAX_DNS_RESPONSE_LEN).contains(&packet_len) {
                return Err(DoHError::UpstreamIssue);
            }
            packet = vec![0u8; packet_len];
            ext_socket
                .read_exact(&mut packet)
                .await
                .map_err(DoHError::Io)?;
        }

        // Recoverable errors get a short fixed TTL; otherwise use the
        // clamped minimum TTL found in the response records.
        let ttl = if dns::is_recoverable_error(&packet) {
            err_ttl
        } else {
            match dns::min_ttl(&packet, min_ttl, max_ttl, err_ttl) {
                Err(_) => return Err(DoHError::UpstreamIssue),
                Ok(ttl) => ttl,
            }
        };
        // Padding is best effort; a failure here is deliberately ignored.
        dns::add_edns_padding(&mut packet)
            .map_err(|_| DoHError::TooLarge)
            .ok();
        Ok(DnsResponse { packet, ttl })
    }
|
||||
|
||||
fn build_response(
|
||||
&self,
|
||||
packet: Vec<u8>,
|
||||
ttl: u32,
|
||||
content_type: String,
|
||||
cors: bool,
|
||||
) -> Result<Response<Body>, DoHError> {
|
||||
let packet_len = packet.len();
|
||||
let mut response_builder = Response::builder()
|
||||
.header(hyper::header::CONTENT_LENGTH, packet_len)
|
||||
.header(hyper::header::CONTENT_TYPE, content_type.as_str())
|
||||
.header(
|
||||
hyper::header::CACHE_CONTROL,
|
||||
format!(
|
||||
"max-age={ttl}, stale-if-error={STALE_IF_ERROR_SECS}, \
|
||||
stale-while-revalidate={STALE_WHILE_REVALIDATE_SECS}"
|
||||
)
|
||||
.as_str(),
|
||||
);
|
||||
if cors {
|
||||
response_builder =
|
||||
response_builder.header(hyper::header::ACCESS_CONTROL_ALLOW_ORIGIN, "*");
|
||||
}
|
||||
let response = response_builder
|
||||
.body(Body::from(packet))
|
||||
.map_err(|_| DoHError::InvalidData)?;
|
||||
Ok(response)
|
||||
}
|
||||
|
||||
    /// Serves one accepted connection on the shared runtime, enforcing the
    /// global client limit via the shared counter.
    async fn client_serve<I>(self, stream: I, server: Http<LocalExecutor>)
    where
        I: AsyncRead + AsyncWrite + Send + Unpin + 'static,
    {
        let clients_count = self.globals.clients_count.clone();
        // `increment` returns the previous count.
        // NOTE(review): `>` allows the count to briefly reach
        // max_clients + 1 — confirm whether `>=` was intended.
        if clients_count.increment() > self.globals.max_clients {
            clients_count.decrement();
            return;
        }
        self.globals.runtime_handle.clone().spawn(async move {
            // Connection budget is one second longer than the upstream
            // timeout — presumably so per-request upstream timeouts can
            // surface first; TODO confirm.
            tokio::time::timeout(
                self.globals.timeout + Duration::from_secs(1),
                server.serve_connection(stream, self),
            )
            .await
            .ok();
            clients_count.decrement();
        });
    }
|
||||
|
||||
async fn start_without_tls(
|
||||
self,
|
||||
listener: TcpListener,
|
||||
server: Http<LocalExecutor>,
|
||||
) -> Result<(), DoHError> {
|
||||
let listener_service = async {
|
||||
while let Ok((stream, _client_addr)) = listener.accept().await {
|
||||
self.clone().client_serve(stream, server.clone()).await;
|
||||
}
|
||||
Ok(()) as Result<(), DoHError>
|
||||
};
|
||||
listener_service.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
    /// Binds the listen socket, configures the Hyper server, and runs the
    /// TLS or plaintext accept loop until the server stops.
    pub async fn entrypoint(self) -> Result<(), DoHError> {
        let listen_address = self.globals.listen_address;
        let listener = TcpListener::bind(&listen_address)
            .await
            .map_err(DoHError::Io)?;
        let path = &self.globals.path;

        // TLS is active only when the feature is compiled in AND both the
        // certificate and key paths are configured.
        let tls_enabled: bool;
        #[cfg(not(feature = "tls"))]
        {
            tls_enabled = false;
        }
        #[cfg(feature = "tls")]
        {
            tls_enabled =
                self.globals.tls_cert_path.is_some() && self.globals.tls_cert_key_path.is_some();
        }
        if tls_enabled {
            println!("Listening on https://{listen_address}{path}");
        } else {
            println!("Listening on http://{listen_address}{path}");
        }

        let mut server = Http::new();
        server.http1_keep_alive(self.globals.keepalive);
        server.http2_max_concurrent_streams(self.globals.max_concurrent_streams);
        server.pipeline_flush(true);
        // Spawn per-connection tasks on the caller-provided runtime.
        let executor = LocalExecutor::new(self.globals.runtime_handle.clone());
        let server = server.with_executor(executor);

        #[cfg(feature = "tls")]
        {
            if tls_enabled {
                self.start_with_tls(listener, server).await?;
                return Ok(());
            }
        }
        self.start_without_tls(listener, server).await?;
        Ok(())
    }
|
||||
}
|
132
src/libdoh/src/odoh.rs
Normal file
132
src/libdoh/src/odoh.rs
Normal file
|
@ -0,0 +1,132 @@
|
|||
use std::fmt;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use arc_swap::ArcSwap;
|
||||
use odoh_rs::{
|
||||
Deserialize, ObliviousDoHConfig, ObliviousDoHConfigs, ObliviousDoHKeyPair, ObliviousDoHMessage,
|
||||
ObliviousDoHMessagePlaintext, OdohSecret, ResponseNonce, Serialize,
|
||||
};
|
||||
use rand::Rng;
|
||||
use tokio::runtime;
|
||||
|
||||
use crate::constants::ODOH_KEY_ROTATION_SECS;
|
||||
use crate::errors::DoHError;
|
||||
|
||||
/// An ODoH key pair together with its pre-serialized config advertisement.
#[derive(Clone)]
pub struct ODoHPublicKey {
    // Server key pair used to decrypt incoming ODoH queries.
    key_pair: ObliviousDoHKeyPair,
    // Serialized `ObliviousDoHConfigs`, ready to be served to clients.
    serialized_configs: Vec<u8>,
}
|
||||
|
||||
impl fmt::Debug for ODoHPublicKey {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("ODoHPublicKey").finish()
|
||||
}
|
||||
}
|
||||
|
||||
/// Per-query state captured at decryption time, needed to encrypt the
/// response back to the same client.
#[derive(Clone, Debug)]
pub struct ODoHQueryContext {
    // The decrypted query plaintext (bound into the response encryption).
    query: ObliviousDoHMessagePlaintext,
    // Server-side secret derived while decrypting the query.
    server_secret: OdohSecret,
}
|
||||
|
||||
impl ODoHPublicKey {
    /// Generates a fresh key pair and pre-serializes its config
    /// advertisement.
    ///
    /// # Errors
    /// Returns `ODoHConfigError` if the configs cannot be serialized.
    pub fn new() -> Result<ODoHPublicKey, DoHError> {
        let key_pair = ObliviousDoHKeyPair::new(&mut rand::thread_rng());
        let config = ObliviousDoHConfig::from(key_pair.public().clone());
        let mut serialized_configs = Vec::new();
        ObliviousDoHConfigs::from(vec![config])
            .serialize(&mut serialized_configs)
            .map_err(|e| DoHError::ODoHConfigError(e.into()))?;
        Ok(ODoHPublicKey {
            key_pair,
            serialized_configs,
        })
    }

    /// Consumes the key and returns the serialized config advertisement.
    pub fn into_config(self) -> Vec<u8> {
        self.serialized_configs
    }

    /// Decrypts a client's encrypted ODoH query.
    ///
    /// # Errors
    /// `InvalidData` for undecodable input or decryption failure;
    /// `StaleKey` when the query targets a different (rotated-out) key id.
    pub fn decrypt_query(
        self,
        encrypted_query: Vec<u8>,
    ) -> Result<(Vec<u8>, ODoHQueryContext), DoHError> {
        let odoh_query = ObliviousDoHMessage::deserialize(&mut bytes::Bytes::from(encrypted_query))
            .map_err(|_| DoHError::InvalidData)?;
        // Verify the query was encrypted to THIS key before attempting
        // decryption, so clients with an old key get a distinct error.
        match self.key_pair.public().identifier() {
            Ok(key_id) => {
                if !key_id.eq(&odoh_query.key_id()) {
                    return Err(DoHError::StaleKey);
                }
            }
            Err(_) => return Err(DoHError::InvalidData),
        };
        let (query, server_secret) = match odoh_rs::decrypt_query(&odoh_query, &self.key_pair) {
            Ok((pq, ss)) => (pq, ss),
            Err(_) => return Err(DoHError::InvalidData),
        };
        // Keep the plaintext and secret around for response encryption.
        let context = ODoHQueryContext {
            query: query.clone(),
            server_secret,
        };
        Ok((query.into_msg().to_vec(), context))
    }
}
|
||||
|
||||
impl ODoHQueryContext {
    /// Encrypts a DNS response back to the ODoH client, using the query
    /// plaintext and server secret captured during decryption.
    ///
    /// # Errors
    /// `InvalidData` when encryption or serialization fails.
    pub fn encrypt_response(self, response_body: Vec<u8>) -> Result<Vec<u8>, DoHError> {
        // Fresh random nonce for every response.
        let response_nonce = rand::thread_rng().r#gen::<ResponseNonce>();
        let response_body_ = ObliviousDoHMessagePlaintext::new(response_body, 0);
        let encrypted_response = odoh_rs::encrypt_response(
            &self.query,
            &response_body_,
            self.server_secret,
            response_nonce,
        )
        .map_err(|_| DoHError::InvalidData)?;
        let mut encrypted_response_bytes = Vec::new();
        encrypted_response
            .serialize(&mut encrypted_response_bytes)
            .map_err(|_| DoHError::InvalidData)?;
        Ok(encrypted_response_bytes)
    }
}
|
||||
|
||||
/// Holds the currently-active ODoH key behind a lock-free swappable pointer,
/// so readers never block while the background task rotates the key.
#[derive(Clone, Debug)]
pub struct ODoHRotator {
    key: Arc<ArcSwap<ODoHPublicKey>>,
}
|
||||
|
||||
impl ODoHRotator {
|
||||
pub fn new(runtime_handle: runtime::Handle) -> Result<ODoHRotator, DoHError> {
|
||||
let public_key = match ODoHPublicKey::new() {
|
||||
Ok(key) => Arc::new(ArcSwap::from_pointee(key)),
|
||||
Err(e) => panic!("ODoH key rotation error: {}", e),
|
||||
};
|
||||
|
||||
let current_key = Arc::clone(&public_key);
|
||||
|
||||
runtime_handle.spawn(async move {
|
||||
loop {
|
||||
tokio::time::sleep(Duration::from_secs(ODOH_KEY_ROTATION_SECS.into())).await;
|
||||
match ODoHPublicKey::new() {
|
||||
Ok(key) => {
|
||||
current_key.store(Arc::new(key));
|
||||
}
|
||||
Err(e) => eprintln!("ODoH key rotation error: {e}"),
|
||||
};
|
||||
}
|
||||
});
|
||||
|
||||
Ok(ODoHRotator {
|
||||
key: Arc::clone(&public_key),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn current_public_key(&self) -> Arc<ODoHPublicKey> {
|
||||
let key = Arc::clone(&self.key);
|
||||
Arc::clone(&key.load())
|
||||
}
|
||||
}
|
165
src/libdoh/src/tls.rs
Normal file
165
src/libdoh/src/tls.rs
Normal file
|
@ -0,0 +1,165 @@
|
|||
use std::fs::File;
|
||||
use std::io::{self, BufReader, Cursor, Read};
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use futures::{future::FutureExt, join, select};
|
||||
use hyper::server::conn::Http;
|
||||
use tokio::{
|
||||
net::TcpListener,
|
||||
sync::mpsc::{self, Receiver},
|
||||
};
|
||||
use tokio_rustls::{
|
||||
rustls::{Certificate, PrivateKey, ServerConfig},
|
||||
TlsAcceptor,
|
||||
};
|
||||
|
||||
use crate::constants::CERTS_WATCH_DELAY_SECS;
|
||||
use crate::errors::*;
|
||||
use crate::{DoH, LocalExecutor};
|
||||
|
||||
/// Loads a certificate chain and private key from PEM files and builds a
/// `TlsAcceptor` advertising h2 and http/1.1 via ALPN.
///
/// # Errors
/// Any I/O or parse failure is reported as an `io::Error` whose message
/// names the offending file.
pub fn create_tls_acceptor<P, P2>(certs_path: P, certs_keys_path: P2) -> io::Result<TlsAcceptor>
where
    P: AsRef<Path>,
    P2: AsRef<Path>,
{
    // Parse every certificate present in the chain file.
    let certs: Vec<_> = {
        let certs_path_str = certs_path.as_ref().display().to_string();
        let mut reader = BufReader::new(File::open(certs_path).map_err(|e| {
            io::Error::new(
                e.kind(),
                format!("Unable to load the certificates [{certs_path_str}]: {e}"),
            )
        })?);
        rustls_pemfile::certs(&mut reader).map_err(|_| {
            io::Error::new(
                io::ErrorKind::InvalidInput,
                "Unable to parse the certificates",
            )
        })?
    }
    .drain(..)
    .map(Certificate)
    .collect();
    // Collect candidate private keys: the file is scanned twice through an
    // in-memory cursor, first as PKCS#8 and then as RSA.
    let certs_keys: Vec<_> = {
        let certs_keys_path_str = certs_keys_path.as_ref().display().to_string();
        let encoded_keys = {
            let mut encoded_keys = vec![];
            File::open(certs_keys_path)
                .map_err(|e| {
                    io::Error::new(
                        e.kind(),
                        format!("Unable to load the certificate keys [{certs_keys_path_str}]: {e}"),
                    )
                })?
                .read_to_end(&mut encoded_keys)?;
            encoded_keys
        };
        let mut reader = Cursor::new(encoded_keys);
        let pkcs8_keys = rustls_pemfile::pkcs8_private_keys(&mut reader).map_err(|_| {
            io::Error::new(
                io::ErrorKind::InvalidInput,
                "Unable to parse the certificates private keys (PKCS8)",
            )
        })?;
        // Rewind to re-scan the same bytes for RSA-format keys.
        reader.set_position(0);
        let mut rsa_keys = rustls_pemfile::rsa_private_keys(&mut reader).map_err(|_| {
            io::Error::new(
                io::ErrorKind::InvalidInput,
                "Unable to parse the certificates private keys (RSA)",
            )
        })?;
        let mut keys = pkcs8_keys;
        keys.append(&mut rsa_keys);
        if keys.is_empty() {
            return Err(io::Error::new(
                io::ErrorKind::InvalidInput,
                "No private keys found - Make sure that they are in PKCS#8/PEM format",
            ));
        }
        keys.drain(..).map(PrivateKey).collect()
    };

    // Use the first key that forms a valid pair with the certificate chain.
    let mut server_config = certs_keys
        .into_iter()
        .find_map(|certs_key| {
            let server_config_builder = ServerConfig::builder()
                .with_safe_defaults()
                .with_no_client_auth();
            match server_config_builder.with_single_cert(certs.clone(), certs_key) {
                Ok(found_config) => Some(found_config),
                _ => None,
            }
        })
        .ok_or_else(|| {
            io::Error::new(
                io::ErrorKind::InvalidInput,
                "Unable to find a valid certificate and key",
            )
        })?;
    server_config.alpn_protocols = vec![b"h2".to_vec(), b"http/1.1".to_vec()];
    Ok(TlsAcceptor::from(Arc::new(server_config)))
}
|
||||
|
||||
impl DoH {
    /// HTTPS accept loop: serves TLS connections while hot-swapping the
    /// acceptor whenever a new one arrives on the channel.
    async fn start_https_service(
        self,
        mut tls_acceptor_receiver: Receiver<TlsAcceptor>,
        listener: TcpListener,
        server: Http<LocalExecutor>,
    ) -> Result<(), DoHError> {
        // No connections are served until the first acceptor arrives.
        let mut tls_acceptor: Option<TlsAcceptor> = None;
        let listener_service = async {
            loop {
                select! {
                    tcp_cnx = listener.accept().fuse() => {
                        if tls_acceptor.is_none() || tcp_cnx.is_err() {
                            continue;
                        }
                        let (raw_stream, _client_addr) = tcp_cnx.unwrap();
                        // Failed TLS handshakes are silently dropped.
                        if let Ok(stream) = tls_acceptor.as_ref().unwrap().accept(raw_stream).await {
                            self.clone().client_serve(stream, server.clone()).await
                        }
                    }
                    new_tls_acceptor = tls_acceptor_receiver.recv().fuse() => {
                        // `None` means the certificate-reload task is gone.
                        if new_tls_acceptor.is_none() {
                            break;
                        }
                        tls_acceptor = new_tls_acceptor;
                    }
                    complete => break
                }
            }
            Ok(()) as Result<(), DoHError>
        };
        listener_service.await?;
        Ok(())
    }

    /// Runs the HTTPS server alongside a companion task that periodically
    /// reloads the TLS certificate and key from disk.
    pub async fn start_with_tls(
        self,
        listener: TcpListener,
        server: Http<LocalExecutor>,
    ) -> Result<(), DoHError> {
        // Both paths are guaranteed by the caller (`entrypoint` selects TLS
        // only when `is_some()` holds for both).
        let certs_path = self.globals.tls_cert_path.as_ref().unwrap().clone();
        let certs_keys_path = self.globals.tls_cert_key_path.as_ref().unwrap().clone();
        let (tls_acceptor_sender, tls_acceptor_receiver) = mpsc::channel(1);
        let https_service = self.start_https_service(tls_acceptor_receiver, listener, server);
        let cert_service = async {
            loop {
                match create_tls_acceptor(&certs_path, &certs_keys_path) {
                    Ok(tls_acceptor) => {
                        // A closed channel means the server is gone: stop.
                        if tls_acceptor_sender.send(tls_acceptor).await.is_err() {
                            break;
                        }
                    }
                    Err(e) => eprintln!("TLS certificates error: {e}"),
                }
                tokio::time::sleep(Duration::from_secs(CERTS_WATCH_DELAY_SECS.into())).await;
            }
            Ok::<_, DoHError>(())
        };
        // Run both tasks; report the HTTPS service's result.
        join!(https_service, cert_service).0
    }
}
|
314
src/main.rs
314
src/main.rs
|
@ -1,283 +1,63 @@
|
|||
#![cfg_attr(feature = "clippy", feature(plugin))]
|
||||
#![cfg_attr(feature = "clippy", plugin(clippy))]
|
||||
#[global_allocator]
|
||||
static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc;
|
||||
|
||||
extern crate base64;
|
||||
#[macro_use]
|
||||
extern crate clap;
|
||||
extern crate futures;
|
||||
extern crate hyper;
|
||||
extern crate tokio;
|
||||
extern crate tokio_io;
|
||||
extern crate tokio_timer;
|
||||
|
||||
mod dns;
|
||||
mod config;
|
||||
mod constants;
|
||||
mod utils;
|
||||
|
||||
use clap::{App, Arg};
|
||||
use futures::future;
|
||||
use futures::prelude::*;
|
||||
use hyper::header::{CacheControl, CacheDirective, ContentLength, ContentType};
|
||||
use hyper::server::{Http, Request, Response, Service};
|
||||
use hyper::{Body, Method, StatusCode};
|
||||
use std::cell::RefCell;
|
||||
use std::net::SocketAddr;
|
||||
use std::rc::Rc;
|
||||
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use tokio::executor::current_thread;
|
||||
use tokio::net::{TcpListener, UdpSocket};
|
||||
use tokio_timer::Timer;
|
||||
|
||||
const DNS_QUERY_PARAM: &str = "dns";
|
||||
const LISTEN_ADDRESS: &str = "127.0.0.1:3000";
|
||||
const LOCAL_BIND_ADDRESS: &str = "0.0.0.0:0";
|
||||
const MAX_CLIENTS: u32 = 512;
|
||||
const MAX_DNS_QUESTION_LEN: usize = 512;
|
||||
const MAX_DNS_RESPONSE_LEN: usize = 4096;
|
||||
const MIN_DNS_PACKET_LEN: usize = 17;
|
||||
const PATH: &str = "/dns-query";
|
||||
const SERVER_ADDRESS: &str = "9.9.9.9:53";
|
||||
const TIMEOUT_SEC: u64 = 10;
|
||||
const MAX_TTL: u32 = 86400 * 7;
|
||||
const MIN_TTL: u32 = 1;
|
||||
const ERR_TTL: u32 = 1;
|
||||
use libdoh::odoh::ODoHRotator;
|
||||
use libdoh::reexports::tokio;
|
||||
use libdoh::*;
|
||||
|
||||
// Legacy (hyper 0.11 era, single-threaded) server state; this is the
// removed side of the diff, superseded by `libdoh::Globals`.
#[derive(Clone, Debug)]
struct DoH {
    // HTTP listen address.
    listen_address: SocketAddr,
    // Local address UDP sockets to the resolver are bound to.
    local_bind_address: SocketAddr,
    // Upstream DNS resolver address.
    server_address: SocketAddr,
    // URI path serving DoH queries.
    path: String,
    // Maximum number of simultaneous clients.
    max_clients: u32,
    // Per-request timeout.
    timeout: Duration,
    // Shared timer wheel used to enforce the timeout.
    timers: Timer,
    // Single-threaded client counter (current_thread executor).
    clients_count: Rc<RefCell<u32>>,
}
|
||||
|
||||
impl Service for DoH {
|
||||
type Request = Request;
|
||||
type Response = Response;
|
||||
type Error = hyper::Error;
|
||||
type Future = Box<Future<Item = Self::Response, Error = Self::Error>>;
|
||||
|
||||
fn call(&self, req: Request) -> Self::Future {
|
||||
{
|
||||
let count = self.clients_count.borrow_mut();
|
||||
if *count > self.max_clients {
|
||||
let mut response = Response::new();
|
||||
response.set_status(StatusCode::TooManyRequests);
|
||||
return Box::new(future::ok(response));
|
||||
}
|
||||
(*count).saturating_add(1);
|
||||
}
|
||||
let clients_count_inner = self.clients_count.clone();
|
||||
let fut = self
|
||||
.handle_client(req)
|
||||
.then(move |fut| {
|
||||
(*clients_count_inner).borrow_mut().saturating_sub(1);
|
||||
fut
|
||||
})
|
||||
.map_err(|err| {
|
||||
eprintln!("server error: {:?}", err);
|
||||
err
|
||||
});
|
||||
let timed = self
|
||||
.timers
|
||||
.timeout(fut.map_err(|_| {}), self.timeout)
|
||||
.map_err(|_| hyper::Error::Timeout);
|
||||
Box::new(timed)
|
||||
}
|
||||
}
|
||||
|
||||
impl DoH {
    /// Route one request: 404 for unknown paths, POST body or GET `dns=`
    /// parameter as the DNS question, 405 for any other method.
    fn handle_client(&self, req: Request) -> Box<Future<Item = Response, Error = hyper::Error>> {
        let mut response = Response::new();
        if req.path() != self.path {
            response.set_status(StatusCode::NotFound);
            return Box::new(future::ok(response));
        }
        match *req.method() {
            Method::Post => {
                // Raw DNS message arrives as the request body.
                let fut = self.read_body_and_proxy(req.body());
                return Box::new(fut.map_err(|_| hyper::Error::Incomplete));
            }
            Method::Get => {
                // Manual query-string scan for the `dns` parameter.
                // NOTE(review): no percent-decoding is performed here;
                // base64url without padding normally needs none, but a
                // client that percent-encodes would be rejected — confirm.
                let query = req.query().unwrap_or("");
                let mut question_str = None;
                for parts in query.split('&') {
                    let mut kv = parts.split('=');
                    if let Some(k) = kv.next() {
                        if k == DNS_QUERY_PARAM {
                            question_str = kv.next();
                        }
                    }
                }
                // Missing or undecodable parameter => 400.
                let question = match question_str.and_then(|question_str| {
                    base64::decode_config(question_str, base64::URL_SAFE_NO_PAD).ok()
                }) {
                    Some(question) => question,
                    _ => {
                        response.set_status(StatusCode::BadRequest);
                        return Box::new(future::ok(response));
                    }
                };
                let fut = self.proxy(question);
                return Box::new(fut.map_err(|_| hyper::Error::Incomplete));
            }
            _ => {
                response.set_status(StatusCode::MethodNotAllowed);
            }
        };
        Box::new(future::ok(response))
    }

    /// Forward `query` to the upstream resolver over UDP and turn the
    /// reply into an HTTP response with a TTL-derived Cache-Control.
    ///
    /// NOTE(review): binds a fresh UDP socket per query and `unwrap`s the
    /// bind; a transient bind failure panics the server — consider
    /// propagating the error instead.
    fn proxy(&self, query: Vec<u8>) -> Box<Future<Item = Response, Error = ()>> {
        let socket = UdpSocket::bind(&self.local_bind_address).unwrap();
        let expected_server_address = self.server_address;
        let fut = socket
            .send_dgram(query, &self.server_address)
            .map_err(|_| ())
            .and_then(move |(socket, _)| {
                let packet = vec![0; MAX_DNS_RESPONSE_LEN];
                socket.recv_dgram(packet).map_err(|_| {})
            })
            .and_then(move |(_socket, mut packet, len, response_server_address)| {
                // Reject short datagrams and replies from unexpected peers
                // (cheap anti-spoofing check on a connectionless socket).
                if len < MIN_DNS_PACKET_LEN || expected_server_address != response_server_address {
                    return future::err(());
                }
                packet.truncate(len);
                // Derive the cacheable lifetime from the smallest record
                // TTL, clamped to [MIN_TTL, MAX_TTL].
                let ttl = match dns::min_ttl(&packet, MIN_TTL, MAX_TTL, ERR_TTL) {
                    Err(_) => return future::err(()),
                    Ok(min_ttl) => min_ttl,
                };
                let packet_len = packet.len();
                let mut response = Response::new();
                response.set_body(packet);
                let response = response
                    .with_header(ContentLength(packet_len as u64))
                    .with_header(ContentType(
                        "application/dns-message".parse().unwrap(),
                    ))
                    .with_header(CacheControl(vec![CacheDirective::MaxAge(ttl)]));
                future::ok(response)
            });
        Box::new(fut)
    }

    /// Accumulate the POST body (capped at MAX_DNS_QUESTION_LEN, enforced
    /// incrementally so an oversized upload is aborted early), then proxy it.
    fn read_body_and_proxy(&self, body: Body) -> Box<Future<Item = Response, Error = ()>> {
        let mut sum_size = 0;
        let inner = self.clone();
        let fut =
            body.and_then(move |chunk| {
                // Running total across chunks; fail as soon as the cap is hit.
                sum_size += chunk.len();
                if sum_size > MAX_DNS_QUESTION_LEN {
                    Err(hyper::error::Error::TooLarge)
                } else {
                    Ok(chunk)
                }
            }).concat2()
            .map_err(move |_err| ())
            .map(move |chunk| chunk.to_vec())
            .and_then(move |query| {
                // Too small to be a DNS message => hard error.
                if query.len() < MIN_DNS_PACKET_LEN {
                    return Box::new(future::err(())) as Box<Future<Item = _, Error = _>>;
                }
                Box::new(inner.proxy(query))
            });
        Box::new(fut)
    }
}
|
||||
use crate::config::*;
|
||||
use crate::constants::*;
|
||||
|
||||
fn main() {
|
||||
let mut doh = DoH {
|
||||
let mut runtime_builder = tokio::runtime::Builder::new_multi_thread();
|
||||
runtime_builder.enable_all();
|
||||
runtime_builder.thread_name("doh-proxy");
|
||||
let runtime = runtime_builder.build().unwrap();
|
||||
|
||||
let rotator = match ODoHRotator::new(runtime.handle().clone()) {
|
||||
Ok(r) => r,
|
||||
Err(_) => panic!("Failed to create ODoHRotator"),
|
||||
};
|
||||
|
||||
let mut globals = Globals {
|
||||
#[cfg(feature = "tls")]
|
||||
tls_cert_path: None,
|
||||
#[cfg(feature = "tls")]
|
||||
tls_cert_key_path: None,
|
||||
|
||||
listen_address: LISTEN_ADDRESS.parse().unwrap(),
|
||||
local_bind_address: LOCAL_BIND_ADDRESS.parse().unwrap(),
|
||||
local_bind_address: SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0),
|
||||
server_address: SERVER_ADDRESS.parse().unwrap(),
|
||||
path: PATH.to_string(),
|
||||
max_clients: MAX_CLIENTS,
|
||||
timeout: Duration::from_secs(TIMEOUT_SEC),
|
||||
clients_count: Rc::new(RefCell::new(0u32)),
|
||||
timers: tokio_timer::wheel().build(),
|
||||
};
|
||||
parse_opts(&mut doh);
|
||||
let listen_address = doh.listen_address;
|
||||
let listener = TcpListener::bind(&listen_address).unwrap();
|
||||
println!("Listening on http://{}", listen_address);
|
||||
let doh = Rc::new(doh);
|
||||
let server = Http::new()
|
||||
.keep_alive(false)
|
||||
.serve_incoming(listener.incoming(), move || Ok(doh.clone()));
|
||||
let fut = server.for_each(move |client_fut| {
|
||||
current_thread::spawn(client_fut.map(|_| {}).map_err(|_| {}));
|
||||
Ok(())
|
||||
});
|
||||
current_thread::block_on_all(fut).unwrap();
|
||||
}
|
||||
clients_count: Default::default(),
|
||||
max_concurrent_streams: MAX_CONCURRENT_STREAMS,
|
||||
min_ttl: MIN_TTL,
|
||||
max_ttl: MAX_TTL,
|
||||
err_ttl: ERR_TTL,
|
||||
keepalive: true,
|
||||
disable_post: false,
|
||||
allow_odoh_post: false,
|
||||
odoh_configs_path: ODOH_CONFIGS_PATH.to_string(),
|
||||
odoh_rotator: Arc::new(rotator),
|
||||
|
||||
fn parse_opts(doh: &mut DoH) {
|
||||
let max_clients = MAX_CLIENTS.to_string();
|
||||
let timeout_sec = TIMEOUT_SEC.to_string();
|
||||
let matches = App::new("doh-proxy")
|
||||
.about("A DNS-over-HTTP server proxy")
|
||||
.arg(
|
||||
Arg::with_name("listen_address")
|
||||
.short("l")
|
||||
.long("listen-address")
|
||||
.takes_value(true)
|
||||
.default_value(LISTEN_ADDRESS)
|
||||
.help("Address to listen to"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("server_address")
|
||||
.short("u")
|
||||
.long("server-address")
|
||||
.takes_value(true)
|
||||
.default_value(SERVER_ADDRESS)
|
||||
.help("Address to connect to"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("local_bind_address")
|
||||
.short("b")
|
||||
.long("local-bind-address")
|
||||
.takes_value(true)
|
||||
.default_value(LOCAL_BIND_ADDRESS)
|
||||
.help("Address to connect from"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("path")
|
||||
.short("p")
|
||||
.long("path")
|
||||
.takes_value(true)
|
||||
.default_value(PATH)
|
||||
.help("URI path"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("max_clients")
|
||||
.short("c")
|
||||
.long("max-clients")
|
||||
.takes_value(true)
|
||||
.default_value(&max_clients)
|
||||
.help("Maximum number of simultaneous clients"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("timeout")
|
||||
.short("t")
|
||||
.long("timeout")
|
||||
.takes_value(true)
|
||||
.default_value(&timeout_sec)
|
||||
.help("Timeout, in seconds"),
|
||||
)
|
||||
.get_matches();
|
||||
if let Some(listen_address) = matches.value_of("listen_address") {
|
||||
doh.listen_address = listen_address.parse().unwrap();
|
||||
}
|
||||
if let Some(server_address) = matches.value_of("server_address") {
|
||||
doh.server_address = server_address.parse().unwrap();
|
||||
}
|
||||
if let Some(local_bind_address) = matches.value_of("local_bind_address") {
|
||||
doh.local_bind_address = local_bind_address.parse().unwrap();
|
||||
}
|
||||
if let Some(max_clients) = matches.value_of("max_clients") {
|
||||
doh.max_clients = max_clients.parse().unwrap();
|
||||
}
|
||||
if let Some(timeout) = matches.value_of("timeout") {
|
||||
doh.timeout = Duration::from_secs(timeout.parse().unwrap());
|
||||
}
|
||||
runtime_handle: runtime.handle().clone(),
|
||||
};
|
||||
parse_opts(&mut globals);
|
||||
let doh = DoH {
|
||||
globals: Arc::new(globals),
|
||||
};
|
||||
runtime.block_on(doh.entrypoint()).unwrap();
|
||||
}
|
||||
|
|
24
src/utils.rs
Normal file
24
src/utils.rs
Normal file
|
@ -0,0 +1,24 @@
|
|||
// Functions that verify the startup arguments are valid.
|
||||
|
||||
use std::net::{SocketAddr, ToSocketAddrs};
|
||||
|
||||
/// Validate that `arg_val` is a parseable socket address (IP plus port).
///
/// Returns the input unchanged on success, or a human-readable message on
/// failure — the shape expected by clap argument validators.
pub(crate) fn verify_sock_addr(arg_val: &str) -> Result<String, String> {
    arg_val
        .parse::<SocketAddr>()
        .map(|_| arg_val.to_string())
        .map_err(|_| {
            format!("Could not parse \"{arg_val}\" as a valid socket address (with port).")
        })
}
|
||||
|
||||
/// Validate that `arg_val` resolves to at least one socket address.
///
/// Accepts anything `ToSocketAddrs` handles: `ip:port` literals as well
/// as `host:port` pairs (the latter triggers name resolution). Returns
/// the input unchanged on success, or an error message for the CLI.
pub(crate) fn verify_remote_server(arg_val: &str) -> Result<String, String> {
    let mut resolved = arg_val.to_socket_addrs().map_err(|err| format!("{err}"))?;
    if resolved.next().is_some() {
        Ok(arg_val.to_string())
    } else {
        Err(format!("Could not parse \"{arg_val}\" as a valid remote uri"))
    }
}
|
Loading…
Add table
Add a link
Reference in a new issue