Compare commits

...

312 commits

Author SHA1 Message Date
Frank Denis
f0242354d3 Update deps 2025-03-20 00:43:38 +01:00
Frank Denis
25fa6946e6 tar cJpf -> tar cjpf in order to build bz2 archives
Fixes #103
2025-03-20 00:37:34 +01:00
Frank Denis
2254632d33 Update deps 2025-02-20 20:37:23 +01:00
Frank Denis
672d1a11f1 2025 2025-02-20 20:33:01 +01:00
Frank Denis
9e4a931bce Nits 2025-02-20 20:32:42 +01:00
Frank Denis
40b0b02972 Add issues.yml 2024-12-31 14:54:55 +01:00
Frank Denis
bf443c33b9 Switch to mlugg/setup-zig@v1 2024-11-04 00:11:49 +01:00
Frank Denis
1a0a0566c4 Back to Zig 0.10.1 2024-07-03 14:38:16 +02:00
Frank Denis
890a74276f Downgrade to Zig 0.12.0 2024-07-03 14:26:44 +02:00
Frank Denis
34f614e938 0.13 -> 0.13.0 2024-07-03 14:17:17 +02:00
Frank Denis
d6635eebb7 up 2024-07-03 14:16:01 +02:00
Frank Denis
c79501aea3 Use Zig 0.13 2024-07-03 14:03:51 +02:00
Frank Denis
e73964fa1d Update deps 2024-07-03 13:52:56 +02:00
Frank Denis
bafbdc0926 Try creating RPM packages
Fixes #98
2024-07-03 13:27:29 +02:00
Frank Denis
30a55a0f2f Merge branch 'master' of github.com:jedisct1/rust-doh
* 'master' of github.com:jedisct1/rust-doh:
  Add Let's Encrypt R10
  Update common hashes
2024-07-03 12:33:38 +02:00
Frank Denis
7bb8293c28 package.metadata.generate-rpm 2024-07-03 12:33:29 +02:00
Frank Denis
a6517472d5
Merge pull request #97 from demarcush/patch-1
Update common hashes
2024-07-03 00:11:22 +02:00
demarcush
3511672d49
Add Let's Encrypt R10 2024-07-02 20:47:53 +00:00
demarcush
bd85572368
Update common hashes 2024-05-14 03:44:17 +00:00
Frank Denis
02b3a67a00 Update hyper to 0.14.28 2024-05-06 12:22:21 +02:00
Frank Denis
66c66c7a28 Update mimalloc 2024-05-05 18:01:19 +02:00
Frank Denis
1165fab90c Update a few deps 2024-03-06 18:25:38 +01:00
Frank Denis
c92308ccbb Update deps 2023-09-02 00:20:06 +02:00
Frank Denis
78c47830ff Update deps 2023-07-15 21:18:46 +02:00
Frank Denis
9e2853da86 Update deps 2023-05-03 17:35:23 +02:00
Frank Denis
e5f6f2a5d6 Bump 2023-04-14 12:44:40 +02:00
Frank Denis
e8df0458ac Bump hyper. Again. 2023-04-14 12:38:08 +02:00
Frank Denis
19040f1e88 Nits 2023-04-14 09:45:20 +02:00
Frank Denis
6f9f63e754 Update deps, especially hyper 2023-04-13 17:13:03 +02:00
Frank Denis
678bd04bed Update deps 2023-04-13 17:12:29 +02:00
Frank Denis
ffa0828515 Update tokio 2023-03-02 19:05:11 +01:00
Frank Denis
6580f6ffb5 Fix CI 2023-02-19 21:50:08 +01:00
Frank Denis
f64770bdd7 Install zig 0.10.1 2023-02-19 21:44:31 +01:00
Frank Denis
18297228c7 Bump 2023-02-19 21:10:14 +01:00
Frank Denis
908e7d64db Update base64 2023-02-19 21:05:34 +01:00
Frank Denis
c54b3303fc Update base64, accept padding on decoding 2023-02-19 21:02:28 +01:00
Frank Denis
1c5c83803a Remove optional requirement 2023-02-09 17:21:29 +01:00
Frank Denis
1386b7d13a Mention HTTP/3 2023-02-09 17:18:27 +01:00
Frank Denis
920d31b502 Update relayd URL 2023-02-09 17:16:34 +01:00
Frank Denis
651224d900 Format 2023-02-09 16:58:58 +01:00
Frank Denis
b5d525abcd Update deps 2023-02-09 16:58:31 +01:00
Frank Denis
11d8f4cb31 Add a logo 2023-02-01 20:28:37 +01:00
Frank Denis
47330ebcad Update deps 2023-01-31 00:00:21 +01:00
Frank Denis
d5fd8231ff Sorry, Debian-aarch64 users 2023-01-12 01:38:53 +01:00
Frank Denis
8cba04338e Debian again... 2023-01-12 01:27:43 +01:00
Frank Denis
85280f4525 Try to build Debian packages 2023-01-12 01:25:34 +01:00
Frank Denis
1c28a28b78 Bump 2023-01-12 01:16:47 +01:00
Frank Denis
fbf82068d1 Only retrieve clap arguments as String, don't expect it to be smart
Fixes #94
2023-01-12 01:14:33 +01:00
Frank Denis
c9e084b2b4 Title 2023-01-09 21:34:33 +01:00
Frank Denis
37dc663b6e Simpler incantation to build Debian packages 2023-01-03 11:53:35 +01:00
Frank Denis
b81cc3e5d2 Remove glibc dependency for the Debian package
Fixes #93
2023-01-03 11:06:31 +01:00
Frank Denis
3f1bbcd8dc link-args=-s -> -C strip=symbols 2023-01-03 11:01:39 +01:00
Frank Denis
e92fddb165 Use zip -9 -r 2023-01-02 22:29:51 +01:00
Frank Denis
d573a20c86 Use v3 for the checkout action 2023-01-02 21:48:23 +01:00
Frank Denis
f5c07a205b Ah 2023-01-02 21:15:00 +01:00
Frank Denis
d277c0a806 Update upload-release actions 2023-01-02 21:14:00 +01:00
Frank Denis
fc61c79a9f Windows requires a .exe suffix 2023-01-02 21:00:07 +01:00
Frank Denis
a92f4a77ae Rust requires every single target to be installed individually 2023-01-02 20:47:01 +01:00
Frank Denis
a373957045 Install Zig 2023-01-02 20:37:09 +01:00
Frank Denis
6f5213838b Actions require unique names 2023-01-02 20:21:24 +01:00
Frank Denis
eede3f4ab3 2023 2023-01-02 20:19:56 +01:00
Frank Denis
fdcc797fcb Bump 2023-01-02 20:19:04 +01:00
Frank Denis
3e59f42558 Bump 2023-01-02 20:17:50 +01:00
Frank Denis
a1fc5bbffc CI: try to package a build for Windows 2023-01-02 20:17:20 +01:00
Frank Denis
4b887d6705 Update arc-swap 2023-01-02 20:05:19 +01:00
Frank Denis
6818fbe8a1 Update to clap 4
The new API is confusing and very error-prone, with errors being
thrown at runtime rather than compile-time.

Hopefully nothing got broken in the process.
2022-12-25 12:37:48 +01:00
Frank Denis
c82fb339ed Update deps 2022-12-25 11:23:13 +01:00
Frank Denis
06a3fa0499 Clarify what -H and -g do
Fixes #68
2022-12-25 10:21:19 +01:00
Frank Denis
8b9f9377b3 Update deps 2022-10-11 22:25:59 +02:00
Frank Denis
767b3e17b1 Update odoh-rs to the final version 2022-10-11 22:25:29 +02:00
Frank Denis
a60ced8782 Update deps 2022-09-21 12:21:00 +02:00
Frank Denis
25d1261730 Remove unneeded reference 2022-09-15 12:41:40 +02:00
Frank Denis
ff62b6a24b Disable the parking_lot feature in tokio.
Mutexes from the standard library have improved in recent Rust
versions. On Linux only, though.
2022-07-02 17:44:42 +02:00
Frank Denis
fd65582aa6 Nits 2022-06-24 23:49:04 +02:00
Frank Denis
d12b9deb35 Bump 2022-06-06 09:03:15 +02:00
Frank Denis
965bca7fde Remove Travis 2022-06-06 09:02:14 +02:00
Frank Denis
5b11bc520e Format 2022-06-06 09:01:36 +02:00
Frank Denis
ab4c27ef86 Update deps 2022-06-06 09:00:51 +02:00
Frank Denis
db9c8634e3 Replace jemalloc with mimalloc 2022-05-25 13:39:09 +02:00
Frank Denis
533c29ec1e Update rustls-pemfile 2022-05-14 13:02:12 +02:00
Frank Denis
e27ab7dee9 Format 2022-05-14 01:33:03 +02:00
Frank Denis
511b0b4388 Reorder 2022-05-07 15:38:04 +02:00
Frank Denis
74939bdc6c Bump 2022-03-05 16:04:30 +01:00
Frank Denis
054beb390c Update deps 2022-03-05 16:04:01 +01:00
Frank Denis
16ab626cc2 Update rustls-pemfile 2022-02-11 20:32:46 +01:00
Frank Denis
115938f90f Y++ 2022-01-01 10:29:39 +01:00
Frank Denis
c6c9d64681 Update clap to v3 2022-01-01 10:29:15 +01:00
Frank Denis
d586c50019 Uglify 2021-10-29 20:23:41 +02:00
Frank Denis
46be8b9662 Painful update of rustls 2021-10-29 20:13:47 +02:00
Frank Denis
e6fe51647d Bump 2021-09-07 13:25:16 +02:00
Frank Denis
379a7abc7e Add CORS header (only for DoH), for web browsers 2021-09-07 13:22:55 +02:00
Frank Denis
5770f9da33 Remove retired Let's Encrypt certificate 2021-09-07 12:08:28 +02:00
Frank Denis
b77f10cd9d Bump 2021-09-06 19:28:30 +02:00
Frank Denis
63eac2a622 Add the ability to specify an alternative port number 2021-09-06 19:24:19 +02:00
Frank Denis
a727c4b9fa Keep the LICENSE file, in addition to its name in Cargo 2021-08-20 01:13:44 +02:00
Frank Denis
2918061786 license -> license-file 2021-08-20 01:12:14 +02:00
Frank Denis
7657d5a2b2 Require tokio 1.10 2021-08-13 22:21:28 +02:00
Frank Denis
f9d2a0fc94 Bump 2021-06-12 14:01:52 +02:00
Frank Denis
4f1e0f2abe Print ODoH DNS stamps 2021-06-12 14:00:24 +02:00
Frank Denis
a988eb42a2 Properly use the odoh-rs API 2021-06-12 13:32:38 +02:00
Frank Denis
a19c523cf2 Nits 2021-06-12 10:46:38 +02:00
Frank Denis
b637bb1ec9 Downgrade hpke 2021-06-12 10:33:51 +02:00
Frank Denis
f4a1dee971 Update odoh-rs
Same thing, just more complicated to use
2021-06-12 10:28:27 +02:00
Frank Denis
f4cc9bb0f9 Merge branch 'master' of github.com:jedisct1/rust-doh
* 'master' of github.com:jedisct1/rust-doh:
  Add retries over TCP
2021-06-10 22:28:33 +02:00
Frank Denis
485afd5976 Add retries over TCP
Fixes #62
2021-06-09 10:50:28 +02:00
Frank Denis
0f268055b7 up 2021-06-07 14:48:03 +02:00
Frank Denis
324bbcde60 Add Cargo keyword 2021-06-07 14:47:36 +02:00
Frank Denis
474701ec1e Up 2021-06-07 14:45:21 +02:00
Frank Denis
3b77ff2e34 Typo 2021-06-07 14:42:47 +02:00
Frank Denis
ece8a445cb Document --allow-odoh-post 2021-06-07 14:42:21 +02:00
Frank Denis
eebd6b8356 Add a note on keys 2021-06-06 22:52:54 +02:00
Frank Denis
fd1081e0b0 up 2021-06-06 22:50:50 +02:00
Frank Denis
5c369fc610 Don't use a 0 TTL for ODoH configs 2021-06-06 17:46:18 +02:00
Frank Denis
3bc0d22f69 Add --allow-odoh-post 2021-06-06 17:41:48 +02:00
Frank Denis
a746e2822a Reject large query strings 2021-06-06 17:36:04 +02:00
Frank Denis
9be0d1ed74 Check Accept: if there is no Content-Type: 2021-06-06 17:31:15 +02:00
Frank Denis
62744d5390 Handle ODoH queries using GET 2021-06-06 16:11:45 +02:00
Frank Denis
21fc7441b3 Shuffle 2021-06-06 15:58:58 +02:00
Frank Denis
6edccca03e Factor DoH serving code 2021-06-06 15:53:40 +02:00
Frank Denis
90c30c8905 Avoid unwrap() 2021-06-05 17:24:50 +02:00
Frank Denis
1389c82872 Move file comment up 2021-06-05 17:11:09 +02:00
Frank Denis
9445e95014 Update deps, format 2021-05-14 23:36:37 +02:00
Frank Denis
338d6436c0 Let's Encrypt retired X3, introduced E1 2021-05-12 16:06:39 +02:00
Frank Denis
4e54008b10
Merge pull request #59 from chris-wood/caw/add-odoh
Add Oblivious DoH target support as a default feature.
2021-05-11 22:49:57 +02:00
Christopher Wood
822d3d9a51 Implement ODoH key rotation. 2021-05-01 07:56:10 -07:00
Frank Denis
4cb88417ba
Merge pull request #60 from jedisct1/dependabot/add-v2-config-file
Upgrade to GitHub-native Dependabot
2021-04-30 16:05:35 +02:00
dependabot-preview[bot]
e34f60e2eb
Upgrade to GitHub-native Dependabot 2021-04-29 20:50:32 +00:00
Christopher Wood
25a9c285db Remove dead comment. 2021-04-26 13:08:54 -07:00
Christopher Wood
05a60818ce Add Oblivious DoH target support as a default feature.
This change adds Oblivious DoH (ODoH) target support to doh-server.
This change does not include support for ODoH key rotation or algorithm
agility. ODoH is a default feature and not conditionally compiled
out.
2021-04-26 13:05:52 -07:00
Frank Denis
42211d4f5e Sync usage 2021-03-06 22:22:34 +01:00
Frank Denis
0403de66f1 Compute a preliminary stamp 2021-03-06 22:21:19 +01:00
Frank Denis
00cc43e2bb Clarify 2021-03-06 22:02:28 +01:00
Frank Denis
63d672895f Clarify 2021-03-06 21:46:39 +01:00
Frank Denis
4de5310430 fullchain.cer works 2021-03-06 21:45:33 +01:00
Frank Denis
4d685d8948 LE ECDSA certs don't play well with (at least Go) 2021-03-06 21:41:39 +01:00
Frank Denis
6f40f792e3 Clarify more 2021-03-06 21:32:59 +01:00
Frank Denis
eb8ea3dc84 Clarify 2021-03-06 21:32:01 +01:00
Frank Denis
ecacd6eca9 Add the command to convert SEC1 to PKCS8 2021-03-06 21:14:04 +01:00
Frank Denis
623328d37f Mention that certs must be RSA 2021-03-06 21:05:35 +01:00
Frank Denis
0404b8f8a7 space 2021-02-16 01:42:09 +01:00
Frank Denis
de0e8a39c3 Nits 2021-02-16 01:36:49 +01:00
Frank Denis
30abc95e48 CI: add a release task 2021-02-16 01:23:41 +01:00
Frank Denis
03581234b5 CI simplification 2021-02-16 00:54:06 +01:00
Frank Denis
dbc5dc702f Documentation updates 2021-02-16 00:13:44 +01:00
Frank Denis
ba663ef4d9 Reload certs every 10 sec 2021-02-16 00:02:42 +01:00
Frank Denis
518341df37 Reorganize a bit 2021-02-16 00:00:02 +01:00
Frank Denis
39124df9fc Reexport tokio 2021-02-15 23:46:24 +01:00
Frank Denis
b4d4eaae50 Limit the number of concurrent streams per client 2021-02-15 23:43:36 +01:00
Frank Denis
a2f342379e Automatically update the certificates without restarting 2021-02-15 23:31:45 +01:00
Frank Denis
0a99d0d212 Bump 2021-02-15 00:42:08 +01:00
Frank Denis
4326f1afa7 Set ALPN config to advertise HTTP/2 2021-02-15 00:41:16 +01:00
Frank Denis
c6c5c71458 Enable support for early data 2021-02-15 00:11:52 +01:00
Frank Denis
2179ceae67 Rewind cursor 2021-02-15 00:06:50 +01:00
Frank Denis
9f092224cd Parse PKCS8 and RSA keys separately 2021-02-15 00:00:57 +01:00
Frank Denis
c3f724118c Properly parse the -I and -i options (key file and cert file) 2021-02-14 23:48:24 +01:00
Frank Denis
dbe14da43e Disable default features for libdoh
Fixes #56
2021-02-12 21:57:21 +01:00
Frank Denis
d7fa144671 Bump 2021-02-12 19:32:41 +01:00
Frank Denis
2e95a50f9f Update deps; tokio now require the "time" feature 2021-02-12 19:23:08 +01:00
Frank Denis
b281555860 compare_and_swap() was too simple and has been deprecated 2021-02-12 19:20:01 +01:00
Frank Denis
741d28557d Update deps 2021-01-27 20:21:17 +01:00
Frank Denis
c176eeff5f Shrink tokio features 2021-01-03 14:26:29 +01:00
Frank Denis
226d8fe52a Update deps; fix for new tokio 2021-01-03 14:19:26 +01:00
Frank Denis
b544ca3daa year++ 2021-01-01 00:03:32 +01:00
Frank Denis
3ee8477ffa Add a link to rustup 2020-12-12 23:29:05 +01:00
Frank Denis
548adf7810 Remove links to precompiled packages 2020-12-12 23:28:31 +01:00
Frank Denis
1174d2c5b7 Update some base versions 2020-12-12 23:20:49 +01:00
Frank Denis
2b706345a4 Add more Let's Encrypt certificates hashes 2020-11-29 20:43:25 +01:00
Frank Denis
62226b12e4 Update deps 2020-11-23 22:57:55 +01:00
Frank Denis
5fabdbf2d1 Default tls_cert_key_path to tls_cert_path 2020-10-12 12:39:17 +02:00
Frank Denis
cf8ba631e6 Update deps 2020-08-31 22:07:37 +02:00
Frank Denis
d535650ed4 Update server list URL 2020-08-31 22:06:01 +02:00
Frank Denis
bf589911de STALE_IF_ERROR_SECS can be bumped up 2020-07-09 21:10:31 +02:00
Frank Denis
64cd83a440 Don't use the TTL for stale-if-error and stale-while-revalidate
Use constant, reasonable values instead
2020-07-09 21:08:34 +02:00
Frank Denis
b8c8dacb5d Remove max-stale 2020-07-09 20:26:07 +02:00
Frank Denis
1a0acbea44 Add stale-if-error and max-stale 2020-07-09 20:24:58 +02:00
Frank Denis
1812880562 Add stale-while-revalidate to Cache-Control 2020-07-07 14:36:04 +02:00
Frank Denis
5058aeaf07 Remove SARIF upload 2020-06-11 10:46:34 +02:00
Frank Denis
61b5c694c6
Add ShiftLeft scan 2020-06-11 10:43:34 +02:00
Frank Denis
e204f18609 Update links to precompiled binaries 2020-05-19 10:12:26 +02:00
Frank Denis
cf246f929c up 2020-05-19 10:11:34 +02:00
Frank Denis
8e4d66f68a Update deps, especially for tokio-rustls 2020-05-19 10:03:13 +02:00
Frank Denis
8d72413eaf Minibump 2020-04-14 19:25:01 +02:00
Frank Denis
f2215aa52f 'hyper::server::conn::Http::<E>::keep_alive': renamed to http1_keep_alive 2020-04-14 19:23:38 +02:00
Frank Denis
87954b5012 Bump minimal dependency versions 2020-04-14 19:22:49 +02:00
Frank Denis
868c41b9b8 Update minimal tokio version
0.2.13 seems to be stable
2020-03-19 12:07:15 +01:00
Frank Denis
657005b427 Quotes 2020-03-09 22:50:07 +01:00
Frank Denis
88eb1114eb Add more explanations about DNS stamps and certificate hashes 2020-03-09 22:38:20 +01:00
Frank Denis
4bdad2e075 Enable parking_lot for tokio 2020-03-09 22:28:49 +01:00
Frank Denis
73e2a5e345 Update deps 2020-03-09 22:26:01 +01:00
Frank Denis
13229624a7 tls is now enabled by default 2020-02-01 22:27:25 +01:00
Frank Denis
cb8c324def Update precompiled packages 2020-02-01 22:25:19 +01:00
Frank Denis
75b54ab57b Bump 2020-02-01 21:29:16 +01:00
Frank Denis
16cb57c1e1 Replace native-tls with rust-tls, switch to PEM format 2020-02-01 20:46:36 +01:00
Frank Denis
4914572894 Bump libdoh 2020-01-20 20:40:31 +01:00
Frank Denis
a91241afed Bump 2020-01-20 20:28:36 +01:00
Frank Denis
072a6c0672 Change padding length strategy; PADME is a bad fit for DNS 2020-01-20 20:26:46 +01:00
Frank Denis
76c28cf48e Implement the Display trait for DoHError 2020-01-08 22:19:28 -07:00
Frank Denis
071af94eba Switch to the PADME padding scheme 2020-01-08 22:04:14 -07:00
Frank Denis
ccca660359 Update precompiled binaries 2019-12-24 14:36:15 +01:00
Frank Denis
65607f6dc7 Bump 2019-12-24 14:22:39 +01:00
Frank Denis
a9397bc33e Padding PRR fix 2019-12-24 14:19:35 +01:00
Frank Denis
aaf0d50726 Add a version to the library dependency 2019-12-24 13:57:05 +01:00
Frank Denis
a08dfba08a Prepare for publishing 2019-12-24 13:54:59 +01:00
Frank Denis
04375a035d Return a BAD GATEWAY error on upstream timeout
Fixes #21
2019-12-24 13:17:57 +01:00
Frank Denis
9e5db2a218 Enforce a timeout on recv() 2019-12-24 13:09:05 +01:00
Frank Denis
595f809432 Use different padding to better catch bugs 2019-12-24 12:56:01 +01:00
Frank Denis
07088fd0a7 Fix tls feature 2019-12-24 12:54:15 +01:00
Frank Denis
c75ebff959 Don't trust Hyper's executor 2019-12-24 12:01:47 +01:00
Frank Denis
4f2846966e Nits 2019-12-24 10:36:14 +01:00
Frank Denis
c27e2059bb Check for question count after the length 2019-12-24 10:17:59 +01:00
Frank Denis
58b64ce077 A total number of records shouldn't be limited to a u16 2019-12-24 10:14:50 +01:00
Frank Denis
c66c13568a Export the dns module 2019-12-24 10:01:09 +01:00
Frank Denis
06b91af009 Reorganize a little bit 2019-12-24 09:44:43 +01:00
Frank Denis
bf42e95368 edns0 padding
TODO: check that a padding pseudorecord is not already present
2019-12-23 23:27:21 +01:00
Frank Denis
31953b2605 Refuse long labels 2019-12-23 20:57:09 +01:00
Frank Denis
4e4ddcf8e5 Sync DNS library with EDS code 2019-12-23 20:52:50 +01:00
Frank Denis
a4938aa962 Factorize a bit 2019-12-23 20:22:00 +01:00
Frank Denis
f2e5f13e85 Enable pipeline_flush 2019-12-23 17:01:33 +01:00
Frank Denis
02ce4c9e9b Move the TLS stuff to a dedicated file 2019-12-23 16:56:56 +01:00
Frank Denis
f7770951da Require less features from hyper 2019-12-23 16:44:29 +01:00
Frank Denis
ad36120e02 Mention what the password of the test certificate is 2019-12-23 16:36:24 +01:00
Frank Denis
0d55bf73c6 Refactor 2019-12-23 16:32:02 +01:00
Frank Denis
1b850b2f41 Prepare for tokio 0.2/hyper 0.13/async-await migration 2019-12-23 15:26:27 +01:00
Frank Denis
1cb4a11a7b Remove unused crates 2019-12-22 22:11:47 +01:00
Frank Denis
49c5ebd9fb Fix link 2019-11-18 15:32:37 +01:00
Frank Denis
c52fa0b8d3 Link to the raw file 2019-11-18 15:22:18 +01:00
Frank Denis
a44974d73c Doc 2019-11-18 15:21:06 +01:00
Frank Denis
740e23dd93 Add an example with EDS 2019-11-18 15:15:34 +01:00
Frank Denis
7102a173fb Build with TLS 2019-11-18 15:10:47 +01:00
Frank Denis
57d4c6de9f Always use lto="fat" 2019-11-18 15:08:39 +01:00
Frank Denis
cb31469004 Add Github actions 2019-11-18 15:04:18 +01:00
Frank Denis
3b3bb6f0e7 There's a new version of base64 around. Okay. 2019-11-01 22:32:57 +01:00
Frank Denis
0301b25d1c Bump 2019-10-13 13:08:41 +02:00
Frank Denis
91ba886ee5 up 2019-10-13 13:00:49 +02:00
Frank Denis
d3b92d782d up 2019-10-13 13:00:17 +02:00
Frank Denis
37d40e88a3 Default bind to IPv6 socket if connecting to server over IPv6
Fixes #37
2019-10-13 12:57:32 +02:00
Frank Denis
7bd2bf2131 Mention encrypted-dns-server 2019-10-13 12:55:26 +02:00
Frank Denis
51adc024ee Bump 2019-10-01 16:50:22 +02:00
Frank Denis
eb2e2afb75 temporary->recoverable 2019-10-01 16:50:13 +02:00
Frank Denis
a3bb77fa61 Don't cache recoverable errors for too long 2019-10-01 16:49:42 +02:00
Frank Denis
8c96ecdc10 Rename signatures to hashes for clarity 2019-08-15 11:53:57 +02:00
Frank Denis
c65ce1210d Merge branch 'master' of github.com:jedisct1/rust-doh
* 'master' of github.com:jedisct1/rust-doh:
  Bump, just for the import fix
  It's just proxy_cache
  Add some documentation
  Update deps
  cmd argument validation and remote hostname support
2019-08-12 16:08:45 +02:00
Frank Denis
10a99040e1 Get help info from Cargo.toml
Fixes #35
Fixes #13
2019-08-12 16:07:57 +02:00
Frank Denis
46cdc9168d Bump, just for the import fix 2019-08-12 12:00:10 +02:00
Frank Denis
abaa8e4da0 It's just proxy_cache 2019-07-20 22:25:08 +02:00
Frank Denis
48ddb237b4 Add some documentation 2019-07-20 22:08:08 +02:00
Frank Denis
d12defaf2c Update deps 2019-07-20 21:54:21 +02:00
Frank Denis
98d1a8c8d7
Merge pull request #26 from Dieff/master
Added some cli argument validation and ability to use hostname to specify upstream server
2019-07-13 09:03:30 +02:00
Nick Dieff
524168795f cmd argument validation and remote hostname support 2019-07-12 22:03:17 -04:00
Frank Denis
8fbcdfc2a7 Bump 2019-06-15 11:03:37 +02:00
Frank Denis
7ff594de3b Only check the content type on POST queries
Fixes #25
2019-06-15 11:02:01 +02:00
Frank Denis
cc00e62f82 Bump 2019-06-11 13:13:29 +02:00
Frank Denis
224609ea9b Check content-type instead of accept
Fixes #23
2019-06-04 23:24:15 +02:00
Frank Denis
217bb90320 Return various HTTP error codes on invalid queries
Fixes #23
2019-06-04 18:34:32 +02:00
Frank Denis
de99e6a8b2 Return BAD_REQUEST on short queries for POST queries, too
Fixes #20
2019-06-01 00:58:43 +02:00
Frank Denis
dbef23142a Update tokiox 2019-06-01 00:58:34 +02:00
Frank Denis
30f2a7e86a Bump 2019-05-27 14:34:53 +02:00
Frank Denis
d02fa3d393 Add an option to disable POST queries
CDNs such as Cloudflare may not cache POST queries.

Refusing POST queries is not a big deal, since dnscrypt-proxy will
transparently switch to GET.
2019-05-27 14:31:22 +02:00
Frank Denis
7612e9a57f Bump 2019-05-19 12:41:22 +02:00
Frank Denis
764d7c3a64 Update the documentation 2019-05-19 12:41:07 +02:00
Frank Denis
ed47ab8a1b LTO 2019-05-19 12:18:14 +02:00
Frank Denis
1706ec0dcb Make TLS an optional feature 2019-05-19 11:38:55 +02:00
Frank Denis
badcb6104d Update the documentation 2019-05-16 17:51:15 +02:00
Frank Denis
58a0ef3347 Enable keepalive by default, add a switch to disable it 2019-05-16 17:15:07 +02:00
Frank Denis
dc7480c3f9 Bump deps 2019-05-16 17:00:33 +02:00
Frank Denis
d691562f60 Add preliminary support for TLS 2019-05-16 16:59:52 +02:00
Frank Denis
ed9c50777a Bump 2019-05-12 02:05:32 +02:00
Frank Denis
b723245bec Allow Cargo.lock 2019-05-12 02:00:49 +02:00
Frank Denis
4b7956bc78 pub(crate) 2019-05-12 01:57:04 +02:00
Frank Denis
a65e1883ee Import utils 2019-05-12 01:55:36 +02:00
Frank Denis
08675c6243 Update deps 2019-05-12 01:46:49 +02:00
Frank Denis
4cdb23d9fa Add padding 2019-05-12 01:46:44 +02:00
Frank Denis
0fc3a23d04 Update a few dependencies 2019-04-19 20:51:20 +02:00
Frank Denis
587c725ca5 Update the README file 2019-03-16 21:07:40 +01:00
Frank Denis
4e4f9aeea8 Bump and require specific versions for tokio/futures/hyper 2019-03-16 21:06:47 +01:00
Frank Denis
b3587f9694 Make TTLs configurable and slightly bump the defaults 2019-03-16 21:04:11 +01:00
Frank Denis
d17e6d767f Bump 2019-03-16 20:29:48 +01:00
Frank Denis
5a07001129 Use jemalloc, because glibc's allocator is crap 2019-03-13 20:30:17 +01:00
Frank Denis
d22331d106 Factorize RR traversal code 2019-03-13 19:52:54 +01:00
Frank Denis
8ad7edbf0e Set the max payload size in the outgoing queries 2019-03-13 19:20:13 +01:00
Frank Denis
2f85b58f33 Use Rust 2018 2019-03-12 02:09:26 +01:00
Frank Denis
4ae4c5d974 Bump 2019-03-12 02:06:43 +01:00
Frank Denis
9ecdb3dcd7 Handle timeouts 2019-03-12 01:56:54 +01:00
Frank Denis
37c0024e10 Update base64 2019-03-12 01:16:20 +01:00
Frank Denis
a8cd28fff8 2019 2019-02-18 14:53:07 +01:00
Frank Denis
98a0c59047 Don't use tokio_current_thread; update tokio-timer (which doesn't work) 2018-10-01 11:57:46 +02:00
Frank Denis
2723bd3ac6 Refactor clients_count things 2018-10-01 00:37:02 +02:00
Frank Denis
9dcbc328d4 Decrement the clients count on timeout 2018-10-01 00:32:48 +02:00
Frank Denis
737c5b4d24 Compatibility with ancient rust versions
Maybe
fixes #14
2018-10-01 00:04:02 +02:00
Frank Denis
34c043288a Bump 2018-08-24 19:40:14 +02:00
Frank Denis
82587db5f6 Use tokio-current-thread 2018-08-24 19:39:30 +02:00
Frank Denis
81518fe385 Bleh 2018-08-24 19:35:45 +02:00
Frank Denis
2d03cf4b55 Decrease the client count on server error 2018-08-24 19:30:43 +02:00
Frank Denis
93d90acd32 Bump version, remove clippy dev dependency by the way 2018-08-21 14:09:46 +02:00
Frank Denis
61d1e7cb0e Implement the (deprecated) description() function for old Rust versions
Fixes #10
2018-08-21 12:05:06 +00:00
Frank Denis
22b51a4c20 Bump 2018-08-16 21:14:14 +02:00
Frank Denis
a31c9d225a The VSCode crates plugin is enough 2018-08-16 21:13:53 +02:00
Frank Denis
12ce22060c Do not ignore the -p (path) command-line parameter
Print the full URI at startup
2018-08-16 21:13:10 +02:00
Frank Denis
b480b85e84 Remove useless comment 2018-08-16 19:49:53 +02:00
Frank Denis
d14dd35ab0 Back to tokio-timer 0.1 2018-07-07 22:59:29 +02:00
Frank Denis
82630f4a31 Get rid of the Mutex 2018-07-07 22:29:41 +02:00
Frank Denis
bc925cc2d5 Replace the clients count Mutex with an Atomic 2018-07-07 22:14:41 +02:00
Frank Denis
eddb36b541 Ignore the class for OPT records 2018-07-07 22:01:35 +02:00
Frank Denis
a3ee3f3198 Clippify 2018-07-07 21:35:50 +02:00
Frank Denis
1e2f123a35 Format 2018-07-07 21:18:15 +02:00
Frank Denis
d042aa0f5a
Merge pull request #8 from bluetech/hyper-0.12
Upgrade code to hyper 0.12
2018-07-07 21:17:05 +02:00
Ran Benita
eebca570ed Upgrade code to hyper 0.12
This keeps the same structure without refactoring anything. However
there are several necessary changes:

1. The returned future needs to be Send because hyper requires it.
2. The typed headers were removed from hyper, so use plain code instead.
3. Use the even-lower-level API because it was actually easier.
2018-07-07 16:57:26 +03:00
Ran Benita
b66fde914f Remove ~ from dependency specifications
In cargo ~ is the default and it is conventional to omit it.
2018-07-07 15:29:50 +03:00
Ran Benita
c45da71704 Remove unused tokio-io dependency
It is included in tokio::io and not used directly.
2018-07-07 15:29:11 +03:00
Frank Denis
dff63386f7 Update to tokio-timer 0.2 2018-07-02 14:19:09 +02:00
Frank Denis
4ca54eb71b Revert "..."
This reverts commit 5571b6c405.
2018-07-02 13:43:26 +02:00
Frank Denis
5571b6c405 ... 2018-07-02 12:41:44 +02:00
27 changed files with 2186 additions and 430 deletions

1
.github/FUNDING.yml vendored Normal file
View file

@ -0,0 +1 @@
open_collective: dnscrypt

8
.github/dependabot.yml vendored Normal file
View file

@ -0,0 +1,8 @@
version: 2
updates:
- package-ecosystem: cargo
directory: "/"
schedule:
interval: daily
time: "04:00"
open-pull-requests-limit: 10

17
.github/workflows/issues.yml vendored Normal file
View file

@ -0,0 +1,17 @@
name: Close inactive issues
on:
schedule:
- cron: "30 1 * * *"
jobs:
close-issues:
runs-on: ubuntu-latest
permissions:
issues: write
pull-requests: write
steps:
- uses: actions/stale@v9
with:
stale-issue-message: "This issue is stale because it has been open for 30 days with no activity."
close-issue-message: "This issue was closed because it has been inactive for 14 days since being marked as stale."
repo-token: ${{ secrets.GITHUB_TOKEN }}

164
.github/workflows/release.yml vendored Normal file
View file

@ -0,0 +1,164 @@
name: Release
on:
push:
tags:
- "*"
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Get the version
id: get_version
run: echo ::set-output name=VERSION::${GITHUB_REF#refs/tags/}
- uses: actions/checkout@v3
- uses: mlugg/setup-zig@v1
with:
version: 0.10.1
- uses: hecrj/setup-rust-action@master
with:
rust-version: stable
- name: Check Cargo availability
run: cargo --version
- name: Check Rustup default toolchain
run: rustup default | grep stable
- name: Install cargo-deb
run: cargo install cargo-deb
- name: Install cargo-generate-rpm
run: cargo install cargo-generate-rpm
- name: Install cargo-zigbuild
run: cargo install cargo-zigbuild
- name: Release build Linux-x86-64
run: |
rustup target add x86_64-unknown-linux-musl
env RUSTFLAGS="-C strip=symbols" cargo zigbuild --release --target x86_64-unknown-linux-musl
mkdir doh-proxy
mv target/x86_64-unknown-linux-musl/release/doh-proxy doh-proxy/
cp README.md localhost.pem doh-proxy/
tar cjpf doh-proxy_${{ steps.get_version.outputs.VERSION }}_linux-x86_64.tar.bz2 doh-proxy
rm -fr doh-proxy
- name: Release build Linux-aarch64
run: |
rustup target add aarch64-unknown-linux-musl
env RUSTFLAGS="-C strip=symbols" cargo zigbuild --release --target aarch64-unknown-linux-musl
mkdir doh-proxy
mv target/aarch64-unknown-linux-musl/release/doh-proxy doh-proxy/
cp README.md localhost.pem doh-proxy/
tar cjpf doh-proxy_${{ steps.get_version.outputs.VERSION }}_linux-aarch64.tar.bz2 doh-proxy
rm -fr doh-proxy
- name: Release build Windows-x86_64
run: |
rustup target add x86_64-pc-windows-gnu
env RUSTFLAGS="-C strip=symbols" cargo zigbuild --release --target x86_64-pc-windows-gnu
mkdir doh-proxy
mv target/x86_64-pc-windows-gnu/release/doh-proxy.exe doh-proxy/
cp README.md localhost.pem doh-proxy/
zip -9 -r doh-proxy_${{ steps.get_version.outputs.VERSION }}_windows-x86_64.zip doh-proxy
rm -fr doh-proxy
- name: Debian packages
run: |
rustup target add x86_64-unknown-linux-musl
env RUSTFLAGS="-C strip=symbols" cargo deb --no-strip --cargo-build=zigbuild --target=x86_64-unknown-linux-musl
rustup target add aarch64-unknown-linux-musl
env RUSTFLAGS="-C strip=symbols" cargo deb --no-strip --cargo-build=zigbuild --target=aarch64-unknown-linux-musl
- name: RPM packages
run: |
rustup target add x86_64-unknown-linux-gnu
env RUSTFLAGS="-C strip=symbols" cargo-zigbuild build --target=x86_64-unknown-linux-gnu.2.17 --release
mv target/x86_64-unknown-linux-musl/release/doh-proxy target/release/
cargo generate-rpm --target x86_64-unknown-linux-gnu
rustup target add aarch64-unknown-linux-gnu
env RUSTFLAGS="-C strip=symbols" cargo-zigbuild build --target=aarch64-unknown-linux-gnu.2.17 --release
cargo generate-rpm --target aarch64-unknown-linux-gnu
- name: Create release
id: create_release
uses: actions/create-release@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tag_name: ${{ github.ref }}
release_name: Release ${{ github.ref }}
draft: true
prerelease: false
- name: Upload Debian package for x86_64
id: upload-release-asset-debian-x86_64
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_name: "doh-proxy_${{ steps.get_version.outputs.VERSION }}-1_amd64.deb"
asset_path: "target/x86_64-unknown-linux-musl/debian/doh-proxy_${{ steps.get_version.outputs.VERSION }}-1_amd64.deb"
asset_content_type: application/x-debian-package
- name: Upload RPM package for x86_64
id: upload-release-asset-rpm-x86_64
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_name: "doh-proxy-${{ steps.get_version.outputs.VERSION }}-1.x86_64.rpm"
asset_path: "target/x86_64-unknown-linux-gnu/generate-rpm/doh-proxy-${{ steps.get_version.outputs.VERSION }}-1.x86_64.rpm"
asset_content_type: application/x-redhat-package-manager
- name: Upload RPM package for aarch64
id: upload-release-asset-rpm-aarch64
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_name: "doh-proxy-${{ steps.get_version.outputs.VERSION }}-1.aarch64.rpm"
asset_path: "target/aarch64-unknown-linux-gnu/generate-rpm/doh-proxy-${{ steps.get_version.outputs.VERSION }}-1.aarch64.rpm"
asset_content_type: application/x-redhat-package-manager
- name: Upload tarball for linux-x86_64
id: upload-release-asset-tarball-linux-x86_64
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_name: "doh-proxy_${{ steps.get_version.outputs.VERSION }}_linux-x86_64.tar.bz2"
asset_path: "doh-proxy_${{ steps.get_version.outputs.VERSION }}_linux-x86_64.tar.bz2"
asset_content_type: application/x-tar
- name: Upload tarball for linux-aarch64
id: upload-release-asset-tarball-linux-aarch64
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_name: "doh-proxy_${{ steps.get_version.outputs.VERSION }}_linux-aarch64.tar.bz2"
asset_path: "doh-proxy_${{ steps.get_version.outputs.VERSION }}_linux-aarch64.tar.bz2"
asset_content_type: application/x-tar
- name: Upload tarball for windows-x86_64
id: upload-release-asset-tarball-windows-x86_64
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_name: "doh-proxy_${{ steps.get_version.outputs.VERSION }}_windows-x86_64.zip"
asset_path: "doh-proxy_${{ steps.get_version.outputs.VERSION }}_windows-x86_64.zip"
asset_content_type: application/zip

View file

@ -0,0 +1,17 @@
name: ShiftLeft Scan
on: push
jobs:
Scan-Build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v1
- name: Perform ShiftLeft Scan
uses: ShiftLeftSecurity/scan-action@master
env:
WORKSPACE: ""
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
output: reports

23
.github/workflows/test.yml vendored Normal file
View file

@ -0,0 +1,23 @@
name: Rust
on: [push]
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@master
- uses: hecrj/setup-rust-action@master
with:
rust-version: stable
- name: Check Cargo availability
run: cargo --version
- name: Check Rustup default toolchain
run: rustup default | grep stable
- name: Build without default features
run: |
env RUSTFLAGS="-C link-arg=-s" cargo check --no-default-features
- name: Build with default features
run: |
env RUSTFLAGS="-C link-arg=-s" cargo check

4
.gitignore vendored
View file

@ -1,5 +1,7 @@
#*#
**/*.rs.bk
*~
/target/
Cargo.lock
/target/
/src/libdoh/target/

View file

@ -1,4 +0,0 @@
language: rust
rust:
- nightly
- stable

View file

@ -1,20 +1,54 @@
[package]
name = "doh-proxy"
version = "0.1.7"
version = "0.9.11"
authors = ["Frank Denis <github@pureftpd.org>"]
description = "A DNS-over-HTTPS (DoH) proxy"
keywords = ["dns","https","doh","proxy"]
description = "A DNS-over-HTTPS (DoH) and ODoH (Oblivious DoH) proxy"
keywords = ["dns", "https", "doh", "odoh", "proxy"]
license = "MIT"
homepage = "https://github.com/jedisct1/rust-doh"
repository = "https://github.com/jedisct1/rust-doh"
categories = ["asynchronous", "network-programming","command-line-utilities"]
categories = ["asynchronous", "network-programming", "command-line-utilities"]
edition = "2018"
readme = "README.md"
[features]
default = ["tls"]
tls = ["libdoh/tls"]
[dependencies]
base64 = "~0.9"
clap = "~2"
futures = "~0.1"
hyper = "~0.11"
tokio = "~0.1"
tokio-io = "~0.1"
tokio-timer = "~0.1"
clippy = {version = ">=0", optional = true}
libdoh = { path = "src/libdoh", version = "0.9.9", default-features = false }
clap = { version = "4", features = ["std", "cargo", "wrap_help", "string"] }
dnsstamps = "0.1.10"
mimalloc = { version = "0.1.44", default-features = false }
[package.metadata.generate-rpm]
assets = [
{ source = "target/release/doh-proxy", dest = "/usr/bin/doh-proxy", mode = "755" },
{ source = "README.md", dest = "/usr/share/doc/doh-proxy/README.md", mode = "644", doc = true },
]
[package.metadata.deb]
extended-description = """\
A fast and secure DoH (DNS-over-HTTPS) and ODoH server written in Rust."""
assets = [
[
"target/release/doh-proxy",
"usr/bin/",
"755",
],
[
"README.md",
"usr/share/doc/doh-proxy/README.md",
"644",
],
]
section = "network"
depends = "$auto"
priority = "optional"
[profile.release]
codegen-units = 1
incremental = false
lto = "fat"
opt-level = 3
panic = "abort"

View file

@ -1,6 +1,6 @@
MIT License
Copyright (c) 2018 Frank Denis
Copyright (c) 2018-2025 Frank Denis
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

205
README.md
View file

@ -1,44 +1,213 @@
[![dependency status](https://deps.rs/repo/github/jedisct1/rust-doh/status.svg)](https://deps.rs/repo/github/jedisct1/rust-doh)
# ![DoH server (and ODoH - Oblivious DoH server)](logo.png)
# doh-proxy
A fast and secure DoH (DNS-over-HTTPS) and ODoH (Oblivious DoH) server.
A DNS-over-HTTP server proxy in Rust. Add a webserver and you get DNS-over-HTTPS, which is actually DNS-over-HTTP/2.
`doh-proxy` is written in Rust, and has been battle-tested in production since February 2018. It doesn't do DNS resolution on its own, but can sit in front of any DNS resolver in order to augment it with DoH support.
## Installation
### Option 1: precompiled binaries for Linux
Precompiled tarballs and Debian packages for Linux/x86_64 [can be downloaded here](https://github.com/jedisct1/doh-server/releases/latest).
### Option 2: from source code
This requires the [`rust`](https://rustup.rs) compiler to be installed.
* With built-in support for HTTPS (default):
```sh
cargo install doh-proxy
```
* Without built-in support for HTTPS:
```sh
cargo install doh-proxy --no-default-features
```
## Usage
```text
doh-proxy
A DNS-over-HTTP server proxy
USAGE:
doh-proxy [OPTIONS]
doh-proxy [FLAGS] [OPTIONS]
FLAGS:
-h, --help Prints help information
-V, --version Prints version information
-O, --allow-odoh-post Allow POST queries over ODoH even if they have been disabled for DoH
-K, --disable-keepalive Disable keepalive
-P, --disable-post Disable POST queries
-h, --help Prints help information
-V, --version Prints version information
OPTIONS:
-l, --listen_address <listen_address> Address to listen to [default: 127.0.0.1:3000]
-b, --local_bind_address <local_bind_address> Address to connect from [default: 0.0.0.0:0]
-c, --max_clients <max_clients> Maximum number of simultaneous clients [default: 512]
-E, --err-ttl <err_ttl> TTL for errors, in seconds [default: 2]
-H, --hostname <hostname> Host name (not IP address) DoH clients will use to connect
-l, --listen-address <listen_address> Address to listen to [default: 127.0.0.1:3000]
-b, --local-bind-address <local_bind_address> Address to connect from
-c, --max-clients <max_clients> Maximum number of simultaneous clients [default: 512]
-C, --max-concurrent <max_concurrent> Maximum number of concurrent requests per client [default: 16]
-X, --max-ttl <max_ttl> Maximum TTL, in seconds [default: 604800]
-T, --min-ttl <min_ttl> Minimum TTL, in seconds [default: 10]
-p, --path <path> URI path [default: /dns-query]
-u, --server_address <server_address> Address to connect to [default: 9.9.9.9:53]
-g, --public-address <public_address> External IP address DoH clients will connect to
-j, --public-port <public_port> External port DoH clients will connect to, if not 443
-u, --server-address <server_address> Address to connect to [default: 9.9.9.9:53]
-t, --timeout <timeout> Timeout, in seconds [default: 10]
-I, --tls-cert-key-path <tls_cert_key_path>
Path to the PEM-encoded secret keys (only required for built-in TLS)
-i, --tls-cert-path <tls_cert_path>
Path to the PEM/PKCS#8-encoded certificates (only required for built-in TLS)
```
Serves HTTP requests only. DoH is mostly useful to leverage an existing webserver, so just configure your webserver or CDN to proxy connections to this.
Example command-line:
```sh
doh-proxy -H 'doh.example.com' -u 127.0.0.1:53 -g 233.252.0.5
```
Here, `doh.example.com` is the host name (which should match a name included in the TLS certificate), `127.0.0.1:53` is the address of the DNS resolver, and `233.252.0.5` is the public IP address of the DoH server.
## HTTP/2 and HTTP/3 termination
The recommended way to use `doh-proxy` is to use a TLS termination proxy (such as [hitch](https://github.com/varnish/hitch) or [relayd](https://man.openbsd.org/relayd.8)), a CDN or a web server with proxying abilities as a front-end.
That way, the DoH service can be exposed as a virtual host, sharing the same IP addresses as existing websites.
If `doh-proxy` and the HTTP/2 (/ HTTP/3) front-end run on the same host, using the HTTP protocol to communicate between both is fine.
If both are on distinct networks, such as when using a CDN, `doh-proxy` can handle HTTPS requests, provided that it was compiled with the `tls` feature.
The certificates and private keys must be encoded in PEM/PKCS#8 format. They can be stored in the same file.
If you are using ECDSA certificates and ECDSA private keys start with `-----BEGIN EC PRIVATE KEY-----` and not `-----BEGIN PRIVATE KEY-----`, convert them to PKCS#8 with (in this example, `example.key` is the original file):
```sh
openssl pkcs8 -topk8 -nocrypt -in example.key -out example.pkcs8.pem
```
In order to enable built-in HTTPS support, add the `--tls-cert-path` option to specify the location of the certificates file, as well as the private keys file using `--tls-cert-key-path`.
Once HTTPS is enabled, HTTP connections will not be accepted.
A sample self-signed certificate [`localhost.pem`](https://github.com/jedisct1/doh-server/raw/master/localhost.pem) can be used for testing.
The file also includes the private key.
[`acme.sh`](https://github.com/acmesh-official/acme.sh) can be used to create and update TLS certificates using Let's Encrypt and other ACME-compliant providers. If you are using it to create ECDSA keys, see above for converting the secret key into PKCS#8.
The certificates path must be set to the full certificates chain (`fullchain.cer`) and the key path to the secret keys (the `.key` file):
```sh
doh-proxy -i /path/to/fullchain.cer -I /path/to/domain.key ...
```
Once started, `doh-proxy` automatically reloads the certificates as they change; there is no need to restart the server.
If clients are getting the `x509: certificate signed by unknown authority` error, double check that the certificate file is the full chain, not the other `.cer` file.
## Accepting both DNSCrypt and DoH connections on port 443
DNSCrypt is an alternative encrypted DNS protocol that is faster and more lightweight than DoH.
Both DNSCrypt and DoH connections can be accepted on the same TCP port using [Encrypted DNS Server](https://github.com/jedisct1/encrypted-dns-server).
Encrypted DNS Server forwards DoH queries to Nginx or `doh-proxy` when a TLS connection is detected, or directly responds to DNSCrypt queries.
It also provides DNS caching, server-side filtering, metrics, and TCP connection reuse in order to mitigate exhaustion attacks.
Unless the front-end is a CDN, an ideal setup is to use `doh-proxy` behind `Encrypted DNS Server`.
## Oblivious DoH (ODoH)
Oblivious DoH is similar to Anonymized DNSCrypt, but for DoH. It requires relays, but also upstream DoH servers that support the protocol.
This proxy supports ODoH termination (not relaying) out of the box.
However, ephemeral keys are currently only stored in memory. In a load-balanced configuration, sticky sessions must be used.
Currently available ODoH relays only use `POST` queries.
So, even if `POST` queries have been disabled for regular DoH queries, accepting them over ODoH is required in order to remain compatible with ODoH relays.
This can be achieved with the `--allow-odoh-post` command-line switch.
## Operational recommendations
* DoH can be easily detected and blocked using SNI inspection. As a mitigation, DoH endpoints should preferably share the same virtual host as existing, popular websites, rather than being on dedicated virtual hosts.
* When using DoH, DNS stamps should include a resolver IP address in order to remove a dependency on non-encrypted, non-authenticated, easy-to-block resolvers.
* Unlike DNSCrypt where users must explicitly trust a DNS server's public key, the security of DoH relies on traditional public Certificate Authorities. Additional root certificates (required by governments, security software, enterprise gateways) installed on a client immediately make DoH vulnerable to MITM. In order to prevent this, DNS stamps should include the hash of the parent certificate.
* TLS certificates are tied to host names. But domains expire, get reassigned and switch hands all the time. If a domain originally used for a DoH service gets a new, possibly malicious owner, clients still configured to use the service will blindly keep trusting it if the CA is the same. As a mitigation, the CA should sign an intermediate certificate (the only one present in the stamp), itself used to sign the name used by the DoH server. While commercial CAs offer this, Let's Encrypt currently doesn't.
* Make sure that the front-end supports at least HTTP/2 and TLS 1.3.
* Internal DoH servers still require TLS certificates. So, if you are planning to deploy an internal server, you need to set up an internal CA, or add self-signed certificates to every single client.
## Example usage with `encrypted-dns-server`
Add the following section to the configuration file:
```toml
[tls]
upstream_addr = "127.0.0.1:3000"
```
## Example usage with `nginx`
In an existing `server`, a `/dns-query` endpoint can be exposed that way:
```text
location /dns-query {
proxy_pass http://127.0.0.1:3000;
}
```
This example assumes that the DoH proxy is listening locally to port `3000`.
HTTP caching can be added (see the `proxy_cache_path` and `proxy_cache` directives in the Nginx documentation), but be aware that a DoH server will quickly create a gigantic amount of files.
## DNS Stamp and certificate hashes
Use the online [DNS stamp calculator](https://dnscrypt.info/stamps/) to compute the stamp for your server.
Add it to the `[static]` section of [`dnscrypt-proxy`](https://github.com/DNSCrypt/dnscrypt-proxy) and check that everything works as expected.
Then, start `dnscrypt-proxy` with the `-show-certs` command-line flag to print the hashes for your certificate chain.
Here is an example output:
```text
[NOTICE] Advertised cert: [CN=dohtrial.att.net,O=AT&T Services\, Inc.,L=Dallas,ST=Texas,C=US] [f679e8451940f06141854dc94e1eb79fa5e04463c15b88f3b392da793c16c353]
[NOTICE] Advertised cert: [CN=DigiCert Global CA G2,O=DigiCert Inc,C=US] [f61e576877da9650294cccb5f96c75fcb71bda1bbc4646367c4ebeda89d7318f]
```
The first printed certificate is the certificate of the server itself. The next line is the one that signed that certificate. As you keep going down, you are getting closer to the certificate authority.
Unless you are using intermediate certificates, your safest option is probably to include the last printed hash certificate in your DNS stamp.
Go back to the online DNS stamp calculator, and copy&paste the hash (in this example: `f61e576877da9650294cccb5f96c75fcb71bda1bbc4646367c4ebeda89d7318f`).
If you are using Let's Encrypt, the last line is likely to be:
```text
Advertised cert: [CN=Let's Encrypt Authority R3,O=Let's Encrypt,C=US] [444ebd67bb83f8807b3921e938ac9178b882bd50aadb11231f044cf5f08df7ce]
```
There you have it. Your certificate hash is `444ebd67bb83f8807b3921e938ac9178b882bd50aadb11231f044cf5f08df7ce`.
This [Go code snippet](https://gist.github.com/d6cb41742a1ceb54d48cc286f3d5c5fa) can also compute the hash of certificates given a `.der` file.
### Common certificate hashes
* Let's Encrypt E1:
* `cc1060d39c8329b62b6fbc7d0d6df9309869b981e7e6392d5cd8fa408f4d80e6`
* Let's Encrypt R3:
* `444ebd67bb83f8807b3921e938ac9178b882bd50aadb11231f044cf5f08df7ce`
* Let's Encrypt R10:
* `e644ba6963e335fe765cb9976b12b10eb54294b42477764ccb3a3acca3acb2fc`
* ZeroSSL:
* `9a3a34f727deb9bca51003d9ce9c39f8f27dd9c5242901c2bab1a44e635a0219`
## Clients
`doh-proxy` can be used with [dnscrypt-proxy](https://github.com/jedisct1/dnscrypt-proxy)
as a client.
`doh-proxy` can be used with [dnscrypt-proxy](https://github.com/DNSCrypt/dnscrypt-proxy) as a client.
`doh-proxy` is currently being used by the `doh.crypto.sx` public DNS resolver.
`doh-proxy` is used in production for the `doh.crypto.sx` public DNS resolver and many others.
Other public DoH servers can be found here: [public encrypted DNS servers](https://dnscrypt.info/public-servers).
An extensive list of public DoH servers can be found here: [public encrypted DNS servers](https://github.com/DNSCrypt/dnscrypt-resolvers/blob/master/v3/public-resolvers.md).

47
localhost.pem Normal file
View file

@ -0,0 +1,47 @@
-----BEGIN PRIVATE KEY-----
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDb7g6EQhbfby97
k4oMbZTzdi2TWFBs7qK/QwgOu+L6EhNHPO1ZEU29v0APFBFJO5zyyAk9bZ9k9tPB
bCuVVI9jEUfLH3UCjEQPG6XI2w++uVh0yALvc/uurCvRHVlle/V7cAoikndc2SjE
RQUALbACIqwD5g0F77BYwcsreB4GH253/R6Q2/CJZ4jNHPjkocOJiVr3ejA0kkoN
MXpGUXWcrVVk20M2A1CeO7HAulLRcklEdoHE3v46pjp0iZK0F9LyZX1U1ql+4QL3
iQttoZ4tMg83lFHSt4G9PrpIhzXr9W4NW822faSvrIwwN/JbItUmRa7n/3+MkuJQ
IGGNDayXAgMBAAECggEBANs0fmGSocuXvYL1Pi4+9qxnCOwIpTi97Zam0BwnZwcL
Bw4FCyiwV4UdX1LoFIailT9i49rHLYzre4oZL6OKgdQjQCSTuQOOHLPWQbpdpWba
w/C5/jr+pkemMZIfJ6BAGiArPt7Qj4oKpFhj1qUj5H9sYXkNTcOx8Fm25rLv6TT9
O7wg0oCpyG+iBSbCYBp9mDMz8pfo4P3BhcFiyKCKeiAC6KuHU81dvuKeFB4XQK+X
no2NqDqe6MBkmTqjNNy+wi1COR7lu34LPiWU5Hq5PdIEqBBUMjlMI6oYlhlgNTdx
SvsqFz3Xs6kpAhJTrSiAqscPYosgaMQxo+LI26PJnikCgYEA9n0OERkm0wSBHnHY
Kx8jaxNYg93jEzVnEgI/MBTJZqEyCs9fF6Imv737VawEN/BhesZZX7bGZQfDo8AT
aiSa5upkkSGXEqTu5ytyoKFTb+dJ/qmx3+zP6dPVzDnc8WPYMoUg7vvjZkXXJgZX
+oMlMUW1wWiDNI3wP19W9Is6xssCgYEA5GqkUBEns6eTFJV0JKqbEORJJ7lx5NZe
cIx+jPpLkILG4mOKOg1TBx0wkxa9cELtsNsM+bPtu9OqRMhsfPBmsXDHhJwg0Z6G
eDTfYYPkpRhwZvl6jBZn9sLVR9wfg2hE+n0lfV3mceg336KOkwAehDU84SWZ2e0S
esqkpbHJa+UCgYA7PY0O8POSzcdWkNf6bS5vAqRIdSCpMjGGc4HKRYSuJNnJHVPm
czNK7Bcm3QPaiexzvI4oYd5G09niVjyUSx3rl7P56Y/MjFVau+d90agjAfyXtyMo
BVtnAGGnBtUiMvP4GGT06xcZMnnmCqpEbBaZQ/7N8Bdwnxh5sqlMdtX2hwKBgAhL
hyQRO2vezgyVUN50A6WdZLq4lVZGIq/bqkzcWhopZaebDc4F5doASV9OGBsXkyI1
EkePLTcA/NH6pVX0NQaEnfpG4To7k46R/PrBm3ATbyGONdEYjzX65VvytoJDKx4d
pVrkKhZA5KaOdLcJ7hHHDSrv/qJXZbBn44rQ5guxAoGBAJ6oeUsUUETakxlmIhmK
xuQmWqLf97BKt8r6Z8CqHKWK7vpG2OmgFYCQGaR7angQ8hmAOv6jM56XhoagDBoc
UoaoEyo9/uCk6NRUkUMj7Tk/5UQSiWLceVH27w+icMFhf1b7EmmNfk+APsiathO5
j4edf1AinVCPwRVVu1dtLL5P
-----END PRIVATE KEY-----
-----BEGIN CERTIFICATE-----
MIIDAjCCAeoCCQCptj0+TjjIJjANBgkqhkiG9w0BAQsFADBDMREwDwYDVQQKDAhE
TlNDcnlwdDEaMBgGA1UECwwRTG9jYWwgdGVzdCBzZXJ2ZXIxEjAQBgNVBAMMCWxv
Y2FsaG9zdDAeFw0xOTExMTgxNDA2MzBaFw0zMzA3MjcxNDA2MzBaMEMxETAPBgNV
BAoMCEROU0NyeXB0MRowGAYDVQQLDBFMb2NhbCB0ZXN0IHNlcnZlcjESMBAGA1UE
AwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2+4O
hEIW328ve5OKDG2U83Ytk1hQbO6iv0MIDrvi+hITRzztWRFNvb9ADxQRSTuc8sgJ
PW2fZPbTwWwrlVSPYxFHyx91AoxEDxulyNsPvrlYdMgC73P7rqwr0R1ZZXv1e3AK
IpJ3XNkoxEUFAC2wAiKsA+YNBe+wWMHLK3geBh9ud/0ekNvwiWeIzRz45KHDiYla
93owNJJKDTF6RlF1nK1VZNtDNgNQnjuxwLpS0XJJRHaBxN7+OqY6dImStBfS8mV9
VNapfuEC94kLbaGeLTIPN5RR0reBvT66SIc16/VuDVvNtn2kr6yMMDfyWyLVJkWu
5/9/jJLiUCBhjQ2slwIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQA6Vz5HnGuy8jZz
5i8ipbcDMCZNdpYYnxgD53hEKOfoSv7LaF0ztD8Kmg3s5LHv9EHlkK3+G6FWRGiP
9f6IbtRITaiVQP3M13T78hpN5Qq5jgsqjR7ZcN7Etr6ZFd7G/0+mzqbyBuW/3szt
RdX/YLy1csvjbZoNNuXGWRohXjg0Mjko2tRLmARvxA/gZV5zWycv3BD2BPzyCdS9
MDMYSF0RPiL8+alfwLNqLcqMA5liHlmZa85uapQyoUI3ksKJkEgU53aD8cYhH9Yn
6mVpsrvrcRLBiHlbi24QBolhFkCSRK8bXes8XDIPuD8iYRwlrVBwOakMFQWMqNfI
IMOKJomU
-----END CERTIFICATE-----

BIN
logo.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 58 KiB

281
src/config.rs Normal file
View file

@ -0,0 +1,281 @@
use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6, ToSocketAddrs};
#[cfg(feature = "tls")]
use std::path::PathBuf;
use std::time::Duration;
use clap::{Arg, ArgAction::SetTrue};
use libdoh::*;
use crate::constants::*;
/// Parses command-line arguments and populates `globals` in place.
///
/// After parsing, prints sample DNS stamps for the configured hostname, or a
/// hint asking for `-H <hostname>` when none was given. Invalid values cause
/// a panic with a descriptive message (this is the application's top-level
/// CLI entry point, so aborting is acceptable).
pub fn parse_opts(globals: &mut Globals) {
    use crate::utils::{verify_remote_server, verify_sock_addr};

    // clap default values must be owned strings; stringify the numeric
    // defaults from constants.rs once up front.
    let max_clients = MAX_CLIENTS.to_string();
    let timeout_sec = TIMEOUT_SEC.to_string();
    let max_concurrent_streams = MAX_CONCURRENT_STREAMS.to_string();
    let min_ttl = MIN_TTL.to_string();
    let max_ttl = MAX_TTL.to_string();
    let err_ttl = ERR_TTL.to_string();
    // Referencing Cargo.toml forces a recompile when the manifest changes, so
    // the name/version metadata baked in by `command!()` stays current.
    let _ = include_str!("../Cargo.toml");
    let options = command!()
        .arg(
            Arg::new("hostname")
                .short('H')
                .long("hostname")
                .num_args(1)
                .help("Host name (not IP address) DoH clients will use to connect"),
        )
        .arg(
            Arg::new("public_address")
                .short('g')
                .long("public-address")
                .num_args(1)
                .help("External IP address DoH clients will connect to"),
        )
        .arg(
            Arg::new("public_port")
                .short('j')
                .long("public-port")
                .num_args(1)
                .help("External port DoH clients will connect to, if not 443"),
        )
        .arg(
            Arg::new("listen_address")
                .short('l')
                .long("listen-address")
                .num_args(1)
                .default_value(LISTEN_ADDRESS)
                .value_parser(verify_sock_addr)
                .help("Address to listen to"),
        )
        .arg(
            Arg::new("server_address")
                .short('u')
                .long("server-address")
                .num_args(1)
                .default_value(SERVER_ADDRESS)
                .value_parser(verify_remote_server)
                .help("Address to connect to"),
        )
        .arg(
            Arg::new("local_bind_address")
                .short('b')
                .long("local-bind-address")
                .num_args(1)
                .value_parser(verify_sock_addr)
                .help("Address to connect from"),
        )
        .arg(
            Arg::new("path")
                .short('p')
                .long("path")
                .num_args(1)
                .default_value(PATH)
                .help("URI path"),
        )
        .arg(
            Arg::new("max_clients")
                .short('c')
                .long("max-clients")
                .num_args(1)
                .default_value(max_clients)
                .help("Maximum number of simultaneous clients"),
        )
        .arg(
            Arg::new("max_concurrent")
                .short('C')
                .long("max-concurrent")
                .num_args(1)
                .default_value(max_concurrent_streams)
                .help("Maximum number of concurrent requests per client"),
        )
        .arg(
            Arg::new("timeout")
                .short('t')
                .long("timeout")
                .num_args(1)
                .default_value(timeout_sec)
                .help("Timeout, in seconds"),
        )
        .arg(
            Arg::new("min_ttl")
                .short('T')
                .long("min-ttl")
                .num_args(1)
                .default_value(min_ttl)
                .help("Minimum TTL, in seconds"),
        )
        .arg(
            Arg::new("max_ttl")
                .short('X')
                .long("max-ttl")
                .num_args(1)
                .default_value(max_ttl)
                .help("Maximum TTL, in seconds"),
        )
        .arg(
            Arg::new("err_ttl")
                .short('E')
                .long("err-ttl")
                .num_args(1)
                .default_value(err_ttl)
                .help("TTL for errors, in seconds"),
        )
        .arg(
            Arg::new("disable_keepalive")
                .short('K')
                .action(SetTrue)
                .long("disable-keepalive")
                .help("Disable keepalive"),
        )
        .arg(
            Arg::new("disable_post")
                .short('P')
                .action(SetTrue)
                .long("disable-post")
                .help("Disable POST queries"),
        )
        .arg(
            Arg::new("allow_odoh_post")
                .short('O')
                .action(SetTrue)
                .long("allow-odoh-post")
                // Fixed typo: "disabed" -> "disabled".
                .help("Allow POST queries over ODoH even if they have been disabled for DoH"),
        );

    // TLS-related flags only exist when the crate is built with the `tls`
    // feature (see Cargo.toml `[features]`).
    #[cfg(feature = "tls")]
    let options = options
        .arg(
            Arg::new("tls_cert_path")
                .short('i')
                .long("tls-cert-path")
                .num_args(1)
                .help(
                    "Path to the PEM/PKCS#8-encoded certificates (only required for built-in TLS)",
                ),
        )
        .arg(
            Arg::new("tls_cert_key_path")
                .short('I')
                .long("tls-cert-key-path")
                .num_args(1)
                .help("Path to the PEM-encoded secret keys (only required for built-in TLS)"),
        );
    let matches = options.get_matches();

    globals.listen_address = matches
        .get_one::<String>("listen_address")
        .unwrap() // has a default value, so always present
        .parse()
        .unwrap(); // already validated by verify_sock_addr
    globals.server_address = matches
        .get_one::<String>("server_address")
        .unwrap() // has a default value, so always present
        .to_socket_addrs()
        .expect("Unable to resolve the server address")
        .next()
        .expect("Unable to resolve the server address");
    globals.local_bind_address = match matches.get_one::<String>("local_bind_address") {
        Some(address) => address.parse().unwrap(), // validated by verify_sock_addr
        // No explicit bind address: use the wildcard address matching the
        // server address family, preserving flow info/scope for IPv6.
        None => match globals.server_address {
            SocketAddr::V4(_) => SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, 0)),
            SocketAddr::V6(s) => SocketAddr::V6(SocketAddrV6::new(
                Ipv6Addr::UNSPECIFIED,
                0,
                s.flowinfo(),
                s.scope_id(),
            )),
        },
    };
    globals.path = matches.get_one::<String>("path").unwrap().to_string();
    // Normalize the URI path so it always starts with a slash.
    if !globals.path.starts_with('/') {
        globals.path = format!("/{}", globals.path);
    }
    globals.max_clients = matches
        .get_one::<String>("max_clients")
        .unwrap()
        .parse()
        .expect("Invalid max-clients value");
    globals.timeout = Duration::from_secs(
        matches
            .get_one::<String>("timeout")
            .unwrap()
            .parse()
            .expect("Invalid timeout value"),
    );
    globals.max_concurrent_streams = matches
        .get_one::<String>("max_concurrent")
        .unwrap()
        .parse()
        .expect("Invalid max-concurrent value");
    globals.min_ttl = matches
        .get_one::<String>("min_ttl")
        .unwrap()
        .parse()
        .expect("Invalid min-ttl value");
    globals.max_ttl = matches
        .get_one::<String>("max_ttl")
        .unwrap()
        .parse()
        .expect("Invalid max-ttl value");
    globals.err_ttl = matches
        .get_one::<String>("err_ttl")
        .unwrap()
        .parse()
        .expect("Invalid err-ttl value");
    globals.keepalive = !matches.get_flag("disable_keepalive");
    globals.disable_post = matches.get_flag("disable_post");
    globals.allow_odoh_post = matches.get_flag("allow_odoh_post");

    #[cfg(feature = "tls")]
    {
        globals.tls_cert_path = matches
            .get_one::<String>("tls_cert_path")
            .map(PathBuf::from);
        // The key path defaults to the certificate path (both may live in the
        // same PEM file).
        globals.tls_cert_key_path = matches
            .get_one::<String>("tls_cert_key_path")
            .map(PathBuf::from)
            .or_else(|| globals.tls_cert_path.clone());
    }

    match matches.get_one::<String>("hostname") {
        Some(hostname) => {
            // Print a sample DoH stamp for this hostname/path.
            let mut builder =
                dnsstamps::DoHBuilder::new(hostname.to_string(), globals.path.to_string());
            if let Some(public_address) = matches.get_one::<String>("public_address") {
                builder = builder.with_address(public_address.to_string());
            }
            if let Some(public_port) = matches.get_one::<String>("public_port") {
                let public_port = public_port.parse().expect("Invalid public port");
                builder = builder.with_port(public_port);
            }
            println!(
                "Test DNS stamp to reach [{}] over DoH: [{}]\n",
                hostname,
                builder.serialize().unwrap()
            );
            // And a sample ODoH target stamp as well.
            let mut builder =
                dnsstamps::ODoHTargetBuilder::new(hostname.to_string(), globals.path.to_string());
            if let Some(public_port) = matches.get_one::<String>("public_port") {
                let public_port = public_port.parse().expect("Invalid public port");
                builder = builder.with_port(public_port);
            }
            println!(
                "Test DNS stamp to reach [{}] over Oblivious DoH: [{}]\n",
                hostname,
                builder.serialize().unwrap()
            );
            println!("Check out https://dnscrypt.info/stamps/ to compute the actual stamps.\n")
        }
        _ => {
            println!(
                "Please provide a fully qualified hostname (-H <hostname> command-line option) to get \
                 test DNS stamps for your server.\n"
            );
        }
    }
}

10
src/constants.rs Normal file
View file

@ -0,0 +1,10 @@
/// Default socket address the proxy listens on (`--listen-address`).
pub const LISTEN_ADDRESS: &str = "127.0.0.1:3000";
/// Default maximum number of simultaneous clients (`--max-clients`).
pub const MAX_CLIENTS: usize = 512;
/// Default maximum number of concurrent requests per client (`--max-concurrent`).
pub const MAX_CONCURRENT_STREAMS: u32 = 16;
/// Default URI path for DoH queries (`--path`).
pub const PATH: &str = "/dns-query";
/// Well-known path for ODoH configuration (consumer not visible in this file).
pub const ODOH_CONFIGS_PATH: &str = "/.well-known/odohconfigs";
/// Default upstream DNS resolver address (`--server-address`).
pub const SERVER_ADDRESS: &str = "9.9.9.9:53";
/// Default timeout, in seconds (`--timeout`).
pub const TIMEOUT_SEC: u64 = 10;
/// Default maximum TTL, in seconds — one week (`--max-ttl`).
pub const MAX_TTL: u32 = 86400 * 7;
/// Default minimum TTL, in seconds (`--min-ttl`).
pub const MIN_TTL: u32 = 10;
/// Default TTL for error responses, in seconds (`--err-ttl`).
pub const ERR_TTL: u32 = 2;

View file

@ -1,127 +0,0 @@
/// DNS class IN (Internet).
const DNS_CLASS_IN: u16 = 1;
/// Size of the fixed DNS header, in bytes.
const DNS_HEADER_SIZE: usize = 12;
/// Maximum accepted length for an encoded hostname, in bytes.
const DNS_MAX_HOSTNAME_LEN: usize = 256;
/// Maximum DNS packet size this parser will process, in bytes.
const DNS_MAX_PACKET_SIZE: usize = 65_535;
/// The question section starts immediately after the fixed header.
const DNS_OFFSET_QUESTION: usize = DNS_HEADER_SIZE;
/// Resource-record type OPT (EDNS0 pseudo-record).
const DNS_TYPE_OPT: u16 = 41;
/// Question count: big-endian u16 stored at header bytes 4-5.
#[inline]
fn qdcount(packet: &[u8]) -> u16 {
    u16::from_be_bytes([packet[4], packet[5]])
}
/// Answer count: big-endian u16 stored at header bytes 6-7.
#[inline]
fn ancount(packet: &[u8]) -> u16 {
    u16::from_be_bytes([packet[6], packet[7]])
}
/// Authority count: big-endian u16 stored at header bytes 8-9.
#[inline]
fn nscount(packet: &[u8]) -> u16 {
    u16::from_be_bytes([packet[8], packet[9]])
}
/// Additional-record count: big-endian u16 stored at header bytes 10-11.
#[inline]
fn arcount(packet: &[u8]) -> u16 {
    u16::from_be_bytes([packet[10], packet[11]])
}
/// Walks over an encoded DNS name starting at `offset` and returns the offset
/// of the first byte past the name together with the number of non-root
/// labels traversed.
///
/// A compression pointer (two high bits set) terminates the name after its
/// 2-byte encoding; the pointer target is not followed.
// NOTE(review): `packet_len - 1` underflows for an empty packet — callers
// appear to guarantee a non-empty slice; TODO confirm.
fn skip_name(packet: &[u8], offset: usize) -> Result<(usize, u16), &'static str> {
    let packet_len = packet.len();
    if offset >= packet_len - 1 {
        return Err("Short packet");
    }
    let mut name_len: usize = 0;
    let mut offset = offset;
    let mut labels_count = 0u16;
    loop {
        let label_len = match packet[offset] {
            // Compression pointer: skip its two bytes and stop.
            len if len & 0xc0 == 0xc0 => {
                if 2 > packet_len - offset {
                    return Err("Incomplete offset");
                }
                offset += 2;
                break;
            }
            // Plain labels are limited to 63 bytes.
            len if len > 0x3f => return Err("Label too long"),
            len => len,
        } as usize;
        if label_len >= packet_len - offset - 1 {
            return Err("Malformed packet with an out-of-bounds name");
        }
        name_len += label_len + 1;
        if name_len > DNS_MAX_HOSTNAME_LEN {
            return Err("Name too long");
        }
        offset += label_len + 1;
        // A zero-length (root) label terminates the name.
        if label_len == 0 {
            break;
        }
        labels_count += 1;
    }
    Ok((offset, labels_count))
}
/// Scans a DNS response and returns the smallest TTL among its resource
/// records, clamped from below by `min_ttl` and starting from `max_ttl` as
/// the upper bound; `failure_ttl` is returned for packets carrying no
/// records at all.
///
/// Errors on anything other than a well-formed single-question message with
/// no trailing bytes. TTLs of OPT (EDNS0) pseudo-records are ignored, since
/// their TTL field carries flags rather than a lifetime.
pub fn min_ttl(
    packet: &[u8],
    min_ttl: u32,
    max_ttl: u32,
    failure_ttl: u32,
) -> Result<u32, &'static str> {
    if qdcount(packet) != 1 {
        return Err("Unsupported number of questions");
    }
    let packet_len = packet.len();
    if packet_len <= DNS_OFFSET_QUESTION {
        return Err("Short packet");
    }
    if packet_len >= DNS_MAX_PACKET_SIZE {
        return Err("Large packet");
    }
    // Skip the question name, then its TYPE and CLASS fields (4 bytes).
    let mut offset = match skip_name(packet, DNS_OFFSET_QUESTION) {
        Ok(offset) => offset.0,
        Err(e) => return Err(e),
    };
    assert!(offset > DNS_OFFSET_QUESTION);
    if 4 > packet_len - offset {
        return Err("Short packet");
    }
    offset += 4;
    let ancount = ancount(packet);
    let nscount = nscount(packet);
    let arcount = arcount(packet);
    // NOTE(review): this u16 sum can overflow (panics in debug builds) for
    // hostile counts — TODO confirm upstream size limits make this unreachable.
    let rrcount = ancount + nscount + arcount;
    // Start from the cap and lower it as records are seen; an empty answer
    // gets the failure TTL instead.
    let mut found_min_ttl = if rrcount > 0 { max_ttl } else { failure_ttl };
    for _ in 0..rrcount {
        offset = match skip_name(packet, offset) {
            Ok(offset) => offset.0,
            Err(e) => return Err(e),
        };
        if 10 > packet_len - offset {
            return Err("Short packet");
        }
        // Fixed RR fields after the name: TYPE(2) CLASS(2) TTL(4) RDLENGTH(2).
        let qtype = u16::from(packet[offset]) << 8 | u16::from(packet[offset + 1]);
        let qclass = u16::from(packet[offset + 2]) << 8 | u16::from(packet[offset + 3]);
        let ttl = u32::from(packet[offset + 4]) << 24
            | u32::from(packet[offset + 5]) << 16
            | u32::from(packet[offset + 6]) << 8
            | u32::from(packet[offset + 7]);
        let rdlen = (u16::from(packet[offset + 8]) << 8 | u16::from(packet[offset + 9])) as usize;
        offset += 10;
        // Skip OPT pseudo-records: their TTL field is not a real TTL.
        if !(qtype == DNS_TYPE_OPT && qclass == DNS_CLASS_IN) {
            if ttl < found_min_ttl {
                found_min_ttl = ttl;
            }
        }
        if rdlen > packet_len - offset {
            return Err("Record length would exceed packet length");
        }
        offset += rdlen;
    }
    if found_min_ttl < min_ttl {
        found_min_ttl = min_ttl;
    }
    if offset != packet_len {
        return Err("Garbage after packet");
    }
    Ok(found_min_ttl)
}

49
src/libdoh/Cargo.toml Normal file
View file

@ -0,0 +1,49 @@
[package]
name = "libdoh"
version = "0.9.11"
authors = ["Frank Denis <github@pureftpd.org>"]
description = "DoH and Oblivious DoH library for the rust-doh app"
keywords = ["dns", "https", "doh", "odoh", "proxy"]
license = "MIT"
homepage = "https://github.com/jedisct1/rust-doh"
repository = "https://github.com/jedisct1/rust-doh"
categories = ["asynchronous", "network-programming", "command-line-utilities"]
edition = "2018"
[features]
default = ["tls"]
tls = ["tokio-rustls"]
[dependencies]
anyhow = "1.0.97"
arc-swap = "1.7.1"
base64 = "0.22.1"
byteorder = "1.5.0"
bytes = "1.10.1"
futures = "0.3.31"
hyper = { version = "^0.14.32", default-features = false, features = [
"server",
"http1",
"http2",
"stream",
"runtime",
] }
odoh-rs = "1.0.3"
rand = "^0.8.5"
tokio = { version = "1.44.1", features = [
"net",
"rt-multi-thread",
"time",
"sync",
] }
tokio-rustls = { version = "^0.24.1", features = [
"early-data",
], optional = true }
rustls-pemfile = "^1.0.4"
[profile.release]
codegen-units = 1
incremental = false
lto = "fat"
opt-level = 3
panic = "abort"

21
src/libdoh/LICENSE Normal file
View file

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2018-2025 Frank Denis
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View file

@ -0,0 +1,9 @@
pub const DNS_QUERY_PARAM: &str = "dns";
pub const MAX_DNS_QUESTION_LEN: usize = 512;
pub const MAX_DNS_RESPONSE_LEN: usize = 4096;
pub const MIN_DNS_PACKET_LEN: usize = 17;
pub const STALE_IF_ERROR_SECS: u32 = 86400;
pub const STALE_WHILE_REVALIDATE_SECS: u32 = 60;
pub const CERTS_WATCH_DELAY_SECS: u32 = 10;
pub const ODOH_KEY_ROTATION_SECS: u32 = 86400;
pub const UDP_TCP_RATIO: usize = 8;

291
src/libdoh/src/dns.rs Normal file
View file

@ -0,0 +1,291 @@
use anyhow::{ensure, Error};
use byteorder::{BigEndian, ByteOrder};
/// Size of the fixed DNS header, in bytes.
const DNS_HEADER_SIZE: usize = 12;
/// Byte offset of the 16-bit flags field within the header.
pub const DNS_OFFSET_FLAGS: usize = 2;
/// Maximum accepted length for an encoded hostname, in bytes.
const DNS_MAX_HOSTNAME_SIZE: usize = 256;
/// Maximum DNS packet size handled here, in bytes.
const DNS_MAX_PACKET_SIZE: usize = 4096;
/// The question section starts immediately after the fixed header.
const DNS_OFFSET_QUESTION: usize = DNS_HEADER_SIZE;
/// TC (truncated) bit within the flags field.
const DNS_FLAGS_TC: u16 = 1u16 << 9;
/// Resource-record type OPT (EDNS0 pseudo-record).
const DNS_TYPE_OPT: u16 = 41;
/// EDNS0 option code for padding.
const DNS_PTYPE_PADDING: u16 = 12;
/// RCODE value for SERVFAIL responses.
const DNS_RCODE_SERVFAIL: u8 = 2;
/// RCODE value for REFUSED responses.
const DNS_RCODE_REFUSED: u8 = 5;
/// Response code: the low four bits of header byte 3.
#[inline]
pub fn rcode(packet: &[u8]) -> u8 {
    let flags_low = packet[3];
    flags_low & 0x0f
}
/// Question count: big-endian u16 at header bytes 4-5.
#[inline]
pub fn qdcount(packet: &[u8]) -> u16 {
    u16::from_be_bytes([packet[4], packet[5]])
}
/// Answer count: big-endian u16 at header bytes 6-7.
#[inline]
pub fn ancount(packet: &[u8]) -> u16 {
    u16::from_be_bytes([packet[6], packet[7]])
}
/// Additional-record count: big-endian u16 at header bytes 10-11.
#[inline]
pub fn arcount(packet: &[u8]) -> u16 {
    u16::from_be_bytes([packet[10], packet[11]])
}
/// Bumps the additional-record count in the header by one, failing once the
/// 16-bit field would overflow.
fn arcount_inc(packet: &mut [u8]) -> Result<(), Error> {
    let current = arcount(packet);
    ensure!(current < 0xffff, "Too many additional records");
    packet[10..12].copy_from_slice(&(current + 1).to_be_bytes());
    Ok(())
}
/// Authority count: big-endian u16 at header bytes 8-9.
#[inline]
fn nscount(packet: &[u8]) -> u16 {
    u16::from_be_bytes([packet[8], packet[9]])
}
#[inline]
pub fn is_recoverable_error(packet: &[u8]) -> bool {
let rcode = rcode(packet);
rcode == DNS_RCODE_SERVFAIL || rcode == DNS_RCODE_REFUSED
}
/// True when the TC (truncated) flag is set, meaning the answer did not fit
/// and should be re-fetched over TCP.
#[inline]
pub fn is_truncated(packet: &[u8]) -> bool {
    let flags = BigEndian::read_u16(&packet[DNS_OFFSET_FLAGS..]);
    flags & DNS_FLAGS_TC != 0
}
/// Advances past a (possibly compressed) DNS name starting at `offset` and
/// returns the offset of the first byte after the name.
///
/// Compression pointers (top two bits of the length byte set) are not
/// followed: the name ends right after the 2-byte pointer. Fails on
/// truncated packets, over-long labels, and names exceeding
/// `DNS_MAX_HOSTNAME_SIZE`.
fn skip_name(packet: &[u8], offset: usize) -> Result<usize, Error> {
    let packet_len = packet.len();
    // NOTE(review): `packet_len - 1` underflows on an empty packet; callers
    // appear to check a minimum length first — confirm all entry points do.
    ensure!(offset < packet_len - 1, "Short packet");
    let mut qname_len: usize = 0;
    let mut offset = offset;
    loop {
        let label_len = match packet[offset] as usize {
            // 0b11xxxxxx introduces a 2-byte compression pointer: skip and stop.
            label_len if label_len & 0xc0 == 0xc0 => {
                ensure!(packet_len - offset >= 2, "Incomplete offset");
                offset += 2;
                break;
            }
            label_len => label_len,
        } as usize;
        // Labels are limited to 63 bytes.
        ensure!(label_len < 0x40, "Long label");
        ensure!(
            packet_len - offset - 1 > label_len,
            "Malformed packet with an out-of-bounds name"
        );
        qname_len += label_len + 1;
        ensure!(qname_len <= DNS_MAX_HOSTNAME_SIZE, "Name too long");
        offset += label_len + 1;
        // A zero-length label is the root terminator.
        if label_len == 0 {
            break;
        }
    }
    Ok(offset)
}
/// Walks `rrcount` resource records starting at `offset`, invoking `cb` with
/// the offset of each record's fixed fields (just past its name), and returns
/// the offset immediately after the last record.
fn traverse_rrs<F: FnMut(usize) -> Result<(), Error>>(
    packet: &[u8],
    mut offset: usize,
    rrcount: usize,
    mut cb: F,
) -> Result<usize, Error> {
    let packet_len = packet.len();
    for _ in 0..rrcount {
        offset = skip_name(packet, offset)?;
        // TYPE(2) + CLASS(2) + TTL(4) + RDLENGTH(2) = 10 fixed bytes.
        ensure!(packet_len - offset >= 10, "Short packet");
        cb(offset)?;
        let rdlen = BigEndian::read_u16(&packet[offset + 8..]) as usize;
        offset += 10;
        ensure!(
            packet_len - offset >= rdlen,
            "Record length would exceed packet length"
        );
        offset += rdlen;
    }
    Ok(offset)
}
/// Mutable twin of `traverse_rrs`: `cb` additionally receives the whole
/// packet as `&mut [u8]` so it can rewrite record fields in place.
fn traverse_rrs_mut<F: FnMut(&mut [u8], usize) -> Result<(), Error>>(
    packet: &mut [u8],
    mut offset: usize,
    rrcount: usize,
    mut cb: F,
) -> Result<usize, Error> {
    let packet_len = packet.len();
    for _ in 0..rrcount {
        offset = skip_name(packet, offset)?;
        // TYPE(2) + CLASS(2) + TTL(4) + RDLENGTH(2) = 10 fixed bytes.
        ensure!(packet_len - offset >= 10, "Short packet");
        cb(packet, offset)?;
        let rdlen = BigEndian::read_u16(&packet[offset + 8..]) as usize;
        offset += 10;
        ensure!(
            packet_len - offset >= rdlen,
            "Record length would exceed packet length"
        );
        offset += rdlen;
    }
    Ok(offset)
}
/// Scans every resource record and returns the smallest TTL found, clamped
/// to `[min_ttl, max_ttl]`; a packet with no records yields `failure_ttl`.
/// OPT pseudo-RRs are skipped (their "TTL" field carries EDNS flags, not a
/// real TTL). Fails on malformed packets or trailing garbage.
pub fn min_ttl(packet: &[u8], min_ttl: u32, max_ttl: u32, failure_ttl: u32) -> Result<u32, Error> {
    let packet_len = packet.len();
    ensure!(packet_len > DNS_OFFSET_QUESTION, "Short packet");
    ensure!(packet_len <= DNS_MAX_PACKET_SIZE, "Large packet");
    ensure!(qdcount(packet) == 1, "No question");
    let mut offset = skip_name(packet, DNS_OFFSET_QUESTION)?;
    assert!(offset > DNS_OFFSET_QUESTION);
    ensure!(packet_len - offset > 4, "Short packet");
    // Skip the question's QTYPE/QCLASS.
    offset += 4;
    let (ancount, nscount, arcount) = (ancount(packet), nscount(packet), arcount(packet));
    let rrcount = ancount as usize + nscount as usize + arcount as usize;
    let mut found_min_ttl = if rrcount > 0 { max_ttl } else { failure_ttl };
    offset = traverse_rrs(packet, offset, rrcount, |offset| {
        let qtype = BigEndian::read_u16(&packet[offset..]);
        let ttl = BigEndian::read_u32(&packet[offset + 4..]);
        if qtype != DNS_TYPE_OPT && ttl < found_min_ttl {
            found_min_ttl = ttl;
        }
        Ok(())
    })?;
    if found_min_ttl < min_ttl {
        found_min_ttl = min_ttl;
    }
    // Anything left after the declared records is suspicious.
    ensure!(packet_len == offset, "Garbage after packet");
    Ok(found_min_ttl)
}
/// Appends a minimal EDNS0 OPT pseudo-RR advertising `max_payload_size` and
/// bumps ARCOUNT accordingly. Fails if the packet would exceed
/// `DNS_MAX_PACKET_SIZE`.
fn add_edns_section(packet: &mut Vec<u8>, max_payload_size: u16) -> Result<(), Error> {
    // OPT RR layout: root name (1 zero byte), TYPE=OPT (2), CLASS=requestor
    // payload size (2), TTL=0 i.e. extended RCODE/flags (4), RDLENGTH=0 (2).
    let opt_rr: [u8; 11] = [
        0,
        (DNS_TYPE_OPT >> 8) as u8,
        DNS_TYPE_OPT as u8,
        (max_payload_size >> 8) as u8,
        max_payload_size as u8,
        0,
        0,
        0,
        0,
        0,
        0,
    ];
    ensure!(
        DNS_MAX_PACKET_SIZE - packet.len() >= opt_rr.len(),
        "Packet would be too large to add a new record"
    );
    arcount_inc(packet)?;
    packet.extend(opt_rr);
    Ok(())
}
/// Sets the EDNS0 maximum payload size on the packet's existing OPT RR, or
/// appends a new OPT RR when none is present. Fails on malformed packets or
/// a duplicate OPT RR.
pub fn set_edns_max_payload_size(packet: &mut Vec<u8>, max_payload_size: u16) -> Result<(), Error> {
    let packet_len = packet.len();
    ensure!(packet_len > DNS_OFFSET_QUESTION, "Short packet");
    ensure!(packet_len <= DNS_MAX_PACKET_SIZE, "Large packet");
    ensure!(qdcount(packet) == 1, "No question");
    let mut offset = skip_name(packet, DNS_OFFSET_QUESTION)?;
    assert!(offset > DNS_OFFSET_QUESTION);
    ensure!(packet_len - offset >= 4, "Short packet");
    // Skip the question's QTYPE/QCLASS.
    offset += 4;
    let (ancount, nscount, arcount) = (ancount(packet), nscount(packet), arcount(packet));
    // Skip answer + authority sections; OPT lives in the additional section.
    offset = traverse_rrs(
        packet,
        offset,
        ancount as usize + nscount as usize,
        |_offset| Ok(()),
    )?;
    let mut edns_payload_set = false;
    traverse_rrs_mut(packet, offset, arcount as _, |packet, offset| {
        let qtype = BigEndian::read_u16(&packet[offset..]);
        if qtype == DNS_TYPE_OPT {
            ensure!(!edns_payload_set, "Duplicate OPT RR found");
            // The OPT RR's CLASS field carries the requestor payload size.
            BigEndian::write_u16(&mut packet[offset + 2..], max_payload_size);
            edns_payload_set = true;
        }
        Ok(())
    })?;
    if edns_payload_set {
        return Ok(());
    }
    add_edns_section(packet, max_payload_size)?;
    Ok(())
}
/// Rounds `unpadded_len` up to the next boundary from a fixed padding policy
/// table; lengths past the last boundary are padded to `DNS_MAX_PACKET_SIZE`.
fn padded_len(unpadded_len: usize) -> usize {
    const BOUNDARIES: [usize; 16] = [
        64, 128, 192, 256, 320, 384, 512, 704, 768, 896, 960, 1024, 1088, 1152, 2688, 4080,
    ];
    for &boundary in &BOUNDARIES {
        if boundary >= unpadded_len {
            return boundary;
        }
    }
    DNS_MAX_PACKET_SIZE
}
/// Pads the response to a policy-defined boundary by appending an EDNS0
/// Padding option to the packet's OPT RR (creating the OPT RR first if the
/// packet has none), to hinder traffic analysis of encrypted transports.
pub fn add_edns_padding(packet: &mut Vec<u8>) -> Result<(), Error> {
    let mut packet_len = packet.len();
    ensure!(packet_len > DNS_OFFSET_QUESTION, "Short packet");
    ensure!(packet_len <= DNS_MAX_PACKET_SIZE, "Large packet");
    ensure!(qdcount(packet) == 1, "No question");
    let mut offset = skip_name(packet, DNS_OFFSET_QUESTION)?;
    assert!(offset > DNS_OFFSET_QUESTION);
    ensure!(packet_len - offset >= 4, "Short packet");
    // Skip the question's QTYPE/QCLASS.
    offset += 4;
    let (ancount, nscount, arcount) = (ancount(packet), nscount(packet), arcount(packet));
    // Skip answer + authority sections; OPT lives in the additional section.
    offset = traverse_rrs(
        packet,
        offset,
        ancount as usize + nscount as usize,
        |_offset| Ok(()),
    )?;
    // Locate the OPT RR's fixed fields, if an OPT RR exists.
    let mut edns_offset = None;
    traverse_rrs_mut(packet, offset, arcount as _, |packet, offset| {
        let qtype = BigEndian::read_u16(&packet[offset..]);
        if qtype == DNS_TYPE_OPT {
            ensure!(edns_offset.is_none(), "Duplicate OPT RR found");
            edns_offset = Some(offset)
        }
        Ok(())
    })?;
    let edns_offset = match edns_offset {
        Some(edns_offset) => edns_offset,
        None => {
            // The appended OPT RR will start at the current end of the packet;
            // +1 skips its 1-byte root name, matching the traversal offsets.
            let edns_offset = packet.len() + 1;
            add_edns_section(packet, DNS_MAX_PACKET_SIZE as _)?;
            packet_len = packet.len();
            edns_offset
        }
    };
    let padding_len = padded_len(packet_len) - packet_len;
    // Padding option: OPTION-CODE (2) + OPTION-LENGTH (2) + filler bytes.
    let mut edns_padding_prr = vec![b'X'; 4 + padding_len];
    BigEndian::write_u16(&mut edns_padding_prr[0..], DNS_PTYPE_PADDING);
    BigEndian::write_u16(&mut edns_padding_prr[2..], padding_len as u16);
    let edns_padding_prr_len = edns_padding_prr.len();
    // RDLENGTH sits 8 bytes into the OPT RR's fixed fields.
    let edns_rdlen_offset: usize = edns_offset + 8;
    ensure!(packet_len - edns_rdlen_offset >= 2, "Short packet");
    let edns_rdlen = BigEndian::read_u16(&packet[edns_rdlen_offset..]);
    ensure!(
        edns_offset + edns_rdlen as usize <= packet_len,
        "Out of range EDNS size"
    );
    ensure!(
        0xffff - edns_rdlen as usize >= edns_padding_prr_len,
        "EDNS section too large for padding"
    );
    ensure!(
        DNS_MAX_PACKET_SIZE - packet_len >= edns_padding_prr_len,
        "Large packet"
    );
    // Grow the OPT RR's RDLENGTH to cover the appended padding option.
    BigEndian::write_u16(
        &mut packet[edns_rdlen_offset..],
        edns_rdlen + edns_padding_prr_len as u16,
    );
    packet.extend(&edns_padding_prr);
    Ok(())
}

53
src/libdoh/src/errors.rs Normal file
View file

@ -0,0 +1,53 @@
use std::io;
use hyper::StatusCode;
/// Every failure mode the DoH server reports; mapped to an HTTP status code
/// via the `From<DoHError> for StatusCode` conversion.
#[derive(Debug)]
pub enum DoHError {
    /// Query shorter than a minimal DNS packet.
    Incomplete,
    /// Malformed query, ciphertext, or configuration data.
    InvalidData,
    /// Request or response exceeded a configured size limit.
    TooLarge,
    /// The upstream resolver returned an unusable answer.
    UpstreamIssue,
    /// The upstream resolver did not answer within the timeout.
    UpstreamTimeout,
    /// ODoH query was encrypted to a key that has since been rotated out.
    StaleKey,
    /// HTTP-layer error surfaced by hyper.
    Hyper(hyper::Error),
    /// Underlying socket or file I/O error.
    Io(io::Error),
    /// ODoH configuration could not be generated or serialized.
    ODoHConfigError(anyhow::Error),
    /// The TCP retry session budget is exhausted.
    TooManyTcpSessions,
}

impl std::error::Error for DoHError {}
impl std::fmt::Display for DoHError {
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
match self {
DoHError::Incomplete => write!(fmt, "Incomplete"),
DoHError::InvalidData => write!(fmt, "Invalid data"),
DoHError::TooLarge => write!(fmt, "Too large"),
DoHError::UpstreamIssue => write!(fmt, "Upstream error"),
DoHError::UpstreamTimeout => write!(fmt, "Upstream timeout"),
DoHError::StaleKey => write!(fmt, "Stale key material"),
DoHError::Hyper(e) => write!(fmt, "HTTP error: {e}"),
DoHError::Io(e) => write!(fmt, "IO error: {e}"),
DoHError::ODoHConfigError(e) => write!(fmt, "ODoH config error: {e}"),
DoHError::TooManyTcpSessions => write!(fmt, "Too many TCP sessions"),
}
}
}
// Maps each internal failure onto the HTTP status code returned to the
// client; errors sharing a status are grouped into one arm.
impl From<DoHError> for StatusCode {
    fn from(e: DoHError) -> StatusCode {
        match e {
            DoHError::Incomplete => StatusCode::UNPROCESSABLE_ENTITY,
            DoHError::InvalidData => StatusCode::BAD_REQUEST,
            DoHError::TooLarge => StatusCode::PAYLOAD_TOO_LARGE,
            DoHError::UpstreamIssue | DoHError::UpstreamTimeout => StatusCode::BAD_GATEWAY,
            DoHError::StaleKey => StatusCode::UNAUTHORIZED,
            DoHError::Hyper(_) | DoHError::TooManyTcpSessions => StatusCode::SERVICE_UNAVAILABLE,
            DoHError::Io(_) | DoHError::ODoHConfigError(_) => StatusCode::INTERNAL_SERVER_ERROR,
        }
    }
}

64
src/libdoh/src/globals.rs Normal file
View file

@ -0,0 +1,64 @@
use std::net::SocketAddr;
#[cfg(feature = "tls")]
use std::path::PathBuf;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::time::Duration;
use tokio::runtime;
use crate::odoh::ODoHRotator;
/// Shared server configuration plus the mutable state (client counter,
/// ODoH key rotator) every connection handler needs.
#[derive(Debug)]
pub struct Globals {
    /// Path to the TLS certificate chain; TLS is used only when both this
    /// and `tls_cert_key_path` are set.
    #[cfg(feature = "tls")]
    pub tls_cert_path: Option<PathBuf>,
    /// Path to the TLS private key.
    #[cfg(feature = "tls")]
    pub tls_cert_key_path: Option<PathBuf>,
    /// Address the HTTP(S) listener binds to.
    pub listen_address: SocketAddr,
    /// Local address used when binding the upstream UDP socket.
    pub local_bind_address: SocketAddr,
    /// Upstream DNS resolver address.
    pub server_address: SocketAddr,
    /// URI path served for DoH queries.
    pub path: String,
    /// Maximum number of concurrent clients.
    pub max_clients: usize,
    /// Per-query upstream timeout.
    pub timeout: Duration,
    /// Live count of connected clients.
    pub clients_count: ClientsCount,
    /// HTTP/2 maximum concurrent streams setting.
    pub max_concurrent_streams: u32,
    /// Lower clamp applied to response TTLs.
    pub min_ttl: u32,
    /// Upper clamp applied to response TTLs.
    pub max_ttl: u32,
    /// TTL used for recoverable-error responses.
    pub err_ttl: u32,
    /// Enable HTTP/1 keep-alive.
    pub keepalive: bool,
    /// Reject POST for plain DoH when set.
    pub disable_post: bool,
    /// Allow POST for ODoH even when `disable_post` is set.
    pub allow_odoh_post: bool,
    /// URI path serving the serialized ODoH configs.
    pub odoh_configs_path: String,
    /// Rotating ODoH key pair.
    pub odoh_rotator: Arc<ODoHRotator>,
    /// Handle used to spawn per-connection tasks.
    pub runtime_handle: runtime::Handle,
}
/// Shared atomic counter tracking in-flight client connections.
///
/// Cloning is cheap: all clones share the same underlying counter.
#[derive(Debug, Clone, Default)]
pub struct ClientsCount(Arc<AtomicUsize>);

impl ClientsCount {
    /// Current number of connected clients.
    pub fn current(&self) -> usize {
        self.0.load(Ordering::Relaxed)
    }

    /// Increments the counter, returning the value it held before.
    pub fn increment(&self) -> usize {
        self.0.fetch_add(1, Ordering::Relaxed)
    }

    /// Decrements the counter, saturating at zero.
    ///
    /// Returns the value observed before the decrement (0 when the counter
    /// was already 0 and nothing was decremented) — the same contract as the
    /// previous hand-rolled CAS loop.
    pub fn decrement(&self) -> usize {
        // `checked_sub` makes the update a no-op at zero so the counter can
        // never underflow; `fetch_update` handles the compare-exchange retry
        // loop. On failure (counter was 0) the returned previous value is 0,
        // so `unwrap_or_default` yields the correct result in both cases.
        self.0
            .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |count| {
                count.checked_sub(1)
            })
            .unwrap_or_default()
    }
}

526
src/libdoh/src/lib.rs Normal file
View file

@ -0,0 +1,526 @@
mod constants;
pub mod dns;
mod errors;
mod globals;
pub mod odoh;
#[cfg(feature = "tls")]
mod tls;
use std::net::SocketAddr;
use std::pin::Pin;
use std::sync::Arc;
use std::time::Duration;
use base64::engine::Engine;
use byteorder::{BigEndian, ByteOrder};
use futures::prelude::*;
use futures::task::{Context, Poll};
use hyper::http;
use hyper::server::conn::Http;
use hyper::{Body, HeaderMap, Method, Request, Response, StatusCode};
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
use tokio::net::{TcpListener, TcpSocket, UdpSocket};
use tokio::runtime;
use crate::constants::*;
pub use crate::errors::*;
pub use crate::globals::*;
/// Re-exports so binaries depending on this crate can use the exact same
/// tokio version without declaring it themselves.
pub mod reexports {
    pub use tokio;
}

/// URL-safe base64 engine for DoH GET queries: encodes without padding and
/// accepts both padded and unpadded input on decode.
const BASE64_URL_SAFE_NO_PAD: base64::engine::GeneralPurpose =
    base64::engine::general_purpose::GeneralPurpose::new(
        &base64::alphabet::URL_SAFE,
        base64::engine::general_purpose::GeneralPurposeConfig::new()
            .with_encode_padding(false)
            .with_decode_padding_mode(base64::engine::DecodePaddingMode::Indifferent),
    );
/// A raw DNS answer from the upstream resolver plus the TTL to advertise in
/// HTTP cache headers.
#[derive(Clone, Debug)]
struct DnsResponse {
    packet: Vec<u8>,
    ttl: u32,
}
/// The two supported query flavors: plain DNS-over-HTTPS and Oblivious DoH.
#[derive(Clone, Debug)]
enum DoHType {
    Standard,
    Oblivious,
}

impl DoHType {
    /// Returns the MIME content type associated with this flavor.
    fn as_str(&self) -> String {
        let mime = match self {
            DoHType::Standard => "application/dns-message",
            DoHType::Oblivious => "application/oblivious-dns-message",
        };
        mime.to_string()
    }
}
/// The DoH service: a cheaply-cloneable handle around the shared server
/// state; one clone is made per connection.
#[derive(Clone, Debug)]
pub struct DoH {
    pub globals: Arc<Globals>,
}
/// Builds an empty-bodied HTTP response carrying `status_code`.
///
/// Wrapped in `Result` so it can be returned directly from handlers.
#[allow(clippy::unnecessary_wraps)]
fn http_error(status_code: StatusCode) -> Result<Response<Body>, http::Error> {
    // A valid status plus an empty body cannot fail to build.
    let builder = Response::builder().status(status_code);
    Ok(builder.body(Body::empty()).unwrap())
}
/// Adapter letting hyper spawn its internal futures on a caller-provided
/// Tokio runtime.
#[derive(Clone, Debug)]
pub struct LocalExecutor {
    runtime_handle: runtime::Handle,
}

impl LocalExecutor {
    fn new(runtime_handle: runtime::Handle) -> Self {
        LocalExecutor { runtime_handle }
    }
}
// Hyper calls this to spawn connection tasks; they are detached onto the
// configured runtime.
impl<F> hyper::rt::Executor<F> for LocalExecutor
where
    F: std::future::Future + Send + 'static,
    F::Output: Send,
{
    fn execute(&self, fut: F) {
        self.runtime_handle.spawn(fut);
    }
}
// Request router: the configured DoH path accepts GET and POST, the ODoH
// configs path accepts GET only; anything else is 404 or 405.
#[allow(clippy::type_complexity)]
impl hyper::service::Service<http::Request<Body>> for DoH {
    type Error = http::Error;
    type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send>>;
    type Response = Response<Body>;

    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        // Always ready: no backpressure applied at the service level.
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, req: Request<Body>) -> Self::Future {
        let globals = &self.globals;
        // Clone so the boxed future owns its handle to the shared state.
        let self_inner = self.clone();
        if req.uri().path() == globals.path {
            match *req.method() {
                Method::POST => Box::pin(async move { self_inner.serve_post(req).await }),
                Method::GET => Box::pin(async move { self_inner.serve_get(req).await }),
                _ => Box::pin(async { http_error(StatusCode::METHOD_NOT_ALLOWED) }),
            }
        } else if req.uri().path() == globals.odoh_configs_path {
            match *req.method() {
                Method::GET => Box::pin(async move { self_inner.serve_odoh_configs().await }),
                _ => Box::pin(async { http_error(StatusCode::METHOD_NOT_ALLOWED) }),
            }
        } else {
            Box::pin(async { http_error(StatusCode::NOT_FOUND) })
        }
    }
}
impl DoH {
    /// Dispatches a GET to the plain-DoH or ODoH handler based on the
    /// request's content-type/accept headers.
    async fn serve_get(&self, req: Request<Body>) -> Result<Response<Body>, http::Error> {
        match Self::parse_content_type(&req) {
            Ok(DoHType::Standard) => self.serve_doh_get(req).await,
            Ok(DoHType::Oblivious) => self.serve_odoh_get(req).await,
            Err(response) => Ok(response),
        }
    }

    /// Dispatches a POST to the plain-DoH or ODoH handler based on the
    /// request's content-type/accept headers.
    async fn serve_post(&self, req: Request<Body>) -> Result<Response<Body>, http::Error> {
        match Self::parse_content_type(&req) {
            Ok(DoHType::Standard) => self.serve_doh_post(req).await,
            Ok(DoHType::Oblivious) => self.serve_odoh_post(req).await,
            Err(response) => Ok(response),
        }
    }

    /// Proxies a raw DNS query upstream and wraps the answer in an HTTP
    /// response; any failure is converted to its HTTP status code.
    async fn serve_doh_query(&self, query: Vec<u8>) -> Result<Response<Body>, http::Error> {
        let resp = match self.proxy(query).await {
            Ok(resp) => {
                self.build_response(resp.packet, resp.ttl, DoHType::Standard.as_str(), true)
            }
            Err(e) => return http_error(StatusCode::from(e)),
        };
        match resp {
            Ok(resp) => Ok(resp),
            Err(e) => http_error(StatusCode::from(e)),
        }
    }

    /// Extracts and base64url-decodes the `dns` query-string parameter.
    /// Returns `None` when the parameter is missing, over-long, or not
    /// valid base64.
    fn query_from_query_string(&self, req: Request<Body>) -> Option<Vec<u8>> {
        let http_query = req.uri().query().unwrap_or("");
        let mut question_str = None;
        // Last occurrence of the parameter wins.
        for parts in http_query.split('&') {
            let mut kv = parts.split('=');
            if let Some(k) = kv.next() {
                if k == DNS_QUERY_PARAM {
                    question_str = kv.next();
                }
            }
        }
        // Reject before decoding: base64 expands the limit by 4/3.
        if let Some(question_str) = question_str {
            if question_str.len() > MAX_DNS_QUESTION_LEN * 4 / 3 {
                return None;
            }
        }
        let query = match question_str
            .and_then(|question_str| BASE64_URL_SAFE_NO_PAD.decode(question_str).ok())
        {
            Some(query) => query,
            _ => return None,
        };
        Some(query)
    }

    /// GET handler for plain DoH: query arrives in the URL's `dns` parameter.
    async fn serve_doh_get(&self, req: Request<Body>) -> Result<Response<Body>, http::Error> {
        let query = match self.query_from_query_string(req) {
            Some(query) => query,
            _ => return http_error(StatusCode::BAD_REQUEST),
        };
        self.serve_doh_query(query).await
    }

    /// POST handler for plain DoH: query arrives as the request body.
    async fn serve_doh_post(&self, req: Request<Body>) -> Result<Response<Body>, http::Error> {
        if self.globals.disable_post {
            return http_error(StatusCode::METHOD_NOT_ALLOWED);
        }
        let query = match self.read_body(req.into_body()).await {
            Ok(q) => q,
            Err(e) => return http_error(StatusCode::from(e)),
        };
        self.serve_doh_query(query).await
    }

    /// Decrypts an ODoH query with the current key, proxies the plaintext
    /// query upstream, and encrypts the answer back to the client. ODoH
    /// responses are uncacheable (ttl 0) and carry no CORS header.
    async fn serve_odoh(&self, encrypted_query: Vec<u8>) -> Result<Response<Body>, http::Error> {
        let odoh_public_key = (*self.globals.odoh_rotator).clone().current_public_key();
        let (query, context) = match (*odoh_public_key).clone().decrypt_query(encrypted_query) {
            Ok((q, context)) => (q.to_vec(), context),
            Err(e) => return http_error(StatusCode::from(e)),
        };
        let resp = match self.proxy(query).await {
            Ok(resp) => resp,
            Err(e) => return http_error(StatusCode::from(e)),
        };
        let encrypted_resp = match context.encrypt_response(resp.packet) {
            Ok(resp) => self.build_response(resp, 0u32, DoHType::Oblivious.as_str(), false),
            Err(e) => return http_error(StatusCode::from(e)),
        };
        match encrypted_resp {
            Ok(resp) => Ok(resp),
            Err(e) => http_error(StatusCode::from(e)),
        }
    }

    /// GET handler for ODoH: encrypted query arrives in the `dns` parameter.
    async fn serve_odoh_get(&self, req: Request<Body>) -> Result<Response<Body>, http::Error> {
        let encrypted_query = match self.query_from_query_string(req) {
            Some(encrypted_query) => encrypted_query,
            _ => return http_error(StatusCode::BAD_REQUEST),
        };
        self.serve_odoh(encrypted_query).await
    }

    /// POST handler for ODoH; `allow_odoh_post` can override `disable_post`.
    async fn serve_odoh_post(&self, req: Request<Body>) -> Result<Response<Body>, http::Error> {
        if self.globals.disable_post && !self.globals.allow_odoh_post {
            return http_error(StatusCode::METHOD_NOT_ALLOWED);
        }
        let encrypted_query = match self.read_body(req.into_body()).await {
            Ok(q) => q,
            Err(e) => return http_error(StatusCode::from(e)),
        };
        self.serve_odoh(encrypted_query).await
    }

    /// Serves the current serialized ODoH configs, cacheable for one key
    /// rotation period.
    async fn serve_odoh_configs(&self) -> Result<Response<Body>, http::Error> {
        let odoh_public_key = (*self.globals.odoh_rotator).clone().current_public_key();
        let configs = (*odoh_public_key).clone().into_config();
        match self.build_response(
            configs,
            ODOH_KEY_ROTATION_SECS,
            "application/octet-stream".to_string(),
            true,
        ) {
            Ok(resp) => Ok(resp),
            Err(e) => http_error(StatusCode::from(e)),
        }
    }

    /// Returns the first entry of `content_types` that the request's Accept
    /// header lists (parameters after `;` are ignored), or `None`.
    fn acceptable_content_type(
        headers: &HeaderMap,
        content_types: &[&'static str],
    ) -> Option<&'static str> {
        let accept = headers.get(hyper::header::ACCEPT);
        let accept = accept?;
        for part in accept.to_str().unwrap_or("").split(',').map(|s| s.trim()) {
            if let Some(found) = part
                .split(';')
                .next()
                .map(|s| s.trim().to_ascii_lowercase())
            {
                if let Some(&content_type) = content_types
                    .iter()
                    .find(|&&content_type| content_type == found)
                {
                    return Some(content_type);
                }
            }
        }
        None
    }

    /// Decides whether the request is plain DoH or ODoH from its
    /// Content-Type header, falling back to the Accept header when
    /// Content-Type is absent. On failure returns a ready-made error
    /// response (406, 400, or 415).
    fn parse_content_type(req: &Request<Body>) -> Result<DoHType, Response<Body>> {
        const CT_DOH: &str = "application/dns-message";
        const CT_ODOH: &str = "application/oblivious-dns-message";
        let headers = req.headers();
        let content_type = match headers.get(hyper::header::CONTENT_TYPE) {
            None => {
                // No Content-Type (e.g. GET): negotiate via Accept instead.
                let acceptable_content_type =
                    Self::acceptable_content_type(headers, &[CT_DOH, CT_ODOH]);
                match acceptable_content_type {
                    None => {
                        let response = Response::builder()
                            .status(StatusCode::NOT_ACCEPTABLE)
                            .body(Body::empty())
                            .unwrap();
                        return Err(response);
                    }
                    Some(content_type) => content_type,
                }
            }
            Some(content_type) => match content_type.to_str() {
                Err(_) => {
                    let response = Response::builder()
                        .status(StatusCode::BAD_REQUEST)
                        .body(Body::empty())
                        .unwrap();
                    return Err(response);
                }
                Ok(content_type) => content_type,
            },
        };
        match content_type.to_ascii_lowercase().as_str() {
            CT_DOH => Ok(DoHType::Standard),
            CT_ODOH => Ok(DoHType::Oblivious),
            _ => {
                let response = Response::builder()
                    .status(StatusCode::UNSUPPORTED_MEDIA_TYPE)
                    .body(Body::empty())
                    .unwrap();
                Err(response)
            }
        }
    }

    /// Buffers a request body into memory, enforcing the question size limit.
    async fn read_body(&self, mut body: Body) -> Result<Vec<u8>, DoHError> {
        let mut sum_size = 0;
        let mut query = vec![];
        while let Some(chunk) = body.next().await {
            let chunk = chunk.map_err(|_| DoHError::TooLarge)?;
            sum_size += chunk.len();
            // NOTE(review): `>=` rejects a body of exactly
            // MAX_DNS_QUESTION_LEN bytes — confirm the limit is meant to be
            // exclusive.
            if sum_size >= MAX_DNS_QUESTION_LEN {
                return Err(DoHError::TooLarge);
            }
            query.extend(chunk);
        }
        Ok(query)
    }

    /// Runs `_proxy` under the configured upstream timeout.
    async fn proxy(&self, query: Vec<u8>) -> Result<DnsResponse, DoHError> {
        let proxy_timeout = self.globals.timeout;
        let timeout_res = tokio::time::timeout(proxy_timeout, self._proxy(query)).await;
        timeout_res.map_err(|_| DoHError::UpstreamTimeout)?
    }

    /// Forwards `query` to the upstream resolver over UDP, retrying over TCP
    /// when the UDP answer comes back truncated, and computes the TTL to
    /// advertise for caching.
    async fn _proxy(&self, mut query: Vec<u8>) -> Result<DnsResponse, DoHError> {
        if query.len() < MIN_DNS_PACKET_LEN {
            return Err(DoHError::Incomplete);
        }
        // Best effort: the unmodified query is still usable on failure.
        let _ = dns::set_edns_max_payload_size(&mut query, MAX_DNS_RESPONSE_LEN as _);
        let globals = &self.globals;
        let mut packet = vec![0; MAX_DNS_RESPONSE_LEN];
        let (min_ttl, max_ttl, err_ttl) = (globals.min_ttl, globals.max_ttl, globals.err_ttl);
        // UDP
        {
            let socket = UdpSocket::bind(&globals.local_bind_address)
                .await
                .map_err(DoHError::Io)?;
            let expected_server_address = globals.server_address;
            socket
                .send_to(&query, &globals.server_address)
                .map_err(DoHError::Io)
                .await?;
            let (len, response_server_address) =
                socket.recv_from(&mut packet).map_err(DoHError::Io).await?;
            // Reject short answers and datagrams from an unexpected address.
            if len < MIN_DNS_PACKET_LEN || expected_server_address != response_server_address {
                return Err(DoHError::UpstreamIssue);
            }
            packet.truncate(len);
        }
        // TCP
        if dns::is_truncated(&packet) {
            // Cap TCP retries to a fraction of the overall client budget.
            let clients_count = self.globals.clients_count.current();
            if self.globals.max_clients >= UDP_TCP_RATIO
                && clients_count >= self.globals.max_clients / UDP_TCP_RATIO
            {
                return Err(DoHError::TooManyTcpSessions);
            }
            let socket = match globals.server_address {
                SocketAddr::V4(_) => TcpSocket::new_v4(),
                SocketAddr::V6(_) => TcpSocket::new_v6(),
            }
            .map_err(DoHError::Io)?;
            let mut ext_socket = socket
                .connect(globals.server_address)
                .await
                .map_err(DoHError::Io)?;
            ext_socket.set_nodelay(true).map_err(DoHError::Io)?;
            // DNS-over-TCP frames carry a 2-byte big-endian length prefix.
            let mut binlen = [0u8, 0];
            BigEndian::write_u16(&mut binlen, query.len() as u16);
            ext_socket.write_all(&binlen).await.map_err(DoHError::Io)?;
            ext_socket.write_all(&query).await.map_err(DoHError::Io)?;
            ext_socket.flush().await.map_err(DoHError::Io)?;
            ext_socket
                .read_exact(&mut binlen)
                .await
                .map_err(DoHError::Io)?;
            let packet_len = BigEndian::read_u16(&binlen) as usize;
            if !(MIN_DNS_PACKET_LEN..=MAX_DNS_RESPONSE_LEN).contains(&packet_len) {
                return Err(DoHError::UpstreamIssue);
            }
            packet = vec![0u8; packet_len];
            ext_socket
                .read_exact(&mut packet)
                .await
                .map_err(DoHError::Io)?;
        }
        let ttl = if dns::is_recoverable_error(&packet) {
            err_ttl
        } else {
            match dns::min_ttl(&packet, min_ttl, max_ttl, err_ttl) {
                Err(_) => return Err(DoHError::UpstreamIssue),
                Ok(ttl) => ttl,
            }
        };
        // Padding failure is non-fatal; the unpadded response is still valid.
        dns::add_edns_padding(&mut packet)
            .map_err(|_| DoHError::TooLarge)
            .ok();
        Ok(DnsResponse { packet, ttl })
    }

    /// Wraps `packet` in an HTTP response with content-type, length, and
    /// cache-control headers; adds a permissive CORS header when `cors` is set.
    fn build_response(
        &self,
        packet: Vec<u8>,
        ttl: u32,
        content_type: String,
        cors: bool,
    ) -> Result<Response<Body>, DoHError> {
        let packet_len = packet.len();
        let mut response_builder = Response::builder()
            .header(hyper::header::CONTENT_LENGTH, packet_len)
            .header(hyper::header::CONTENT_TYPE, content_type.as_str())
            .header(
                hyper::header::CACHE_CONTROL,
                format!(
                    "max-age={ttl}, stale-if-error={STALE_IF_ERROR_SECS}, \
                     stale-while-revalidate={STALE_WHILE_REVALIDATE_SECS}"
                )
                .as_str(),
            );
        if cors {
            response_builder =
                response_builder.header(hyper::header::ACCESS_CONTROL_ALLOW_ORIGIN, "*");
        }
        let response = response_builder
            .body(Body::from(packet))
            .map_err(|_| DoHError::InvalidData)?;
        Ok(response)
    }

    /// Registers the connection against the client budget and serves it on a
    /// spawned task bounded by the configured timeout plus one second.
    async fn client_serve<I>(self, stream: I, server: Http<LocalExecutor>)
    where
        I: AsyncRead + AsyncWrite + Send + Unpin + 'static,
    {
        let clients_count = self.globals.clients_count.clone();
        // NOTE(review): `increment()` returns the pre-increment count, so
        // this admits up to max_clients + 1 concurrent clients — confirm
        // whether the limit is meant to be inclusive.
        if clients_count.increment() > self.globals.max_clients {
            clients_count.decrement();
            return;
        }
        self.globals.runtime_handle.clone().spawn(async move {
            tokio::time::timeout(
                self.globals.timeout + Duration::from_secs(1),
                server.serve_connection(stream, self),
            )
            .await
            .ok();
            clients_count.decrement();
        });
    }

    /// Plain-HTTP accept loop: serves every accepted connection until the
    /// listener errors out.
    async fn start_without_tls(
        self,
        listener: TcpListener,
        server: Http<LocalExecutor>,
    ) -> Result<(), DoHError> {
        let listener_service = async {
            while let Ok((stream, _client_addr)) = listener.accept().await {
                self.clone().client_serve(stream, server.clone()).await;
            }
            Ok(()) as Result<(), DoHError>
        };
        listener_service.await?;
        Ok(())
    }

    /// Binds the listener, configures hyper, and runs the server — over TLS
    /// when the feature is compiled in and both certificate paths are set,
    /// plain HTTP otherwise.
    pub async fn entrypoint(self) -> Result<(), DoHError> {
        let listen_address = self.globals.listen_address;
        let listener = TcpListener::bind(&listen_address)
            .await
            .map_err(DoHError::Io)?;
        let path = &self.globals.path;
        let tls_enabled: bool;
        #[cfg(not(feature = "tls"))]
        {
            tls_enabled = false;
        }
        #[cfg(feature = "tls")]
        {
            tls_enabled =
                self.globals.tls_cert_path.is_some() && self.globals.tls_cert_key_path.is_some();
        }
        if tls_enabled {
            println!("Listening on https://{listen_address}{path}");
        } else {
            println!("Listening on http://{listen_address}{path}");
        }
        let mut server = Http::new();
        server.http1_keep_alive(self.globals.keepalive);
        server.http2_max_concurrent_streams(self.globals.max_concurrent_streams);
        server.pipeline_flush(true);
        let executor = LocalExecutor::new(self.globals.runtime_handle.clone());
        let server = server.with_executor(executor);
        #[cfg(feature = "tls")]
        {
            if tls_enabled {
                self.start_with_tls(listener, server).await?;
                return Ok(());
            }
        }
        self.start_without_tls(listener, server).await?;
        Ok(())
    }
}

132
src/libdoh/src/odoh.rs Normal file
View file

@ -0,0 +1,132 @@
use std::fmt;
use std::sync::Arc;
use std::time::Duration;
use arc_swap::ArcSwap;
use odoh_rs::{
Deserialize, ObliviousDoHConfig, ObliviousDoHConfigs, ObliviousDoHKeyPair, ObliviousDoHMessage,
ObliviousDoHMessagePlaintext, OdohSecret, ResponseNonce, Serialize,
};
use rand::Rng;
use tokio::runtime;
use crate::constants::ODOH_KEY_ROTATION_SECS;
use crate::errors::DoHError;
/// Server-side ODoH key pair plus its pre-serialized public configuration.
#[derive(Clone)]
pub struct ODoHPublicKey {
    key_pair: ObliviousDoHKeyPair,
    serialized_configs: Vec<u8>,
}

// Manual impl: debug output shows only the type name, omitting all fields.
impl fmt::Debug for ODoHPublicKey {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("ODoHPublicKey").finish()
    }
}

/// Per-query state retained between decrypting a query and encrypting the
/// matching response.
#[derive(Clone, Debug)]
pub struct ODoHQueryContext {
    query: ObliviousDoHMessagePlaintext,
    server_secret: OdohSecret,
}
impl ODoHPublicKey {
    /// Generates a fresh ODoH key pair and pre-serializes its public configs.
    pub fn new() -> Result<ODoHPublicKey, DoHError> {
        let key_pair = ObliviousDoHKeyPair::new(&mut rand::thread_rng());
        let config = ObliviousDoHConfig::from(key_pair.public().clone());
        let mut serialized_configs = Vec::new();
        ObliviousDoHConfigs::from(vec![config])
            .serialize(&mut serialized_configs)
            .map_err(|e| DoHError::ODoHConfigError(e.into()))?;
        Ok(ODoHPublicKey {
            key_pair,
            serialized_configs,
        })
    }

    /// Consumes the key, returning the serialized ODoH configs blob.
    pub fn into_config(self) -> Vec<u8> {
        self.serialized_configs
    }

    /// Decrypts a client query. Returns `StaleKey` when the query targets a
    /// different key generation, `InvalidData` on any other failure.
    pub fn decrypt_query(
        self,
        encrypted_query: Vec<u8>,
    ) -> Result<(Vec<u8>, ODoHQueryContext), DoHError> {
        let odoh_query = ObliviousDoHMessage::deserialize(&mut bytes::Bytes::from(encrypted_query))
            .map_err(|_| DoHError::InvalidData)?;
        // Reject queries encrypted to a key other than the current one.
        match self.key_pair.public().identifier() {
            Ok(key_id) => {
                if !key_id.eq(&odoh_query.key_id()) {
                    return Err(DoHError::StaleKey);
                }
            }
            Err(_) => return Err(DoHError::InvalidData),
        };
        let (query, server_secret) = match odoh_rs::decrypt_query(&odoh_query, &self.key_pair) {
            Ok((pq, ss)) => (pq, ss),
            Err(_) => return Err(DoHError::InvalidData),
        };
        // Keep what is needed to encrypt the matching response later.
        let context = ODoHQueryContext {
            query: query.clone(),
            server_secret,
        };
        Ok((query.into_msg().to_vec(), context))
    }
}
impl ODoHQueryContext {
    /// Encrypts a plaintext DNS response for the client tied to this context,
    /// using a freshly generated response nonce, and returns it serialized.
    pub fn encrypt_response(self, response_body: Vec<u8>) -> Result<Vec<u8>, DoHError> {
        let response_nonce = rand::thread_rng().r#gen::<ResponseNonce>();
        let response_body_ = ObliviousDoHMessagePlaintext::new(response_body, 0);
        let encrypted_response = odoh_rs::encrypt_response(
            &self.query,
            &response_body_,
            self.server_secret,
            response_nonce,
        )
        .map_err(|_| DoHError::InvalidData)?;
        let mut encrypted_response_bytes = Vec::new();
        encrypted_response
            .serialize(&mut encrypted_response_bytes)
            .map_err(|_| DoHError::InvalidData)?;
        Ok(encrypted_response_bytes)
    }
}
/// Holds the current ODoH key and replaces it on a fixed schedule.
#[derive(Clone, Debug)]
pub struct ODoHRotator {
    // ArcSwap lets readers grab the current key lock-free while the
    // background task swaps in a new one.
    key: Arc<ArcSwap<ODoHPublicKey>>,
}

impl ODoHRotator {
    /// Creates a rotator with a fresh key and spawns the background task that
    /// rotates it every `ODOH_KEY_ROTATION_SECS`.
    ///
    /// Panics if the initial key generation fails.
    pub fn new(runtime_handle: runtime::Handle) -> Result<ODoHRotator, DoHError> {
        let public_key = match ODoHPublicKey::new() {
            Ok(key) => Arc::new(ArcSwap::from_pointee(key)),
            Err(e) => panic!("ODoH key rotation error: {}", e),
        };
        let current_key = Arc::clone(&public_key);
        runtime_handle.spawn(async move {
            loop {
                tokio::time::sleep(Duration::from_secs(ODOH_KEY_ROTATION_SECS.into())).await;
                match ODoHPublicKey::new() {
                    Ok(key) => {
                        current_key.store(Arc::new(key));
                    }
                    // A failed rotation keeps the previous key in place.
                    Err(e) => eprintln!("ODoH key rotation error: {e}"),
                };
            }
        });
        Ok(ODoHRotator {
            key: Arc::clone(&public_key),
        })
    }

    /// Returns a handle to the key currently in use.
    pub fn current_public_key(&self) -> Arc<ODoHPublicKey> {
        let key = Arc::clone(&self.key);
        Arc::clone(&key.load())
    }
}

165
src/libdoh/src/tls.rs Normal file
View file

@ -0,0 +1,165 @@
use std::fs::File;
use std::io::{self, BufReader, Cursor, Read};
use std::path::Path;
use std::sync::Arc;
use std::time::Duration;
use futures::{future::FutureExt, join, select};
use hyper::server::conn::Http;
use tokio::{
net::TcpListener,
sync::mpsc::{self, Receiver},
};
use tokio_rustls::{
rustls::{Certificate, PrivateKey, ServerConfig},
TlsAcceptor,
};
use crate::constants::CERTS_WATCH_DELAY_SECS;
use crate::errors::*;
use crate::{DoH, LocalExecutor};
/// Loads the PEM certificate chain and private key from disk and builds a
/// rustls `TlsAcceptor` advertising HTTP/2 and HTTP/1.1 via ALPN.
///
/// Keys may be PKCS#8 or RSA PEM; the first key that forms a valid pair with
/// the certificate chain wins. Fails with `InvalidInput` when nothing parses
/// or no key matches.
pub fn create_tls_acceptor<P, P2>(certs_path: P, certs_keys_path: P2) -> io::Result<TlsAcceptor>
where
    P: AsRef<Path>,
    P2: AsRef<Path>,
{
    let certs: Vec<_> = {
        let certs_path_str = certs_path.as_ref().display().to_string();
        let mut reader = BufReader::new(File::open(certs_path).map_err(|e| {
            io::Error::new(
                e.kind(),
                format!("Unable to load the certificates [{certs_path_str}]: {e}"),
            )
        })?);
        rustls_pemfile::certs(&mut reader).map_err(|_| {
            io::Error::new(
                io::ErrorKind::InvalidInput,
                "Unable to parse the certificates",
            )
        })?
    }
    .drain(..)
    .map(Certificate)
    .collect();
    let certs_keys: Vec<_> = {
        let certs_keys_path_str = certs_keys_path.as_ref().display().to_string();
        let encoded_keys = {
            let mut encoded_keys = vec![];
            File::open(certs_keys_path)
                .map_err(|e| {
                    io::Error::new(
                        e.kind(),
                        format!("Unable to load the certificate keys [{certs_keys_path_str}]: {e}"),
                    )
                })?
                .read_to_end(&mut encoded_keys)?;
            encoded_keys
        };
        // Parse the same bytes twice: once as PKCS#8, once (rewound) as RSA.
        let mut reader = Cursor::new(encoded_keys);
        let pkcs8_keys = rustls_pemfile::pkcs8_private_keys(&mut reader).map_err(|_| {
            io::Error::new(
                io::ErrorKind::InvalidInput,
                "Unable to parse the certificates private keys (PKCS8)",
            )
        })?;
        reader.set_position(0);
        let mut rsa_keys = rustls_pemfile::rsa_private_keys(&mut reader).map_err(|_| {
            io::Error::new(
                io::ErrorKind::InvalidInput,
                "Unable to parse the certificates private keys (RSA)",
            )
        })?;
        let mut keys = pkcs8_keys;
        keys.append(&mut rsa_keys);
        if keys.is_empty() {
            return Err(io::Error::new(
                io::ErrorKind::InvalidInput,
                "No private keys found - Make sure that they are in PKCS#8/PEM format",
            ));
        }
        keys.drain(..).map(PrivateKey).collect()
    };
    // Try each candidate key against the chain; keep the first that works.
    let mut server_config = certs_keys
        .into_iter()
        .find_map(|certs_key| {
            let server_config_builder = ServerConfig::builder()
                .with_safe_defaults()
                .with_no_client_auth();
            match server_config_builder.with_single_cert(certs.clone(), certs_key) {
                Ok(found_config) => Some(found_config),
                _ => None,
            }
        })
        .ok_or_else(|| {
            io::Error::new(
                io::ErrorKind::InvalidInput,
                "Unable to find a valid certificate and key",
            )
        })?;
    server_config.alpn_protocols = vec![b"h2".to_vec(), b"http/1.1".to_vec()];
    Ok(TlsAcceptor::from(Arc::new(server_config)))
}
impl DoH {
    /// TLS accept loop: serves connections with the most recently received
    /// `TlsAcceptor`. Connections arriving before the first acceptor, or
    /// failing the handshake, are silently dropped.
    async fn start_https_service(
        self,
        mut tls_acceptor_receiver: Receiver<TlsAcceptor>,
        listener: TcpListener,
        server: Http<LocalExecutor>,
    ) -> Result<(), DoHError> {
        // No acceptor until the first certificate load succeeds.
        let mut tls_acceptor: Option<TlsAcceptor> = None;
        let listener_service = async {
            loop {
                select! {
                    tcp_cnx = listener.accept().fuse() => {
                        if tls_acceptor.is_none() || tcp_cnx.is_err() {
                            continue;
                        }
                        let (raw_stream, _client_addr) = tcp_cnx.unwrap();
                        if let Ok(stream) = tls_acceptor.as_ref().unwrap().accept(raw_stream).await {
                            self.clone().client_serve(stream, server.clone()).await
                        }
                    }
                    // Certificate reload: swap in the freshly built acceptor.
                    new_tls_acceptor = tls_acceptor_receiver.recv().fuse() => {
                        if new_tls_acceptor.is_none() {
                            break;
                        }
                        tls_acceptor = new_tls_acceptor;
                    }
                    complete => break
                }
            }
            Ok(()) as Result<(), DoHError>
        };
        listener_service.await?;
        Ok(())
    }

    /// Runs the HTTPS service alongside a watcher task that rebuilds the TLS
    /// acceptor from the certificate files every `CERTS_WATCH_DELAY_SECS`.
    ///
    /// Panics if the TLS cert/key paths are unset; `entrypoint` checks both
    /// before calling this.
    pub async fn start_with_tls(
        self,
        listener: TcpListener,
        server: Http<LocalExecutor>,
    ) -> Result<(), DoHError> {
        let certs_path = self.globals.tls_cert_path.as_ref().unwrap().clone();
        let certs_keys_path = self.globals.tls_cert_key_path.as_ref().unwrap().clone();
        let (tls_acceptor_sender, tls_acceptor_receiver) = mpsc::channel(1);
        let https_service = self.start_https_service(tls_acceptor_receiver, listener, server);
        let cert_service = async {
            loop {
                match create_tls_acceptor(&certs_path, &certs_keys_path) {
                    Ok(tls_acceptor) => {
                        if tls_acceptor_sender.send(tls_acceptor).await.is_err() {
                            break;
                        }
                    }
                    // Keep serving with the previous certificates on failure.
                    Err(e) => eprintln!("TLS certificates error: {e}"),
                }
                tokio::time::sleep(Duration::from_secs(CERTS_WATCH_DELAY_SECS.into())).await;
            }
            Ok::<_, DoHError>(())
        };
        // The overall result is the HTTPS service's; the watcher runs forever.
        join!(https_service, cert_service).0
    }
}

View file

@ -1,283 +1,63 @@
#![cfg_attr(feature = "clippy", feature(plugin))]
#![cfg_attr(feature = "clippy", plugin(clippy))]
#[global_allocator]
static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc;
extern crate base64;
#[macro_use]
extern crate clap;
extern crate futures;
extern crate hyper;
extern crate tokio;
extern crate tokio_io;
extern crate tokio_timer;
mod dns;
mod config;
mod constants;
mod utils;
use clap::{App, Arg};
use futures::future;
use futures::prelude::*;
use hyper::header::{CacheControl, CacheDirective, ContentLength, ContentType};
use hyper::server::{Http, Request, Response, Service};
use hyper::{Body, Method, StatusCode};
use std::cell::RefCell;
use std::net::SocketAddr;
use std::rc::Rc;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::sync::Arc;
use std::time::Duration;
use tokio::executor::current_thread;
use tokio::net::{TcpListener, UdpSocket};
use tokio_timer::Timer;
const DNS_QUERY_PARAM: &str = "dns";
const LISTEN_ADDRESS: &str = "127.0.0.1:3000";
const LOCAL_BIND_ADDRESS: &str = "0.0.0.0:0";
const MAX_CLIENTS: u32 = 512;
const MAX_DNS_QUESTION_LEN: usize = 512;
const MAX_DNS_RESPONSE_LEN: usize = 4096;
const MIN_DNS_PACKET_LEN: usize = 17;
const PATH: &str = "/dns-query";
const SERVER_ADDRESS: &str = "9.9.9.9:53";
const TIMEOUT_SEC: u64 = 10;
const MAX_TTL: u32 = 86400 * 7;
const MIN_TTL: u32 = 1;
const ERR_TTL: u32 = 1;
use libdoh::odoh::ODoHRotator;
use libdoh::reexports::tokio;
use libdoh::*;
/// Configuration and shared state of the (legacy, single-threaded)
/// DoH proxy service.
#[derive(Clone, Debug)]
struct DoH {
/// Address the HTTP server listens on.
listen_address: SocketAddr,
/// Local address UDP sockets bind to when forwarding queries upstream.
local_bind_address: SocketAddr,
/// Upstream DNS resolver address.
server_address: SocketAddr,
/// URI path served; requests to any other path get 404.
path: String,
/// Maximum number of simultaneous clients before 429 is returned.
max_clients: u32,
/// Per-request timeout.
timeout: Duration,
/// Timer wheel used to enforce `timeout`.
timers: Timer,
/// Live client counter; `Rc<RefCell<_>>` because the executor is single-threaded.
clients_count: Rc<RefCell<u32>>,
}
impl Service for DoH {
    type Request = Request;
    type Response = Response;
    type Error = hyper::Error;
    type Future = Box<Future<Item = Self::Response, Error = Self::Error>>;

    /// Entry point for every HTTP request: enforces the simultaneous-client
    /// limit, dispatches to `handle_client`, and bounds the whole exchange
    /// with `self.timeout`.
    fn call(&self, req: Request) -> Self::Future {
        {
            let mut count = self.clients_count.borrow_mut();
            if *count > self.max_clients {
                let mut response = Response::new();
                response.set_status(StatusCode::TooManyRequests);
                return Box::new(future::ok(response));
            }
            // BUG FIX: `saturating_add` returns the new value instead of
            // mutating in place; the original discarded it, so the counter
            // never moved and the `max_clients` limit could never trigger.
            *count = (*count).saturating_add(1);
        }
        let clients_count_inner = self.clients_count.clone();
        let fut = self
            .handle_client(req)
            .then(move |fut| {
                // Release the client slot whether the request succeeded or
                // failed (same fix: assign the returned value back).
                let mut count = clients_count_inner.borrow_mut();
                *count = (*count).saturating_sub(1);
                fut
            })
            .map_err(|err| {
                eprintln!("server error: {:?}", err);
                err
            });
        // Wrap the response future in a timeout; expiry maps to
        // `hyper::Error::Timeout`.
        let timed = self
            .timers
            .timeout(fut.map_err(|_| {}), self.timeout)
            .map_err(|_| hyper::Error::Timeout);
        Box::new(timed)
    }
}
impl DoH {
/// Route a request: POST bodies and GET `?dns=` parameters are decoded
/// and proxied to the upstream resolver; anything else yields an HTTP error.
fn handle_client(&self, req: Request) -> Box<Future<Item = Response, Error = hyper::Error>> {
let mut response = Response::new();
// Only the configured path is served.
if req.path() != self.path {
response.set_status(StatusCode::NotFound);
return Box::new(future::ok(response));
}
match *req.method() {
Method::Post => {
// DoH POST: the raw DNS message is the request body.
let fut = self.read_body_and_proxy(req.body());
return Box::new(fut.map_err(|_| hyper::Error::Incomplete));
}
Method::Get => {
// DoH GET: the DNS message is base64url (no padding) in the
// `dns` query-string parameter.
let query = req.query().unwrap_or("");
let mut question_str = None;
for parts in query.split('&') {
let mut kv = parts.split('=');
if let Some(k) = kv.next() {
if k == DNS_QUERY_PARAM {
question_str = kv.next();
}
}
}
// Missing or undecodable parameter => 400 Bad Request.
let question = match question_str.and_then(|question_str| {
base64::decode_config(question_str, base64::URL_SAFE_NO_PAD).ok()
}) {
Some(question) => question,
_ => {
response.set_status(StatusCode::BadRequest);
return Box::new(future::ok(response));
}
};
let fut = self.proxy(question);
return Box::new(fut.map_err(|_| hyper::Error::Incomplete));
}
_ => {
response.set_status(StatusCode::MethodNotAllowed);
}
};
Box::new(future::ok(response))
}
/// Forward a raw DNS query to the upstream resolver over UDP and turn the
/// reply into an HTTP response with a TTL-derived Cache-Control header.
fn proxy(&self, query: Vec<u8>) -> Box<Future<Item = Response, Error = ()>> {
// NOTE(review): `unwrap()` aborts on bind failure (e.g. ephemeral port
// exhaustion) — pre-existing behavior, left unchanged here.
let socket = UdpSocket::bind(&self.local_bind_address).unwrap();
let expected_server_address = self.server_address;
let fut = socket
.send_dgram(query, &self.server_address)
.map_err(|_| ())
.and_then(move |(socket, _)| {
// Preallocate the maximum-size reply buffer.
let packet = vec![0; MAX_DNS_RESPONSE_LEN];
socket.recv_dgram(packet).map_err(|_| {})
})
.and_then(move |(_socket, mut packet, len, response_server_address)| {
// Reject truncated replies and datagrams from unexpected peers
// (a cheap anti-spoofing check).
if len < MIN_DNS_PACKET_LEN || expected_server_address != response_server_address {
return future::err(());
}
packet.truncate(len);
// HTTP cache lifetime follows the smallest TTL in the answer,
// clamped to [MIN_TTL, MAX_TTL] (ERR_TTL on parse trouble).
let ttl = match dns::min_ttl(&packet, MIN_TTL, MAX_TTL, ERR_TTL) {
Err(_) => return future::err(()),
Ok(min_ttl) => min_ttl,
};
let packet_len = packet.len();
let mut response = Response::new();
response.set_body(packet);
let response = response
.with_header(ContentLength(packet_len as u64))
.with_header(ContentType(
"application/dns-message".parse().unwrap(),
))
.with_header(CacheControl(vec![CacheDirective::MaxAge(ttl)]));
future::ok(response)
});
Box::new(fut)
}
/// Collect a POST body, enforcing MAX_DNS_QUESTION_LEN while streaming,
/// then proxy the assembled query upstream.
fn read_body_and_proxy(&self, body: Body) -> Box<Future<Item = Response, Error = ()>> {
let mut sum_size = 0;
let inner = self.clone();
let fut =
body.and_then(move |chunk| {
// Enforce the size cap incrementally, chunk by chunk.
sum_size += chunk.len();
if sum_size > MAX_DNS_QUESTION_LEN {
Err(hyper::error::Error::TooLarge)
} else {
Ok(chunk)
}
}).concat2()
.map_err(move |_err| ())
.map(move |chunk| chunk.to_vec())
.and_then(move |query| {
// Too short to be a valid DNS message.
if query.len() < MIN_DNS_PACKET_LEN {
return Box::new(future::err(())) as Box<Future<Item = _, Error = _>>;
}
Box::new(inner.proxy(query))
});
Box::new(fut)
}
}
use crate::config::*;
use crate::constants::*;
fn main() {
let mut doh = DoH {
let mut runtime_builder = tokio::runtime::Builder::new_multi_thread();
runtime_builder.enable_all();
runtime_builder.thread_name("doh-proxy");
let runtime = runtime_builder.build().unwrap();
let rotator = match ODoHRotator::new(runtime.handle().clone()) {
Ok(r) => r,
Err(_) => panic!("Failed to create ODoHRotator"),
};
let mut globals = Globals {
#[cfg(feature = "tls")]
tls_cert_path: None,
#[cfg(feature = "tls")]
tls_cert_key_path: None,
listen_address: LISTEN_ADDRESS.parse().unwrap(),
local_bind_address: LOCAL_BIND_ADDRESS.parse().unwrap(),
local_bind_address: SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0),
server_address: SERVER_ADDRESS.parse().unwrap(),
path: PATH.to_string(),
max_clients: MAX_CLIENTS,
timeout: Duration::from_secs(TIMEOUT_SEC),
clients_count: Rc::new(RefCell::new(0u32)),
timers: tokio_timer::wheel().build(),
};
parse_opts(&mut doh);
let listen_address = doh.listen_address;
let listener = TcpListener::bind(&listen_address).unwrap();
println!("Listening on http://{}", listen_address);
let doh = Rc::new(doh);
let server = Http::new()
.keep_alive(false)
.serve_incoming(listener.incoming(), move || Ok(doh.clone()));
let fut = server.for_each(move |client_fut| {
current_thread::spawn(client_fut.map(|_| {}).map_err(|_| {}));
Ok(())
});
current_thread::block_on_all(fut).unwrap();
}
clients_count: Default::default(),
max_concurrent_streams: MAX_CONCURRENT_STREAMS,
min_ttl: MIN_TTL,
max_ttl: MAX_TTL,
err_ttl: ERR_TTL,
keepalive: true,
disable_post: false,
allow_odoh_post: false,
odoh_configs_path: ODOH_CONFIGS_PATH.to_string(),
odoh_rotator: Arc::new(rotator),
fn parse_opts(doh: &mut DoH) {
let max_clients = MAX_CLIENTS.to_string();
let timeout_sec = TIMEOUT_SEC.to_string();
let matches = App::new("doh-proxy")
.about("A DNS-over-HTTP server proxy")
.arg(
Arg::with_name("listen_address")
.short("l")
.long("listen-address")
.takes_value(true)
.default_value(LISTEN_ADDRESS)
.help("Address to listen to"),
)
.arg(
Arg::with_name("server_address")
.short("u")
.long("server-address")
.takes_value(true)
.default_value(SERVER_ADDRESS)
.help("Address to connect to"),
)
.arg(
Arg::with_name("local_bind_address")
.short("b")
.long("local-bind-address")
.takes_value(true)
.default_value(LOCAL_BIND_ADDRESS)
.help("Address to connect from"),
)
.arg(
Arg::with_name("path")
.short("p")
.long("path")
.takes_value(true)
.default_value(PATH)
.help("URI path"),
)
.arg(
Arg::with_name("max_clients")
.short("c")
.long("max-clients")
.takes_value(true)
.default_value(&max_clients)
.help("Maximum number of simultaneous clients"),
)
.arg(
Arg::with_name("timeout")
.short("t")
.long("timeout")
.takes_value(true)
.default_value(&timeout_sec)
.help("Timeout, in seconds"),
)
.get_matches();
if let Some(listen_address) = matches.value_of("listen_address") {
doh.listen_address = listen_address.parse().unwrap();
}
if let Some(server_address) = matches.value_of("server_address") {
doh.server_address = server_address.parse().unwrap();
}
if let Some(local_bind_address) = matches.value_of("local_bind_address") {
doh.local_bind_address = local_bind_address.parse().unwrap();
}
if let Some(max_clients) = matches.value_of("max_clients") {
doh.max_clients = max_clients.parse().unwrap();
}
if let Some(timeout) = matches.value_of("timeout") {
doh.timeout = Duration::from_secs(timeout.parse().unwrap());
}
runtime_handle: runtime.handle().clone(),
};
parse_opts(&mut globals);
let doh = DoH {
globals: Arc::new(globals),
};
runtime.block_on(doh.entrypoint()).unwrap();
}

24
src/utils.rs Normal file
View file

@ -0,0 +1,24 @@
// Helpers that validate startup arguments (listen/bind socket addresses and the upstream server).
use std::net::{SocketAddr, ToSocketAddrs};
/// Validate that `arg_val` is a well-formed socket address (host and port).
/// Returns the input unchanged on success, or a human-readable error.
pub(crate) fn verify_sock_addr(arg_val: &str) -> Result<String, String> {
    if arg_val.parse::<SocketAddr>().is_ok() {
        Ok(arg_val.to_string())
    } else {
        Err(format!(
            "Could not parse \"{arg_val}\" as a valid socket address (with port)."
        ))
    }
}
/// Validate that `arg_val` resolves to at least one socket address
/// (accepts literal addresses as well as resolvable host:port strings).
/// Returns the input unchanged on success, or a human-readable error.
pub(crate) fn verify_remote_server(arg_val: &str) -> Result<String, String> {
    // `to_socket_addrs` both parses and (if needed) resolves the name.
    let mut resolved = arg_val.to_socket_addrs().map_err(|err| format!("{err}"))?;
    if resolved.next().is_some() {
        Ok(arg_val.to_string())
    } else {
        // Resolution succeeded but produced no addresses.
        Err(format!(
            "Could not parse \"{arg_val}\" as a valid remote uri"
        ))
    }
}