Compare commits


No commits in common. "master" and "2.0.28" have entirely different histories.

1784 changed files with 153364 additions and 348254 deletions


@ -1 +0,0 @@
tracker.debian.org


@ -1 +0,0 @@
8.8.8.8


@ -1,21 +0,0 @@
##################
# Test blocklist #
##################
ad.*
ads.*
banner.*
banners.*
creatives.*
oas.*
oascentral.* # test inline comment
stats.* # test inline comment with trailing spaces
tag.*
telemetry.*
tracker.*
*.local
eth0.me
*.workgroup
*.youtube.* @time-to-sleep
facebook.com @work


@ -1,213 +0,0 @@
#! /bin/sh
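# Cross-compiles dnscrypt-proxy for every supported target (Windows, the BSDs,
# Solaris, Linux, macOS, Android) and packages each build together with the
# license and example configuration files.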
PACKAGE_VERSION="$1"
cd dnscrypt-proxy || exit 1
go clean
env GOOS=windows GOARCH=386 go build -mod vendor -ldflags="-s -w"
mkdir win32
ln dnscrypt-proxy.exe win32/
cp ../LICENSE example-dnscrypt-proxy.toml localhost.pem example-*.txt win32/
for i in win32/LICENSE win32/*.toml win32/*.txt; do ex -bsc '%!awk "{sub(/$/,\"\r\")}1"' -cx "$i"; done
ln ../windows/* win32/
zip -9 -r dnscrypt-proxy-win32-${PACKAGE_VERSION:-dev}.zip win32
go clean
env GOOS=windows GOARCH=amd64 go build -mod vendor -ldflags="-s -w"
mkdir win64
ln dnscrypt-proxy.exe win64/
cp ../LICENSE example-dnscrypt-proxy.toml localhost.pem example-*.txt win64/
for i in win64/LICENSE win64/*.toml win64/*.txt; do ex -bsc '%!awk "{sub(/$/,\"\r\")}1"' -cx "$i"; done
ln ../windows/* win64/
zip -9 -r dnscrypt-proxy-win64-${PACKAGE_VERSION:-dev}.zip win64
go clean
env GOOS=windows GOARCH=arm64 go build -mod vendor -ldflags="-s -w"
mkdir winarm
ln dnscrypt-proxy.exe winarm/
cp ../LICENSE example-dnscrypt-proxy.toml localhost.pem example-*.txt winarm/
for i in winarm/LICENSE winarm/*.toml winarm/*.txt; do ex -bsc '%!awk "{sub(/$/,\"\r\")}1"' -cx "$i"; done
ln ../windows/* winarm/
zip -9 -r dnscrypt-proxy-winarm-${PACKAGE_VERSION:-dev}.zip winarm
go clean
env GO386=softfloat GOOS=openbsd GOARCH=386 go build -mod vendor -ldflags="-s -w"
mkdir openbsd-i386
ln dnscrypt-proxy openbsd-i386/
ln ../LICENSE example-dnscrypt-proxy.toml localhost.pem example-*.txt openbsd-i386/
tar czpvf dnscrypt-proxy-openbsd_i386-${PACKAGE_VERSION:-dev}.tar.gz openbsd-i386
go clean
env GOOS=openbsd GOARCH=amd64 go build -mod vendor -ldflags="-s -w"
mkdir openbsd-amd64
ln dnscrypt-proxy openbsd-amd64/
ln ../LICENSE example-dnscrypt-proxy.toml localhost.pem example-*.txt openbsd-amd64/
tar czpvf dnscrypt-proxy-openbsd_amd64-${PACKAGE_VERSION:-dev}.tar.gz openbsd-amd64
go clean
env GOOS=freebsd GOARCH=386 go build -mod vendor -ldflags="-s -w"
mkdir freebsd-i386
ln dnscrypt-proxy freebsd-i386/
ln ../LICENSE example-dnscrypt-proxy.toml localhost.pem example-*.txt freebsd-i386/
tar czpvf dnscrypt-proxy-freebsd_i386-${PACKAGE_VERSION:-dev}.tar.gz freebsd-i386
go clean
env GOOS=freebsd GOARCH=amd64 go build -mod vendor -ldflags="-s -w"
mkdir freebsd-amd64
ln dnscrypt-proxy freebsd-amd64/
ln ../LICENSE example-dnscrypt-proxy.toml localhost.pem example-*.txt freebsd-amd64/
tar czpvf dnscrypt-proxy-freebsd_amd64-${PACKAGE_VERSION:-dev}.tar.gz freebsd-amd64
go clean
env GOOS=freebsd GOARCH=arm GOARM=5 go build -mod vendor -ldflags="-s -w"
mkdir freebsd-arm
ln dnscrypt-proxy freebsd-arm/
ln ../LICENSE example-dnscrypt-proxy.toml localhost.pem example-*.txt freebsd-arm/
tar czpvf dnscrypt-proxy-freebsd_arm-${PACKAGE_VERSION:-dev}.tar.gz freebsd-arm
go clean
env GOOS=dragonfly GOARCH=amd64 go build -mod vendor -ldflags="-s -w"
mkdir dragonflybsd-amd64
ln dnscrypt-proxy dragonflybsd-amd64/
ln ../LICENSE example-dnscrypt-proxy.toml localhost.pem example-*.txt dragonflybsd-amd64/
tar czpvf dnscrypt-proxy-dragonflybsd_amd64-${PACKAGE_VERSION:-dev}.tar.gz dragonflybsd-amd64
go clean
env GOOS=netbsd GOARCH=386 go build -mod vendor -ldflags="-s -w"
mkdir netbsd-i386
ln dnscrypt-proxy netbsd-i386/
ln ../LICENSE example-dnscrypt-proxy.toml localhost.pem example-*.txt netbsd-i386/
tar czpvf dnscrypt-proxy-netbsd_i386-${PACKAGE_VERSION:-dev}.tar.gz netbsd-i386
go clean
env GOOS=netbsd GOARCH=amd64 go build -mod vendor -ldflags="-s -w"
mkdir netbsd-amd64
ln dnscrypt-proxy netbsd-amd64/
ln ../LICENSE example-dnscrypt-proxy.toml localhost.pem example-*.txt netbsd-amd64/
tar czpvf dnscrypt-proxy-netbsd_amd64-${PACKAGE_VERSION:-dev}.tar.gz netbsd-amd64
go clean
env GOOS=solaris GOARCH=amd64 go build -mod vendor -ldflags="-s -w"
mkdir solaris-amd64
ln dnscrypt-proxy solaris-amd64/
ln ../LICENSE example-dnscrypt-proxy.toml localhost.pem example-*.txt solaris-amd64/
tar czpvf dnscrypt-proxy-solaris_amd64-${PACKAGE_VERSION:-dev}.tar.gz solaris-amd64
go clean
env CGO_ENABLED=0 GOOS=linux GOARCH=386 go build -mod vendor -ldflags="-s -w"
mkdir linux-i386
ln dnscrypt-proxy linux-i386/
ln ../LICENSE example-dnscrypt-proxy.toml localhost.pem example-*.txt linux-i386/
tar czpvf dnscrypt-proxy-linux_i386-${PACKAGE_VERSION:-dev}.tar.gz linux-i386
go clean
env CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -mod vendor -ldflags="-s -w"
mkdir linux-x86_64
ln dnscrypt-proxy linux-x86_64/
ln ../LICENSE example-dnscrypt-proxy.toml localhost.pem example-*.txt linux-x86_64/
tar czpvf dnscrypt-proxy-linux_x86_64-${PACKAGE_VERSION:-dev}.tar.gz linux-x86_64
go clean
env CGO_ENABLED=0 GOOS=linux GOARCH=arm GOARM=5 go build -mod vendor -ldflags="-s -w"
mkdir linux-arm
ln dnscrypt-proxy linux-arm/
ln ../LICENSE example-dnscrypt-proxy.toml localhost.pem example-*.txt linux-arm/
tar czpvf dnscrypt-proxy-linux_arm-${PACKAGE_VERSION:-dev}.tar.gz linux-arm
go clean
env CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -mod vendor -ldflags="-s -w"
mkdir linux-arm64
ln dnscrypt-proxy linux-arm64/
ln ../LICENSE example-dnscrypt-proxy.toml localhost.pem example-*.txt linux-arm64/
tar czpvf dnscrypt-proxy-linux_arm64-${PACKAGE_VERSION:-dev}.tar.gz linux-arm64
go clean
env CGO_ENABLED=0 GOOS=linux GOARCH=mips GOMIPS=softfloat go build -mod vendor -ldflags="-s -w"
mkdir linux-mips
ln dnscrypt-proxy linux-mips/
ln ../LICENSE example-dnscrypt-proxy.toml localhost.pem example-*.txt linux-mips/
tar czpvf dnscrypt-proxy-linux_mips-${PACKAGE_VERSION:-dev}.tar.gz linux-mips
go clean
env CGO_ENABLED=0 GOOS=linux GOARCH=mipsle GOMIPS=softfloat go build -mod vendor -ldflags="-s -w"
mkdir linux-mipsle
ln dnscrypt-proxy linux-mipsle/
ln ../LICENSE example-dnscrypt-proxy.toml localhost.pem example-*.txt linux-mipsle/
tar czpvf dnscrypt-proxy-linux_mipsle-${PACKAGE_VERSION:-dev}.tar.gz linux-mipsle
go clean
env CGO_ENABLED=0 GOOS=linux GOARCH=mips64 GOMIPS64=softfloat go build -mod vendor -ldflags="-s -w"
mkdir linux-mips64
ln dnscrypt-proxy linux-mips64/
ln ../LICENSE example-dnscrypt-proxy.toml localhost.pem example-*.txt linux-mips64/
tar czpvf dnscrypt-proxy-linux_mips64-${PACKAGE_VERSION:-dev}.tar.gz linux-mips64
go clean
env CGO_ENABLED=0 GOOS=linux GOARCH=mips64le GOMIPS64=softfloat go build -mod vendor -ldflags="-s -w"
mkdir linux-mips64le
ln dnscrypt-proxy linux-mips64le/
ln ../LICENSE example-dnscrypt-proxy.toml localhost.pem example-*.txt linux-mips64le/
tar czpvf dnscrypt-proxy-linux_mips64le-${PACKAGE_VERSION:-dev}.tar.gz linux-mips64le
go clean
env CGO_ENABLED=0 GOOS=linux GOARCH=riscv64 go build -mod vendor -ldflags="-s -w"
mkdir linux-riscv64
ln dnscrypt-proxy linux-riscv64/
ln ../LICENSE example-dnscrypt-proxy.toml localhost.pem example-*.txt linux-riscv64/
tar czpvf dnscrypt-proxy-linux_riscv64-${PACKAGE_VERSION:-dev}.tar.gz linux-riscv64
go clean
env GOOS=darwin GOARCH=amd64 go build -mod vendor -ldflags="-s -w"
mkdir macos-x86_64
ln dnscrypt-proxy macos-x86_64/
ln ../LICENSE example-dnscrypt-proxy.toml localhost.pem example-*.txt macos-x86_64/
tar czpvf dnscrypt-proxy-macos_x86_64-${PACKAGE_VERSION:-dev}.tar.gz macos-x86_64
go clean
env GOOS=darwin GOARCH=arm64 go build -mod vendor -ldflags="-s -w"
mkdir macos-arm64
ln dnscrypt-proxy macos-arm64/
ln ../LICENSE example-dnscrypt-proxy.toml localhost.pem example-*.txt macos-arm64/
tar czpvf dnscrypt-proxy-macos_arm64-${PACKAGE_VERSION:-dev}.tar.gz macos-arm64
# Android
NDK_VER=r20
curl -LOs https://dl.google.com/android/repository/android-ndk-${NDK_VER}-linux-x86_64.zip
unzip -q android-ndk-${NDK_VER}-linux-x86_64.zip -d ${HOME}
rm android-ndk-${NDK_VER}-linux-x86_64.zip
NDK_TOOLS=${HOME}/android-ndk-${NDK_VER}
export PATH=${PATH}:${NDK_TOOLS}/toolchains/llvm/prebuilt/linux-x86_64/bin
go clean
env CC=armv7a-linux-androideabi19-clang CXX=armv7a-linux-androideabi19-clang++ CGO_ENABLED=1 GOOS=android GOARCH=arm GOARM=7 go build -mod vendor -ldflags="-s -w"
mkdir android-arm
ln dnscrypt-proxy android-arm/
cp ../LICENSE example-dnscrypt-proxy.toml localhost.pem example-*.txt android-arm/
zip -9 -r dnscrypt-proxy-android_arm-${PACKAGE_VERSION:-dev}.zip android-arm
go clean
env CC=aarch64-linux-android21-clang CXX=aarch64-linux-android21-clang++ CGO_ENABLED=1 GOOS=android GOARCH=arm64 go build -mod vendor -ldflags="-s -w"
mkdir android-arm64
ln dnscrypt-proxy android-arm64/
cp ../LICENSE example-dnscrypt-proxy.toml localhost.pem example-*.txt android-arm64/
zip -9 -r dnscrypt-proxy-android_arm64-${PACKAGE_VERSION:-dev}.zip android-arm64
go clean
env CC=i686-linux-android19-clang CXX=i686-linux-android19-clang++ CGO_ENABLED=1 GOOS=android GOARCH=386 go build -mod vendor -ldflags="-s -w"
mkdir android-i386
ln dnscrypt-proxy android-i386/
cp ../LICENSE example-dnscrypt-proxy.toml localhost.pem example-*.txt android-i386/
zip -9 -r dnscrypt-proxy-android_i386-${PACKAGE_VERSION:-dev}.zip android-i386
go clean
env CC=x86_64-linux-android21-clang CXX=x86_64-linux-android21-clang++ CGO_ENABLED=1 GOOS=android GOARCH=amd64 go build -mod vendor -ldflags="-s -w"
mkdir android-x86_64
ln dnscrypt-proxy android-x86_64/
cp ../LICENSE example-dnscrypt-proxy.toml localhost.pem example-*.txt android-x86_64/
zip -9 -r dnscrypt-proxy-android_x86_64-${PACKAGE_VERSION:-dev}.zip android-x86_64
# Done
ls -l dnscrypt-proxy-*.tar.gz dnscrypt-proxy-*.zip


@ -1,57 +0,0 @@
#! /bin/sh
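# Builds Windows MSI installers for the win32/win64 packages, using the WiX
# toolset running under Wine.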
PACKAGE_VERSION="$1"
cd dnscrypt-proxy || exit 1
# setup the environment
sudo apt-get update -y
sudo apt-get install -y wget wine dotnet-sdk-8.0
sudo dpkg --add-architecture i386 && sudo apt-get update && sudo apt-get install -y wine32
sudo apt-get install -y unzip
export WINEPREFIX="$HOME"/.wine32
export WINEARCH=win32
export WINEDEBUG=-all
wget https://dl.winehq.org/wine/wine-mono/9.4.0/wine-mono-9.4.0-x86.msi
WINEPREFIX="$HOME/.wine32" WINEARCH=win32 wineboot --init
WINEPREFIX="$HOME/.wine32" WINEARCH=win32 wine msiexec /i wine-mono-9.4.0-x86.msi
mkdir "$HOME"/.wine32/drive_c/temp
mkdir -p "$HOME"/.wine/drive_c/temp
wget https://github.com/wixtoolset/wix3/releases/download/wix3141rtm/wix314-binaries.zip -nv -O wix.zip
unzip wix.zip -d "$HOME"/wix
rm -f wix.zip
builddir=$(pwd)
srcdir=$(
cd ..
pwd
)
version=$PACKAGE_VERSION
cd "$HOME"/wix || exit
ln -s "$builddir" "$HOME"/wix/build
ln -s "$srcdir"/contrib/msi "$HOME"/wix/wixproj
echo "builddir: $builddir"
# build the msi's
#################
for arch in x64 x86; do
binpath="win32"
if [ "$arch" = "x64" ]; then
binpath="win64"
fi
echo $arch
wine candle.exe -dVersion="$version" -dPlatform=$arch -dPath=build\\$binpath -arch $arch wixproj\\dnscrypt.wxs -out build\\dnscrypt-$arch.wixobj
wine light.exe -out build\\dnscrypt-proxy-$arch-"$version".msi build\\dnscrypt-$arch.wixobj -sval
done
cd "$builddir" || exit


@ -1,166 +0,0 @@
#! /bin/sh
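# Integration tests: build the proxy, run it against several test
# configurations, and check DNS responses and log output.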
DNS_PORT=5300
HTTP_PORT=3053
TEST_COUNT=0
exec 2>error.log
t() {
TEST_COUNT=$((TEST_COUNT + 1))
echo "Test #${TEST_COUNT}..."
false
}
fail() (
echo "*** Test #${TEST_COUNT} FAILED ***" >&2
)
section() {
true
}
rm -f blocked-names.log blocked-ips.log query.log nx.log allowed-names.log
t || (
cd ../dnscrypt-proxy
go test -mod vendor
go build -mod vendor -race
) || fail
section
sed -e "s/127.0.0.1:53/127.0.0.1:${DNS_PORT}/g" -e "s/# server_names =.*/server_names = ['scaleway-fr']/" ../dnscrypt-proxy/example-dnscrypt-proxy.toml >test-dnscrypt-proxy.toml
../dnscrypt-proxy/dnscrypt-proxy -loglevel 3 -config test-dnscrypt-proxy.toml -pidfile /tmp/dnscrypt-proxy.pidfile &
sleep 5
t ||
dig -p${DNS_PORT} . @127.0.0.1 | grep -Fq 'root-servers.net.' || fail
t || dig -p${DNS_PORT} +dnssec . @127.0.0.1 | grep -Fq 'root-servers.net.' || fail
t || dig -p${DNS_PORT} +dnssec . @127.0.0.1 | grep -Fq 'flags: do;' || fail
t || dig -p${DNS_PORT} +short one.one.one.one @127.0.0.1 | grep -Fq '1.1.1.1' || fail
t || dig -p${DNS_PORT} +dnssec dnscrypt.info @127.0.0.1 | grep -Fq 'flags: qr rd ra ad' || fail
t || dig -p${DNS_PORT} +dnssec dnscrypt.info @127.0.0.1 | grep -Fq 'flags: do;' || fail
kill $(cat /tmp/dnscrypt-proxy.pidfile)
sleep 5
section
../dnscrypt-proxy/dnscrypt-proxy -loglevel 3 -config test2-dnscrypt-proxy.toml -pidfile /tmp/dnscrypt-proxy.pidfile &
sleep 5
section
t || dig -p${DNS_PORT} A microsoft.com @127.0.0.1 | grep -Fq "NOERROR" || fail
t || dig -p${DNS_PORT} A MICROSOFT.COM @127.0.0.1 | grep -Fq "NOERROR" || fail
section
t || dig -p${DNS_PORT} AAAA ipv6.google.com @127.0.0.1 | grep -Fq 'locally blocked' || fail
section
t || dig -p${DNS_PORT} invalid. @127.0.0.1 | grep -Fq NXDOMAIN || fail
t || dig -p${DNS_PORT} +dnssec invalid. @127.0.0.1 | grep -Fq 'flags: do;' || fail
t || dig -p${DNS_PORT} PTR 168.192.in-addr.arpa @127.0.0.1 | grep -Fq 'NXDOMAIN' || fail
t || dig -p${DNS_PORT} +dnssec PTR 168.192.in-addr.arpa @127.0.0.1 | grep -Fq 'flags: do;' || fail
section
t || dig -p${DNS_PORT} +dnssec darpa.mil @127.0.0.1 2>&1 | grep -Fvq 'RRSIG' || fail
t || dig -p${DNS_PORT} +dnssec www.darpa.mil @127.0.0.1 2>&1 | grep -Fvq 'RRSIG' || fail
t || dig -p${DNS_PORT} A download.windowsupdate.com @127.0.0.1 | grep -Fq "NOERROR" || fail
section
t || dig -p${DNS_PORT} +short cloakedunregistered.com @127.0.0.1 | grep -Eq '1.1.1.1|1.0.0.1' || fail
t || dig -p${DNS_PORT} +short MX cloakedunregistered.com @127.0.0.1 | grep -Fq 'locally blocked' || fail
t || dig -p${DNS_PORT} +short MX example.com @127.0.0.1 | grep -Fvq 'locally blocked' || fail
t || dig -p${DNS_PORT} NS cloakedunregistered.com @127.0.0.1 | grep -Fiq 'gtld-servers.net' || fail
t || dig -p${DNS_PORT} +short www.cloakedunregistered2.com @127.0.0.1 | grep -Eq '1.1.1.1|1.0.0.1' || fail
t || dig -p${DNS_PORT} +short www.dnscrypt-test @127.0.0.1 | grep -Fq '192.168.100.100' || fail
t || dig -p${DNS_PORT} a.www.dnscrypt-test @127.0.0.1 | grep -Fq 'NXDOMAIN' || fail
t || dig -p${DNS_PORT} +short ptr 101.100.168.192.in-addr.arpa. @127.0.0.1 | grep -Eq 'www.dnscrypt-test.com' || fail
t || dig -p${DNS_PORT} +short ptr 1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.2.0.d.f.ip6.arpa. @127.0.0.1 | grep -Eq 'ipv6.dnscrypt-test.com' || fail
section
t || dig -p${DNS_PORT} telemetry.example @127.0.0.1 | grep -Fq 'locally blocked' || fail
section
t || dig -p${DNS_PORT} dns.google @127.0.0.1 | grep -Fq 'locally blocked' || fail
section
t || dig -p${DNS_PORT} tracker.xdebian.org @127.0.0.1 | grep -Fq 'locally blocked' || fail
t || dig -p${DNS_PORT} tracker.debian.org @127.0.0.1 | grep -Fqv 'locally blocked' || fail
section
t || curl --insecure -siL https://127.0.0.1:${HTTP_PORT}/ | grep -Fq 'HTTP/2 404' || fail
t || curl --insecure -sL https://127.0.0.1:${HTTP_PORT}/dns-query | grep -Fq 'dnscrypt-proxy local DoH server' || fail
t ||
echo yv4BAAABAAAAAAABAAACAAEAACkQAAAAgAAAAA== | base64 -d |
curl -H'Content-Type: application/dns-message' -H'Accept: application/dns-message' --data-binary @- -D - --insecure https://127.0.0.1:${HTTP_PORT}/dns-query 2>/dev/null |
grep -Fq application/dns-message || fail
kill $(cat /tmp/dnscrypt-proxy.pidfile)
sleep 5
section
t || grep -Fq 'telemetry.example' blocked-names.log || fail
t || grep -Fq 'telemetry.*' blocked-names.log || fail
t || grep -Fq 'tracker.xdebian.org' blocked-names.log || fail
t || grep -Fq 'tracker.*' blocked-names.log || fail
section
t || grep -Fq 'dns.google' blocked-ips.log || fail
t || grep -Fq '8.8.8.8' blocked-ips.log || fail
section
t || grep -Fq 'a.www.dnscrypt-test' nx.log || fail
section
t || grep -Fq 'a.www.dnscrypt-test' nx.log || fail
section
t || grep -Eq 'microsoft.com.*PASS.*[^-]$' query.log || fail
t || grep -Eq 'microsoft.com.*PASS.*-$' query.log || fail
t || grep -Eq 'ipv6.google.com.*SYNTH' query.log || fail
t || grep -Eq 'invalid.*SYNTH' query.log || fail
t || grep -Eq '168.192.in-addr.arpa.*SYNTH' query.log || fail
t || grep -Eq 'darpa.mil.*FORWARD' query.log || fail
t || grep -Eq 'www.darpa.mil.*FORWARD' query.log || fail
t || grep -Eq 'download.windowsupdate.com.*FORWARD' query.log || fail
t || grep -Eq 'cloakedunregistered.com.*CLOAK' query.log || fail
t || grep -Eq 'www.cloakedunregistered2.com.*CLOAK' query.log || fail
t || grep -Eq 'www.dnscrypt-test.*CLOAK' query.log || fail
t || grep -Eq 'a.www.dnscrypt-test.*NXDOMAIN' query.log || fail
t || grep -Eq 'telemetry.example.*REJECT' query.log || fail
t || grep -Eq 'dns.google.*REJECT' query.log || fail
t || grep -Eq 'tracker.xdebian.org.*REJECT' query.log || fail
t || grep -Eq 'tracker.debian.org.*PASS' query.log || fail
t || grep -Eq '[.].*NS.*PASS' query.log || fail
section
t || grep -Fq 'tracker.debian.org' allowed-names.log || fail
t || grep -Fq '*.tracker.debian' allowed-names.log || fail
section
../dnscrypt-proxy/dnscrypt-proxy -loglevel 3 -config test3-dnscrypt-proxy.toml -pidfile /tmp/dnscrypt-proxy.pidfile &
sleep 5
section
t || dig -p${DNS_PORT} A microsoft.com @127.0.0.1 | grep -Fq "NOERROR" || fail
t || dig -p${DNS_PORT} A MICROSOFT.COM @127.0.0.1 | grep -Fq "NOERROR" || fail
kill $(cat /tmp/dnscrypt-proxy.pidfile)
sleep 5
section
../dnscrypt-proxy/dnscrypt-proxy -loglevel 3 -config test-odoh-proxied.toml -pidfile /tmp/odoh-proxied.pidfile &
sleep 5
section
t || dig -p${DNS_PORT} A microsoft.com @127.0.0.1 | grep -Fq "NOERROR" || fail
t || dig -p${DNS_PORT} A cloudflare.com @127.0.0.1 | grep -Fq "NOERROR" || fail
kill $(cat /tmp/odoh-proxied.pidfile)
sleep 5
if [ -s error.log ]; then
cat *.log
exit 1
fi


@ -1,5 +0,0 @@
cloakedunregistered.* one.one.one.one
*.cloakedunregistered2.* one.one.one.one # inline comment
=www.dnscrypt-test 192.168.100.100
=www.dnscrypt-test.com 192.168.100.101
=ipv6.dnscrypt-test.com fd02::1


@ -1,2 +0,0 @@
darpa.mil 4.2.2.2
download.windowsupdate.com $BOOTSTRAP


@ -1,17 +0,0 @@
server_names = ['odohtarget']
listen_addresses = ['127.0.0.1:5300']
[query_log]
file = 'query.log'
[static]
[static.'odohtarget']
stamp = 'sdns://BQcAAAAAAAAADm9kb2guY3J5cHRvLnN4Ci9kbnMtcXVlcnk'
[static.'odohrelay']
stamp = 'sdns://hQcAAAAAAAAAAAAab2RvaC1yZWxheS5lZGdlY29tcHV0ZS5hcHABLw'
[anonymized_dns]
routes = [
{ server_name='odohtarget', via=['odohrelay'] }
]


@ -1,69 +0,0 @@
server_names = ['public-scaleway-fr']
listen_addresses = ['127.0.0.1:5300']
require_dnssec = true
dnscrypt_ephemeral_keys = true
tls_disable_session_tickets = false
ignore_system_dns = false
lb_strategy = 'p12'
block_ipv6 = true
block_unqualified = true
block_undelegated = true
forwarding_rules = 'forwarding-rules.txt'
cloaking_rules = 'cloaking-rules.txt'
cloak_ptr = true
cache = true
[local_doh]
listen_addresses = ['127.0.0.1:3053']
cert_file = "../dnscrypt-proxy/localhost.pem"
cert_key_file = "../dnscrypt-proxy/localhost.pem"
[query_log]
file = 'query.log'
[nx_log]
file = 'nx.log'
[blocked_names]
blocked_names_file = 'blocked-names.txt'
log_file = 'blocked-names.log'
[blocked_ips]
blocked_ips_file = 'blocked-ips.txt'
log_file = 'blocked-ips.log'
[allowed_names]
allowed_names_file = 'allowed-names.txt'
log_file = 'allowed-names.log'
[schedules]
[schedules.'time-to-sleep']
mon = [{after='21:00', before='7:00'}]
tue = [{after='21:00', before='7:00'}]
wed = [{after='21:00', before='7:00'}]
thu = [{after='21:00', before='7:00'}]
fri = [{after='23:00', before='7:00'}]
sat = [{after='23:00', before='7:00'}]
sun = [{after='21:00', before='7:00'}]
[schedules.'work']
mon = [{after='9:00', before='18:00'}]
tue = [{after='9:00', before='18:00'}]
wed = [{after='9:00', before='18:00'}]
thu = [{after='9:00', before='18:00'}]
fri = [{after='9:00', before='17:00'}]
[sources]
[sources.'public-resolvers']
urls = ['https://raw.githubusercontent.com/DNSCrypt/dnscrypt-resolvers/master/v2/public-resolvers.md', 'https://download.dnscrypt.info/resolvers-list/v2/public-resolvers.md']
cache_file = 'public-resolvers.md'
minisign_key = 'RWQf6LRCGA9i53mlYecO4IzT51TGPpvWucNSCh1CBM0QTaLn73Y7GFO3'
prefix = 'public-'
[sources.'relays']
urls = ['https://raw.githubusercontent.com/DNSCrypt/dnscrypt-resolvers/master/v2/relays.md', 'https://download.dnscrypt.info/resolvers-list/v2/relays.md']
cache_file = 'relays.md'
minisign_key = 'RWQf6LRCGA9i53mlYecO4IzT51TGPpvWucNSCh1CBM0QTaLn73Y7GFO3'
refresh_delay = 72
prefix = 'relay-'


@ -1,19 +0,0 @@
server_names = ['myserver']
listen_addresses = ['127.0.0.1:5300']
require_dnssec = true
dnscrypt_ephemeral_keys = true
tls_disable_session_tickets = false
ignore_system_dns = false
lb_strategy = 'p12'
block_ipv6 = true
block_unqualified = true
block_undelegated = true
cache = true
[query_log]
file = 'query.log'
[static]
[static.'myserver']
stamp = 'sdns://AQcAAAAAAAAADjIxMi40Ny4yMjguMTM2IOgBuE6mBr-wusDOQ0RbsV66ZLAvo8SqMa4QY2oHkDJNHzIuZG5zY3J5cHQtY2VydC5mci5kbnNjcnlwdC5vcmc'

.gitattributes

@ -1,7 +0,0 @@
* text=auto
*.go text diff=golang
*.bat text eol=crlf
go.mod text eol=lf
# Ensure test fixtures don't get mangled
**/testdata/** -text


@ -1,65 +0,0 @@
---
name: "\U0001F41E Issues"
about: Bug reports
title: ''
labels: ''
assignees: ''
---
THE TRACKER IS DEDICATED TO KEEPING TRACK OF *BUGS*,
preferably after they have been already discussed and confirmed to be reproducible.
FOR ASSISTANCE, PLEASE CLOSE THIS FORM AND USE THE DISCUSSIONS SECTION INSTEAD:
https://github.com/DNSCrypt/dnscrypt-proxy/discussions/categories/q-a
~~~
Reported bugs must be reproducible in the context described in the "Context" section.
Installation and configuration issues are not bugs, but individual assistance requests.
Context: the LATEST version of `dnscrypt-proxy` (precompiled binaries downloaded from this repository) is correctly installed and configured on your system, but something doesn't seem to produce the expected result.
If the bug is not trivial to reproduce on any platform, please include ALL the steps required to reliably duplicate it, on a vanilla, generic install of macOS, Windows, OpenBSD or Ubuntu Linux system, in their most current version.
If you don't have any clear understanding of the issue or can't enumerate the steps to reproduce it, open a discussion instead:
https://github.com/DNSCrypt/dnscrypt-proxy/discussions
## Output of the following commands:
./dnscrypt-proxy -version
./dnscrypt-proxy -check
./dnscrypt-proxy -resolve example.com
- [ ] Initially raised as discussion #...
## *What* is affected by this bug?
## *When* does this occur?
## *Where* does it happen?
## *How* do we replicate the issue?
<!-- Please list all the steps required to reliably replicate it, starting from a newly installed operating system -->
## Expected behavior (i.e. solution)
## Other Comments


@ -1,22 +0,0 @@
---
name: "🙋🏽 Planned changes"
about: List of planned changes
title: ''
labels: ''
assignees: ''
---
The starting point should be a discussion.
https://github.com/DNSCrypt/dnscrypt-proxy/discussions/
Suggestions may be raised as an "Ideas" discussion.
We can then determine if the discussion needs to be escalated into a "planned change" or not.
This will help us ensure that the issue tracker properly reflects ongoing or needed work on the project.
---
- [ ] Initially raised as discussion #...


@ -1,15 +0,0 @@
version: 2
updates:
# Maintain dependencies for GitHub Actions
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "daily"
# Maintain dependencies for Go
- package-ecosystem: "gomod"
directory: "/"
schedule:
interval: "daily"
open-pull-requests-limit: 10


@ -1,12 +0,0 @@
name: Autocloser
on: [issues]
jobs:
autoclose:
runs-on: ubuntu-latest
steps:
- name: Autoclose issues that did not follow issue template
uses: roots/issue-closer@v1.2
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
issue-close-message: "This issue was automatically closed because it did not follow the issue template. We use the issue tracker exclusively for bug reports and feature additions that have been previously discussed. However, this issue appears to be a support request. Please use the discussion forums for support requests."
issue-pattern: ".*(do we replicate the issue|Expected behavior|raised as discussion|# Impact).*"


@ -1,32 +0,0 @@
name: "CodeQL scan"
on:
push:
pull_request:
schedule:
- cron: '0 14 * * 6'
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 2
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version-file: 'go.mod'
- name: Initialize CodeQL
uses: github/codeql-action/init@v3
- name: Autobuild
uses: github/codeql-action/autobuild@v3
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v3


@ -1,95 +0,0 @@
on:
push:
paths:
- "**.go"
- "go.*"
- "**/testdata/**"
- ".ci/**"
- ".git*"
- ".github/workflows/releases.yml"
pull_request:
paths:
- "**.go"
- "go.*"
- "**/testdata/**"
- ".ci/**"
- ".git*"
- ".github/workflows/releases.yml"
name: GitHub CI
jobs:
build:
name: Build
runs-on: ubuntu-latest
steps:
- name: Get the version
id: get_version
run: echo "VERSION=${GITHUB_REF/refs\/tags\//}" >> $GITHUB_OUTPUT
- name: Check out code
uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: 1
check-latest: true
id: go
- name: Test suite
run: |
go version
cd .ci
./ci-test.sh
cd -
- name: Build all
if: startsWith(github.ref, 'refs/tags/')
run: |
.ci/ci-build.sh "${{ steps.get_version.outputs.VERSION }}"
- name: Package
if: startsWith(github.ref, 'refs/tags/')
run: |
.ci/ci-package.sh "${{ steps.get_version.outputs.VERSION }}"
- name: Install minisign and sign
if: startsWith(github.ref, 'refs/tags/')
run: |
sudo apt-get -y install libsodium-dev
git clone --depth 1 https://github.com/jedisct1/minisign.git
cd minisign/src
mkdir -p /tmp/bin
cc -O2 -o /tmp/bin/minisign -D_GNU_SOURCE *.c -lsodium
cd -
/tmp/bin/minisign -v
echo '#' > /tmp/minisign.key
echo "${{ secrets.MINISIGN_SK }}" >> /tmp/minisign.key
cd dnscrypt-proxy
echo | /tmp/bin/minisign -s /tmp/minisign.key -Sm *.tar.gz *.zip
ls -l dnscrypt-proxy*
- name: Create release
id: create_release
uses: actions/create-release@v1
if: startsWith(github.ref, 'refs/tags/')
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tag_name: ${{ github.ref }}
release_name: Release ${{ github.ref }}
draft: false
prerelease: false
- name: Upload release assets
uses: softprops/action-gh-release@ab50eebb6488051c6788d97fa95232267c6a4e23
if: startsWith(github.ref, 'refs/tags/')
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
files: |
dnscrypt-proxy/*.zip
dnscrypt-proxy/*.tar.gz
dnscrypt-proxy/*.minisig
dnscrypt-proxy/*.msi


@ -1,23 +0,0 @@
name: ShiftLeft Scan
on: push
jobs:
Scan-Build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Perform ShiftLeft Scan
uses: ShiftLeftSecurity/scan-action@master
env:
WORKSPACE: ""
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
SCAN_AUTO_BUILD: true
with:
output: reports
- name: Upload report
uses: github/codeql-action/upload-sarif@v3
with:
sarif_file: reports

.gitignore

@ -10,10 +10,3 @@
dnscrypt-proxy/dnscrypt-proxy2
dnscrypt-proxy/dnscrypt-proxy
.idea
.ci/*.log
.ci/*.md
.ci/*.md.minisig
.ci/test-dnscrypt-proxy.toml
contrib/msi/*.msi
contrib/msi/*.wixpdb
contrib/msi/*.wixobj

.travis.yml

@ -0,0 +1,252 @@
language: go
os:
- linux
go:
- 1.x
script:
- gimme --list
- echo $TRAVIS_GO_VERSION
- cd dnscrypt-proxy
- go clean
- env GOOS=windows GOARCH=386 go build -mod vendor -ldflags="-s -w"
- mkdir win32
- ln dnscrypt-proxy.exe win32/
- cp ../LICENSE example-dnscrypt-proxy.toml example-*.txt win32/
- for i in win32/LICENSE win32/*.toml win32/*.txt; do ex -bsc '%!awk "{sub(/$/,\"\r\")}1"'
-cx "$i"; done
- ln ../windows/* win32/
- zip -9 -r dnscrypt-proxy-win32-${TRAVIS_TAG:-dev}.zip win32
- go clean
- env GOOS=windows GOARCH=amd64 go build -mod vendor -ldflags="-s -w"
- mkdir win64
- ln dnscrypt-proxy.exe win64/
- cp ../LICENSE example-dnscrypt-proxy.toml example-*.txt win64/
- for i in win64/LICENSE win64/*.toml win64/*.txt; do ex -bsc '%!awk "{sub(/$/,\"\r\")}1"'
-cx "$i"; done
- ln ../windows/* win64/
- zip -9 -r dnscrypt-proxy-win64-${TRAVIS_TAG:-dev}.zip win64
- go clean
- env GO386=387 GOOS=openbsd GOARCH=386 go build -mod vendor -ldflags="-s -w"
- mkdir openbsd-i386
- ln dnscrypt-proxy openbsd-i386/
- ln ../LICENSE example-dnscrypt-proxy.toml example-*.txt openbsd-i386/
- tar czpvf dnscrypt-proxy-openbsd_i386-${TRAVIS_TAG:-dev}.tar.gz openbsd-i386
- go clean
- env GOOS=openbsd GOARCH=amd64 go build -mod vendor -ldflags="-s -w"
- mkdir openbsd-amd64
- ln dnscrypt-proxy openbsd-amd64/
- ln ../LICENSE example-dnscrypt-proxy.toml example-*.txt openbsd-amd64/
- tar czpvf dnscrypt-proxy-openbsd_amd64-${TRAVIS_TAG:-dev}.tar.gz openbsd-amd64
- go clean
- env GOOS=freebsd GOARCH=386 go build -mod vendor -ldflags="-s -w"
- mkdir freebsd-i386
- ln dnscrypt-proxy freebsd-i386/
- ln ../LICENSE example-dnscrypt-proxy.toml example-*.txt freebsd-i386/
- tar czpvf dnscrypt-proxy-freebsd_i386-${TRAVIS_TAG:-dev}.tar.gz freebsd-i386
- go clean
- env GOOS=freebsd GOARCH=amd64 go build -mod vendor -ldflags="-s -w"
- mkdir freebsd-amd64
- ln dnscrypt-proxy freebsd-amd64/
- ln ../LICENSE example-dnscrypt-proxy.toml example-*.txt freebsd-amd64/
- tar czpvf dnscrypt-proxy-freebsd_amd64-${TRAVIS_TAG:-dev}.tar.gz freebsd-amd64
- go clean
- env GOOS=freebsd GOARCH=arm go build -mod vendor -ldflags="-s -w"
- mkdir freebsd-arm
- ln dnscrypt-proxy freebsd-arm/
- ln ../LICENSE example-dnscrypt-proxy.toml example-*.txt freebsd-arm/
- tar czpvf dnscrypt-proxy-freebsd_arm-${TRAVIS_TAG:-dev}.tar.gz freebsd-arm
- go clean
- env GOOS=freebsd GOARCH=arm GOARM=7 go build -mod vendor -ldflags="-s -w"
- mkdir freebsd-armv7
- ln dnscrypt-proxy freebsd-armv7/
- ln ../LICENSE example-dnscrypt-proxy.toml example-*.txt freebsd-armv7/
- tar czpvf dnscrypt-proxy-freebsd_armv7-${TRAVIS_TAG:-dev}.tar.gz freebsd-armv7
- go clean
- env GOOS=dragonfly GOARCH=amd64 go build -mod vendor -ldflags="-s -w"
- mkdir dragonflybsd-amd64
- ln dnscrypt-proxy dragonflybsd-amd64/
- ln ../LICENSE example-dnscrypt-proxy.toml example-*.txt dragonflybsd-amd64/
- tar czpvf dnscrypt-proxy-dragonflybsd_amd64-${TRAVIS_TAG:-dev}.tar.gz dragonflybsd-amd64
- go clean
- env GOOS=netbsd GOARCH=386 go build -mod vendor -ldflags="-s -w"
- mkdir netbsd-i386
- ln dnscrypt-proxy netbsd-i386/
- ln ../LICENSE example-dnscrypt-proxy.toml example-*.txt netbsd-i386/
- tar czpvf dnscrypt-proxy-netbsd_i386-${TRAVIS_TAG:-dev}.tar.gz netbsd-i386
- go clean
- env GOOS=netbsd GOARCH=amd64 go build -mod vendor -ldflags="-s -w"
- mkdir netbsd-amd64
- ln dnscrypt-proxy netbsd-amd64/
- ln ../LICENSE example-dnscrypt-proxy.toml example-*.txt netbsd-amd64/
- tar czpvf dnscrypt-proxy-netbsd_amd64-${TRAVIS_TAG:-dev}.tar.gz netbsd-amd64
- go clean
- env GOOS=solaris GOARCH=amd64 go build -mod vendor -ldflags="-s -w"
- mkdir solaris-amd64
- ln dnscrypt-proxy solaris-amd64/
- ln ../LICENSE example-dnscrypt-proxy.toml example-*.txt solaris-amd64/
- tar czpvf dnscrypt-proxy-solaris_amd64-${TRAVIS_TAG:-dev}.tar.gz solaris-amd64
- go clean
- env CGO_ENABLED=0 GOOS=linux GOARCH=386 go build -mod vendor -ldflags="-s -w"
- mkdir linux-i386
- ln dnscrypt-proxy linux-i386/
- ln ../LICENSE example-dnscrypt-proxy.toml example-*.txt linux-i386/
- tar czpvf dnscrypt-proxy-linux_i386-${TRAVIS_TAG:-dev}.tar.gz linux-i386
- go clean
- env CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -mod vendor -ldflags="-s -w"
- mkdir linux-x86_64
- ln dnscrypt-proxy linux-x86_64/
- ln ../LICENSE example-dnscrypt-proxy.toml example-*.txt linux-x86_64/
- tar czpvf dnscrypt-proxy-linux_x86_64-${TRAVIS_TAG:-dev}.tar.gz linux-x86_64
- go clean
- env CGO_ENABLED=0 GOOS=linux GOARCH=arm go build -mod vendor -ldflags="-s -w"
- mkdir linux-arm
- ln dnscrypt-proxy linux-arm/
- ln ../LICENSE example-dnscrypt-proxy.toml example-*.txt linux-arm/
- tar czpvf dnscrypt-proxy-linux_arm-${TRAVIS_TAG:-dev}.tar.gz linux-arm
- go clean
- env CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -mod vendor -ldflags="-s -w"
- mkdir linux-arm64
- ln dnscrypt-proxy linux-arm64/
- ln ../LICENSE example-dnscrypt-proxy.toml example-*.txt linux-arm64/
- tar czpvf dnscrypt-proxy-linux_arm64-${TRAVIS_TAG:-dev}.tar.gz linux-arm64
- go clean
- env CGO_ENABLED=0 GOOS=linux GOARCH=mips GOMIPS=softfloat go build -mod vendor -ldflags="-s -w"
- mkdir linux-mips
- ln dnscrypt-proxy linux-mips/
- ln ../LICENSE example-dnscrypt-proxy.toml example-*.txt linux-mips/
- tar czpvf dnscrypt-proxy-linux_mips-${TRAVIS_TAG:-dev}.tar.gz linux-mips
- go clean
- env CGO_ENABLED=0 GOOS=linux GOARCH=mipsle GOMIPS=softfloat go build -mod vendor -ldflags="-s -w"
- mkdir linux-mipsle
- ln dnscrypt-proxy linux-mipsle/
- ln ../LICENSE example-dnscrypt-proxy.toml example-*.txt linux-mipsle/
- tar czpvf dnscrypt-proxy-linux_mipsle-${TRAVIS_TAG:-dev}.tar.gz linux-mipsle
- go clean
- env CGO_ENABLED=0 GOOS=linux GOARCH=mips64 go build -mod vendor -ldflags="-s -w"
- mkdir linux-mips64
- ln dnscrypt-proxy linux-mips64/
- ln ../LICENSE example-dnscrypt-proxy.toml example-*.txt linux-mips64/
- tar czpvf dnscrypt-proxy-linux_mips64-${TRAVIS_TAG:-dev}.tar.gz linux-mips64
- go clean
- env CGO_ENABLED=0 GOOS=linux GOARCH=mips64le go build -mod vendor -ldflags="-s -w"
- mkdir linux-mips64le
- ln dnscrypt-proxy linux-mips64le/
- ln ../LICENSE example-dnscrypt-proxy.toml example-*.txt linux-mips64le/
- tar czpvf dnscrypt-proxy-linux_mips64le-${TRAVIS_TAG:-dev}.tar.gz linux-mips64le
- go clean
- env GOOS=darwin GOARCH=amd64 go build -mod vendor -ldflags="-s -w"
- mkdir macos
- ln dnscrypt-proxy macos/
- ln ../LICENSE example-dnscrypt-proxy.toml example-*.txt macos/
- tar czpvf dnscrypt-proxy-macos-${TRAVIS_TAG:-dev}.tar.gz macos
- go clean
- env CC=arm-linux-androideabi-clang CXX=arm-linux-androideabi-clang++
CGO_ENABLED=1
GOOS=android GOARCH=arm GOARM=7 go build -mod vendor -ldflags="-s -w"
- mkdir android-arm
- ln dnscrypt-proxy android-arm/
- cp ../LICENSE example-dnscrypt-proxy.toml example-*.txt android-arm/
- zip -9 -r dnscrypt-proxy-android_arm-${TRAVIS_TAG:-dev}.zip android-arm
- go clean
- env CC=aarch64-linux-android-clang CXX=aarch64-linux-android-clang++
CGO_ENABLED=1
GOOS=android GOARCH=arm64 go build -mod vendor -ldflags="-s -w"
- mkdir android-arm64
- ln dnscrypt-proxy android-arm64/
- cp ../LICENSE example-dnscrypt-proxy.toml example-*.txt android-arm64/
- zip -9 -r dnscrypt-proxy-android_arm64-${TRAVIS_TAG:-dev}.zip android-arm64
- go clean
- env CC=i686-linux-android-clang CXX=i686-linux-android-clang++
CGO_ENABLED=1 GOOS=android
GOARCH=386 go build -mod vendor -ldflags="-s -w"
- mkdir android-i386
- ln dnscrypt-proxy android-i386/
- cp ../LICENSE example-dnscrypt-proxy.toml example-*.txt android-i386/
- zip -9 -r dnscrypt-proxy-android_i386-${TRAVIS_TAG:-dev}.zip android-i386
- go clean
- env CC=x86_64-linux-android-clang CXX=x86_64-linux-android-clang++
CGO_ENABLED=1
GOOS=android GOARCH=amd64 go build -mod vendor -ldflags="-s -w"
- mkdir android-x86_64
- ln dnscrypt-proxy android-x86_64/
- cp ../LICENSE example-dnscrypt-proxy.toml example-*.txt android-x86_64/
- zip -9 -r dnscrypt-proxy-android_x86_64-${TRAVIS_TAG:-dev}.zip android-x86_64
after_success:
- ls -l dnscrypt-proxy-*.tar.gz dnscrypt-proxy-*.zip
deploy:
provider: releases
api_key:
secure: J3K/wo3oW/ySl6X4Zk5PX+EVy4fa0qa4fbpKNivogch5yjYw2pgrlSvwto9TM12Gxi4tTMKiWYK4YBapNf+tm501s4OyS1G1rJR1fZ+iyaHgGBLD+QppbivZt+P7Do56agSili68Zcgm7MQfZbvOq9z42z3AJ71+UNTJmTp63voaAuyOF/VdLsmJHMd/5nmFJH6mfMrgMs720GCWxFgdq3NRM2AdVldsp1YmNb4qKqIzunmfxqG9TqVlpq35tNOhWA/Ll3rbsiDVeUpBAW5ked/qHyGRkFVk44O6cPSFGe035Txx0JviBshGxsNSP+aJL9T55hIj1dmuk6g5uhPqABU/zcdJvXOv11oqJuV/DGHO31UfVN6u744LJY6Y1lkd+UUNiOJDPGC80+6M2GbP7BFMZiO5qnYkxzktnYg9b6zIPwmj96XZSniDTAn+qemJf2S8rzShvBtWX29Q4odIaCfFUY8i0muowQ4Vep5S5FqVG+r/rQTXOUIUsNv4r/gP/y5hdtOMC2r1VSvWk068upmW6ovCtcmTghSfYcLCG5r+g5OE2mKj9kbx6RQMspewk9+pvOhNZKXsn/AIvvDC4V46MaDjFkdYN0VbsYB5NH11DGCPH7vDwJnAzzMWnofCkiTG07dJYlLUnD9iUgYoNkrxivAgQKnDP8C6Ka0RGdk=
file:
- dnscrypt-proxy-*.tar.gz
- dnscrypt-proxy-*.zip
- dnscrypt-proxy-*.minisig
file_glob: true
skip_cleanup: true
on:
repo: DNSCrypt/dnscrypt-proxy
tags: true
before_deploy:
- mkdir -p /tmp/bin /tmp/lib /tmp/include
- export LD_LIBRARY_PATH=/tmp/lib:$LD_LIBRARY_PATH
- export PATH=/tmp/bin:$PATH
- git clone --depth 1 https://github.com/jedisct1/libsodium.git --branch=stable
- cd libsodium
- env ./configure --disable-dependency-tracking --prefix=/tmp
- make -j$(nproc) install
- cd -
- git clone --depth 1 https://github.com/jedisct1/minisign.git
- cd minisign/src
- gcc -O2 -o /tmp/bin/minisign -I/tmp/include -L/tmp/lib *.c -lsodium
- cd -
- minisign -v
- echo '#' > /tmp/minisign.key
- echo "$MINISIGN_SK" >> /tmp/minisign.key
- echo | minisign -s /tmp/minisign.key -Sm dnscrypt-proxy-*.tar.gz dnscrypt-proxy-*.zip
before_install:
- NDK_VER=r18
- curl -LO http://dl.google.com/android/repository/android-ndk-${NDK_VER}-linux-x86_64.zip
- unzip -q android-ndk-${NDK_VER}-linux-x86_64.zip -d $HOME
- rm android-ndk-${NDK_VER}-linux-x86_64.zip
- NDK_TOOLS=$HOME/android-ndk-${NDK_VER}
- NDK_STANDALONE=$HOME/ndk-standalone-${NDK_VER}
- MAKE_TOOLCHAIN=$NDK_TOOLS/build/tools/make_standalone_toolchain.py
- for arch in x86 arm; do python $MAKE_TOOLCHAIN --arch $arch --api 19
--install-dir $NDK_STANDALONE/$arch; PATH=$PATH:$NDK_STANDALONE/$arch/bin; done
- for arch in x86_64 arm64; do python $MAKE_TOOLCHAIN --arch $arch --api 21
--install-dir $NDK_STANDALONE/$arch; PATH=$PATH:$NDK_STANDALONE/$arch/bin; done
- rm -rf $NDK_TOOLS
env:
global:
- secure: cuTXb4v5NwTr1XmkiHGkFir8fMiiBMnraCrls3thdDRlSTix0CiQc/H5Vh8SHauuG6VwVyrCT/Xsf0UQUmnULkPHjvuiNehb+bG4J3fz7hF94glBdQ8vxTuMmnHfJEYTQRLwCsWMBEC1wekw13O8J/0opFNG5neduns3Z1/rD5VSlBwgc8/4lomEp0fadIvzLeS7f5mxeXAD5Z9KBmc09uCjxVoF9Qsk1r901B0c0RMxIbJWyW9ZhDIVr/aEUN/tU0EXMKOA85sizg2moAigb8RZ1WCTh7utLGKpAyQegNk/unkksKMzZFkUCwHkrxlujwoe93wUS4rvZ+3nHMtLQdR+OfMeVs4/zvvQVq2f3bOXgkxgvhq6Bop0RK0xyEJffa5hUFbGNKhIFkLFLn1Ok28t2q7NOFPr0H2egHlkwgPztyhYMYb9C5PW4zd9buI0LS5452C4jXH5raBMfx844wTzaBbN689AKiYb84Qesqczss/o7eC7V48kh823dlZ/s2//gtp1ceqdAtNvp4dy7X/ECA/vNlpYisrtkR/CsFpJjGoTvS37leVMpmc5bn39dkoa5ZLliu7CaFRefbavcWWEImVStll9FBQ6+Ck9+41gczl9Rr7eGIV9ZZ/fmdkLNgxIpoAhZRee/dZD+/0gUExHxXXn10MqPuNVytVPiuU=

ChangeLog

@ -1,389 +1,4 @@
# Version 2.1.8
- Dependencies have been updated, notably the QUIC implementation,
which could be vulnerable to denial-of-service attacks.
- In forwarding rules, the target can now optionally include a
non-standard DNS port number. The port number is also now optional when
using IPv6.
- An annoying log message related to permissions on Windows has been
suppressed.
- Resolver IP addresses can now be refreshed more frequently.
Additionally, jitter has been introduced to prevent all resolvers from
being refreshed simultaneously. Further changes have been implemented
to mitigate issues arising from multiple concurrent attempts to resolve
a resolver's IP address.
- An empty value for "tls_cipher_suite" is now equivalent to leaving
the property undefined. Previously, it disabled all TLS cipher suites,
which had little practical justification.
- In forwarding rules, an optional `*.` prefix is now accepted.
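As an illustration of the forwarding syntax described above, a hypothetical
`forwarding-rules.txt` could contain entries along these lines (the names,
addresses and ports are placeholders, not defaults):
    example.home          192.168.1.1
    *.internal.example    10.0.0.1:5353,$BOOTSTRAP
    ipv6.example          [2001:db8::53]:5353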
# Version 2.1.7
- This version reintroduces support for XSalsa20 encryption in DNSCrypt,
which was removed in 2.1.6. Unfortunately, a bunch of servers still
only support that encryption system.
- A check for lying resolvers was added for DNSCrypt, similar to the one
that was already present for DoH and ODoH.
- Binary packages for Windows/ARM are now available, albeit not in MSI
format yet.
# Version 2.1.6
- Forwarding: in the list of servers for a zone, the `$BOOTSTRAP` keyword
can be included as a shortcut to forward to the bootstrap servers.
And the `$DHCP` keyword can be included to forward to the DNS resolvers
provided by the local DHCP server. Based on work by YX Hao, thanks!
DHCP forwarding should be considered experimental and may not work on all
operating systems. A rule for a zone can mix and match multiple forwarder
types, such as `10.0.0.1,10.0.0.254,$DHCP,192.168.1.1,$BOOTSTRAP`.
Note that this is not implemented for captive portals yet.
- Lying resolvers are now skipped, instead of just printing an error.
This doesn't apply to captive portal and forwarding entries, which are the
only reasonable use case for lying resolvers.
- Support for XSalsa20 in DNSCrypt has been removed. This was not documented,
and was superseded by XChaCha20 in 2016.
- Source files are now fetched with compression.
- DNS64: compatibility has been improved.
- Forwarding: the root domain (`.`) can now be forwarded.
- The ARC caching algorithm has been replaced by the SIEVE algorithm.
- Properties of multiple servers are now updated simultaneously.
The concurrency level can be adjusted with the new `cert_refresh_concurrency`
setting. Contributed by YX Hao.
- MSI packages for DNSCrypt can now easily be built.
- New command-line flag: `-include-relays` to include relays in `-list` and
`-list-all`.
- Support for DNS extended error codes has been added.
- Documentation updates, bug fixes, dependency updates.
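For instance, relays can now be included in the server listing with a command
along these lines (a usage sketch, not authoritative):
    ./dnscrypt-proxy -config dnscrypt-proxy.toml -list-all -include-relays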
# Version 2.1.5
- dnscrypt-proxy can be compiled with Go 1.21.0+
- Responses to blocked queries now include extended error codes
- Reliability of connections using HTTP/3 has been improved
- New configuration directive: `tls_key_log_file`. When defined, this
is the path to a file where TLS secret keys will be written to, so
that DoH traffic can be locally inspected.
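A minimal sketch of that directive in the configuration file (the path is an
arbitrary example):
    tls_key_log_file = '/tmp/dnscrypt-proxy-keys.log'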
# Version 2.1.4
- Fixes a regression from version 2.1.3: when cloaking was enabled,
blocked responses were returned for records that were not A/AAAA/PTR
even for names that were not in the cloaked list.
# Version 2.1.3
- DNS-over-HTTP/3 (QUIC) should be more reliable. In particular,
version 2.1.2 required another (non-QUIC) resolver to be present for
bootstrapping, or the resolver's IP address to be present in the
stamp. This is not the case any more.
- dnscrypt-proxy is now compatible with Go 1.20+
- Commands (-check, -show-certs, -list, -list-all) now ignore log
files and directly output the result to the standard output.
- The `cert_ignore_timestamp` configuration switch is now documented.
It allows ignoring timestamps for DNSCrypt certificate verification,
until a first server is available. This should only be used on devices
that don't have any ways to set the clock before DNS service is up.
However, a safer alternative remains to use an NTP server with a fixed
IP address (such as time.google.com), configured in the captive portals
file.
- Cloaking: when a name is cloaked, unsupported record types now
return a blocked response rather than the actual records.
- systemd: report Ready earlier as dnscrypt-proxy can itself manage
retries for updates/refreshes.
# Version 2.1.2
- Support for DoH over HTTP/3 (DoH3, HTTP over QUIC) has been added.
Compatible servers will automatically use it. Note that QUIC uses UDP
(usually over port 443, like DNSCrypt) instead of TCP.
- In previous versions, memory usage kept growing due to channels not
being properly closed, causing goroutines to pile up. This was fixed,
resulting in an important reduction of memory usage. Thanks to
@lifenjoiner for investigating and fixing this!
- DNS64: `CNAME` records are now translated like other responses.
Thanks to @ignoramous for this!
- A relay whose name has been configured, but doesn't exist in the
list of available relays is now a hard error. Thanks to @lifenjoiner!
- Mutexes/locking: bug fixes and improvements, by @ignoramous
- Official packages now include linux/riscv64 builds.
- `dnscrypt-proxy -resolve` now reports if ECS (EDNS-clientsubnet) is
supported by the server.
- `dnscrypt-proxy -list` now includes ODoH (Oblivious DoH) servers.
- Local DoH: queries made using the `GET` method are now handled.
- The service can now be installed on OpenRC-based systems.
- `PTR` queries are now supported for cloaked domains. Contributed by
Ian Bashford, thanks!
# Version 2.1.1
This is a bugfix only release, addressing regressions introduced in
version 2.1.0:
- When using DoH, cached responses were not served any more when
experiencing connectivity issues. This has been fixed.
- Time attributes in allow/block lists were ignored. This has been
fixed.
- The TTL as served to clients is now rounded and starts decreasing
before the first query is received.
- Time-based rules are properly handled again in
generate-domains-blocklist.
- DoH/ODoH: entries with an IP address and using a non-standard port
used to require help from a bootstrap resolver. This is not the case
any more.
# Version 2.1.0
- `dnscrypt-proxy` now includes support for Oblivious DoH.
- If the proxy is overloaded, cached and synthetic queries now keep being
served, while non-cached queries are delayed.
- A deprecation warning was added for `fallback_resolvers`.
- Source URLs are now randomized.
- On some platforms, redirecting the application log to a file was not
compatible with user switching; this has been fixed.
- `fallback_resolvers` was renamed to `bootstrap_resolvers` for
clarity. Please update your configuration file accordingly.
# Version 2.0.45
- Configuration changes (to be required in versions 2.1.x):
* `[blacklist]` has been renamed to `[blocked_names]`
* `[ip_blacklist]` has been renamed to `[blocked_ips]`
* `[whitelist]` has been renamed to `[allowed_names]`
* `generate-domains-blacklist.py` has been renamed to
`generate-domains-blocklist.py`, and the configuration files have been
renamed as well.
- `dnscrypt-proxy -resolve` has been completely revamped, and now requires
the configuration file to be accessible. It will send a query to an IP address
of the `dnscrypt-proxy` server by default. Sending queries to arbitrary
servers is also supported with the new `-resolve name,address` syntax.
- Relay lists can be set to `*` for automatic relay selection. When a wildcard
is used, either for the list of servers or relays, the proxy ensures that
relays and servers are on distinct networks.
- Lying resolvers are detected and reported.
- New return code: `NOT_READY` for queries received before the proxy has
been initialized.
- Server lists can't be older than a week any more, even if directory
permissions are incorrect and cache files cannot be written.
- macOS/arm64 is now officially supported.
- New feature: `allowed_ips`, to configure a set of IP addresses to
never block no matter what DNS name resolves to them.
- Hard-coded IP addresses can be immediately returned for test queries
sent by operating systems in order to check for connectivity and captive
portals. Such responses can be sent even before an interface is considered
as enabled by the operating system. This can be configured in a new section
called `[captive_portals]`.
- On Linux, OpenBSD and FreeBSD, `listen_addresses` can now include IP
addresses that haven't been assigned to an interface yet.
- The logo has been tweaked to look fine on a dark background.
- `generate-domains-blocklist.py`: regular expressions are now ignored in
time-based entries.
- Minor bug fixes and logging improvements.
- Cloaking plugin: if an entry has multiple IP addresses for a type,
all the IP addresses are now returned instead of a random one.
- Static entries can now include DNSCrypt relays.
- Name blocking: aliases relying on `SVCB` and `HTTPS` records can now
be blocked in addition to aliases via regular `CNAME` records.
- EDNS-Client-Subnet information can be added to outgoing queries.
Instead of sending the actual client IP, ECS information is user
configurable, and IP addresses will be randomly chosen for every query.
- Initial DoH queries are now checked using random names in order to
properly measure CDNs such as Tencent that ignore the padding.
- DoH: the `max-stale` cache control directive is now present in queries.
- Logs can now be sent to `/dev/stdout` instead of actual files.
- User switching is now supported on macOS.
- New download mirror (https://download.dnscrypt.net) for resolvers,
relays and parental-control.
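A usage sketch of the revamped `-resolve` command described in this entry
(the name and server address are placeholders):
    ./dnscrypt-proxy -config dnscrypt-proxy.toml -resolve example.com
    ./dnscrypt-proxy -config dnscrypt-proxy.toml -resolve example.com,9.9.9.9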
Thanks to the nice people who contributed to this release:
- Ian Bashford
- Will Elwood
- Alison Winters
- Krish De Souza
- @hugepants
- @IceCodeNew
- @lifenjoiner
- @mibere
- @jacob755
- @petercooperjr
- @yofiji
# Version 2.0.44
- More updates to the set of block lists, thanks again to IceCodeNew.
- Netprobes and listening sockets are now ignored when the `-list`,
`-list-all`, `-show-certs` or `-check` command-line switches are used.
- `tls_client_auth` was renamed to `doh_client_x509_auth`. A section
with the previous name is temporarily ignored if empty, but will error
out if not.
- Unit tests are now working on 32-bit systems. Thanks to Will Elwood
and @lifenjoiner.
# Version 2.0.43
- Built-in support for DNS64 translation has been implemented.
(Contributed by Sergey Smirnov, thanks!)
- Connections to DoH servers can be authenticated using TLS client
certificates (Contributed by Kevin O'Sullivan, thanks!)
- Multiple stamps are now allowed for a single server in resolvers
and relays lists.
- Android: the time zone for log files is now set to the system
time zone.
- Quite a lot of updates and additions have been made to the
example domain block lists. Thanks to `IceCodeNew`!
- Cached configuration files can now be temporarily used if
they are out of date but bootstrapping is impossible. Contributed by
`lifenjoiner`, thanks!
- Precompiled macOS binaries are now notarized.
- `generate-domains-blacklists` now tries to deduplicate entries
clobbered by wildcard rules. Thanks to `Huhni`!
- `generate-domains-blacklists` can now directly write lists to a
file with the `-o` command-line option.
- cache files are now downloaded as the user the daemon will be running
as. This fixes permission issues at startup time.
- Forwarded queries are now subject to global timeouts, and can be
forced to use TCP.
- The `ct` parameter has been removed from DoH queries, as Google doesn't
require it any more.
- Service installation is now supported on FreeBSD.
- When stored into a file, service logs now only contain data from the most
recent launch. This can be changed with the new `log_file_latest` option.
- Breaking change: the `tls_client_auth` section was renamed to
`doh_client_x509_auth`. If you had a tls_client_auth section in the
configuration file, it needs to be updated.
# Version 2.0.42
- The current versions of the `dnsdist` load balancer (presumably used
by quad9, cleanbrowsing, qualityology, freetsa.org, ffmuc.net,
opennic-bongobow, sth-dnscrypt-se, ams-dnscrypt-nl and more)
is preventing queries over 1500 bytes from being received over UDP.
Temporary workarounds have been introduced to improve reliability
with these resolvers for regular DNSCrypt. Unfortunately, anonymized
DNS cannot be reliable until the issue is fixed server-side. `dnsdist`
authors are aware of it and are working on a fix.
- New option in the `[anonymized_dns]` section: `skip_incompatible`,
to ignore resolvers incompatible with Anonymized DNS instead of
using them without a relay.
- The server latency benchmark is faster while being able to perform
more retries if necessary.
- Continuous integration has been moved to GitHub Actions.
# Version 2.0.41
- Precompiled ARM binaries are compatible with ARMv5 CPUs. The
default arm builds were not compatible with older CPUs when compiled
with Go 1.14. mips64 binaries are explicitly compiled with `softfloat`
to improve compatibility.
- Quad9 seems to be only blocking fragmented queries over UDP for
some networks. They have been removed from the default list of broken
resolvers; runtime detection of support for fragments should now do
the job.
- Runtime detection of support for fragments was actually enabled.
# Version 2.0.40
- Servers blocking fragmented queries are now automatically detected.
- The server name is now only present in query logs when an actual
upstream server was required to resolve a query.
- TLS client authentication has been added for DoH.
- The Firefox plugin is now skipped for connections coming from the
local DoH server.
- DoH RTT computation is now more accurate, especially when CDNs are
in the middle.
- The forwarding plugin is now more reliable, and handles retries over
TCP.
# Version 2.0.39
- The Firefox Local DoH service didn't work properly in version 2.0.38;
this has been fixed. Thanks to Simon Brand for the report!
# Version 2.0.38
- Entries from lists (forwarding, blacklists, whitelists) now support
inline comments.
- Reliability improvement: queries over UDP are retried after a timeout
instead of solely relying on the client.
- Reliability improvement: during temporary network outages, cached records
are now served even if they are stale.
- Bug fix: SOCKS proxies and DNS relays can be combined.
- New feature: multiple fallback resolvers are now supported (see the
new `fallback_resolvers` option. Note that `fallback_resolver` is
still supported for backward compatibility).
- Windows: the service can be installed with a configuration file
stored separately from the application.
- Security (affecting DoH): precompiled binaries of dnscrypt-proxy 2.0.37 are
built using Go 1.13.7 that fixes a TLS certificate parsing issue present in
previous versions of the compiler.
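A sketch of the multiple-fallback syntax mentioned above (the addresses are
examples only):
    fallback_resolvers = ['9.9.9.9:53', '8.8.8.8:53']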
# Version 2.0.36
- New option: `block_undelegated`. When enabled, `dnscrypt-proxy` will
directly respond to queries for locally-served zones (https://sk.tl/2QqB971U)
and nonexistent zones that should have been kept local, but are frequently
leaked. This reduces latency and improves privacy.
- Conformance: the `DO` bit is now set in synthetic responses if it was
set in a question, and the `AD` bit is cleared.
- The `miekg/dns` module was updated to version 1.1.26, which fixes a
security issue affecting non-encrypted/non-authenticated DNS traffic. In
`dnscrypt-proxy`, this only affects the forwarding feature.
# Version 2.0.35
- New option: `block_unqualified` to block `A`/`AAAA` queries with
unqualified host names. These will very rarely get an answer from upstream
resolvers, but can leak private information to these, as well as to root
servers.
- When a `CNAME` pointer is blocked, the original query name is now logged
along with the pointer. This makes it easier to know what the original
query name was, so it can be whitelisted, or what the pointer was, so it
can be removed from the blacklist.
# Version 2.0.34
- Blacklisted names are now also blocked if they appear in `CNAME`
pointers.
- `dnscrypt-proxy` can now act as a local DoH *server*. Firefox can
be configured to use it, so that ESNI can be enabled without bypassing
your DNS proxy.
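The local DoH listener gets its own configuration section; a minimal sketch,
mirroring the test configuration earlier in this comparison (certificate
paths and port are examples):
    [local_doh]
    listen_addresses = ['127.0.0.1:3053']
    cert_file = 'localhost.pem'
    cert_key_file = 'localhost.pem'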
# Version 2.0.33
- Fixes an issue that caused some valid queries to return `PARSE_ERROR`.
# Version 2.0.32
- On certificate errors, the server name is now logged instead of the
provider name, which is generally more useful.
- IP addresses for DoH servers that require DNS lookups are now cached
for at least 12 hours.
- `ignore_system_dns` is now set to `true` by default.
- A workaround for a bug in Cisco servers has been implemented.
- A corrupted or incomplete resolvers list is now ignored, keeping the
last good known cached list until the next update. In addition, logging was
improved and unit tests were also added. Awesome contribution from William
Elwood, thanks!
- On Windows, the network probe immediately returned instead of blocking
if `netprobe_timeout` was set to `-1`. This has been fixed.
- Expired cached IP addresses now have a grace period, to avoid breaking the
service if they temporarily can't be refreshed.
- On Windows, the service now returns immediately, solving a long-standing
issue when initialization took more than 30 seconds ("The service did not
respond to the start or control request in a timely fashion"). Fantastic
work by Alison Winters, thanks!
- The `SERVER_ERROR` error code has been split into two new error codes:
`NETWORK_ERROR` (self-explanatory) and `SERVFAIL` (a response was returned,
but it includes a `SERVFAIL` error code).
- Responses are now always compressed.
# Version 2.0.31
- This version fixes two regressions introduced in version 2.0.29:
DoH server couldn't be reached over IPv6 any more, and the proxy
couldn't be interrupted while servers were being benchmarked.
# Version 2.0.30
- This version fixes a startup issue introduced in version 2.0.29,
on systems for which the service cannot be automatically installed
(such as OpenBSD and FreeBSD). Reported by @5ch17 and Vinícius Zavam,
and fixed by Will Elwood, thanks!
# Version 2.0.29
- Support for Anonymized DNS has been added!
- Wait before stopping, fixing an issue with Unbound (thanks to
Vladimir Bauer)
- DNS stamps are now included in the -list-all -json output
- The netprobe_timeout setting from the configuration file or
command-line was ignored. This has been fixed.
- The TTL or cloaked entries can now be adjusted (thanks to Markus
Linnala)
- Cached IP address from DoH servers now expire (thanks to Markus
Linnala)
- DNSCrypt certificates can be fetched over Tor and SOCKS proxies
- Retries over TCP are faster
- Improved logging (thanks to Alison Winters)
- Ignore non-TXT records in certificate responses (thanks to Vladimir
Bauer)
- A lot of internal cleanups, thanks to Markus Linnala.
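For the Anonymized DNS support mentioned at the top of this list, a minimal routing sketch, assuming the `[anonymized_dns]`/`routes` layout used by the proxy's example configuration; the server and relay names below are placeholders:

```toml
[anonymized_dns]
  # Reach 'example-server' through one of two relays, so that the
  # server never sees the client IP address.
  routes = [
    { server_name = 'example-server', via = ['anon-relay-1', 'anon-relay-2'] }
  ]
```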
# Version 2.0.28
* Version 2.0.28
- Invalid server entries are now skipped instead of preventing a
source from being used. Thanks to Alison Winters for the contribution!
- Truncated responses are immediately retried over TCP instead of
@ -396,14 +11,14 @@ bytes. This also reduces latency.
and cloaked responses. And the forwarder is logged instead of the
regular server for forwarded responses.
# Version 2.0.27
* Version 2.0.27
- The X25519 implementation was changed from using the Go standard
implementation to using Cloudflare's CIRCL library. Unfortunately,
CIRCL appears to be broken on big-endian systems. That change has been
reverted.
- All the dependencies have been updated.
# Version 2.0.26
* Version 2.0.26
- A new plugin was added to prevent Firefox from bypassing the system
DNS settings.
- New configuration parameter to set how to respond to blocked
@ -423,12 +38,12 @@ bootstrapped.
- A new option, `query_meta`, is now available to add optional records
to client queries.
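A short sketch of the `query_meta` setting described above; the key/value pairs are placeholders:

```toml
# Optional records attached to outgoing client queries
query_meta = ["key1:value1", "token:ExampleToken"]
```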
# Version 2.0.25
* Version 2.0.25
- The example IP address for network probes didn't work on Windows.
The example configuration file has been updated and the fallback
resolver IP is now used when no netprobe address has been configured.
# Version 2.0.24
* Version 2.0.24
- The query log now includes the time it took to complete the
transaction, the name of the resolver that sent the response and whether
the response was served from the cache. Thanks to Ferdinand Holzer for
@ -450,7 +65,7 @@ Thanks to @inkblotadmirer for the report.
- Resolvers are now tried in random order to avoid favoring the first
ones at startup.
# Version 2.0.23
* Version 2.0.23
- Binaries for FreeBSD/armv7 are now available.
- .onion servers are now automatically ignored if Tor routing is not
enabled.
@ -459,15 +74,15 @@ using proxies.
- DNSCrypt communications are now automatically forced to use TCP
when a SOCKS proxy has been set up.
# Version 2.0.22
* Version 2.0.22
- The previous version had issues with the .org TLD when used in
conjunction with dnsmasq. This has been fixed.
# Version 2.0.21
* Version 2.0.21
- The change to run the Windows service as `NT AUTHORITY\NetworkService`
has been reverted, as it was reported to break logging (Windows only).
# Version 2.0.20
* Version 2.0.20
- Startup is now *way* faster, especially when using DoH servers.
- A new action, `CLOAK`, is logged when queries are being cloaked.
- A cloaking rule can now map to multiple IPv4 and IPv6 addresses,
@ -481,7 +96,7 @@ generate-domains-blacklist.py script.
script.
- The Windows service is now installed as `NT AUTHORITY\NetworkService`.
# Version 2.0.19
* Version 2.0.19
- The value for `netprobe_timeout` was read from the command-line, but
not from the configuration file any more. This is a regression introduced
in the previous version and has been fixed.
@ -490,7 +105,7 @@ in the previous version, that has been fixed.
queries with the POST method in order to work around badly configured
proxies.
# Version 2.0.18
* Version 2.0.18
- Official builds now support TLS 1.3.
- The timeout for the initial connectivity check can now be set from
the command line.
@ -499,7 +114,7 @@ the command line.
- In addition to SOCKS, HTTP and HTTPS proxies are now supported for
DoH servers.
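A sketch of pointing DoH traffic at an HTTP proxy, using the `http_proxy` setting that also appears (commented out) in the example configuration further down this page; the address is a placeholder:

```toml
# Route DoH queries through a local HTTP CONNECT proxy
http_proxy = 'http://127.0.0.1:8888'
```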
# Version 2.0.17
* Version 2.0.17
- Go >= 1.11 is now supported
- The flipside is that Windows XP is not supported any more :(
- When dropping privileges, there is no supervisor process any more.
@ -508,7 +123,7 @@ of flags and payload sizes. This is not the case any more.
- DoH queries are smaller, since workarounds are not required any more
after Google updated their implementation.
# Version 2.0.16
* Version 2.0.16
- On Unix-like systems, the server can run as an unprivileged user,
and the main process will automatically restart if an error occurs.
- pledge() on OpenBSD.
@ -520,7 +135,7 @@ cloaking module for local development.
- The proxy doesn't quit any more if new TCP connections cannot be
created.
# Version 2.0.15
* Version 2.0.15
- Support for proxies (HTTP/SOCKS) was added. All it takes to route
all TCP queries to Tor is add `proxy = "socks5://127.0.0.1:9050"` to
the configuration file (a sketch follows this version's notes).
@ -529,16 +144,16 @@ transaction.
- Pre-built binaries for Linux are statically linked on all
architectures.
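A sketch of the Tor routing described above; since Tor only carries TCP, forcing TCP is the usual companion setting:

```toml
# Route outgoing connections through a local Tor SOCKS port
proxy = 'socks5://127.0.0.1:9050'
# Tor doesn't support UDP, so make DNSCrypt servers use TCP as well
force_tcp = true
```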
# Version 2.0.14
* Version 2.0.14
- Supports DNS-over-HTTPS draft 08.
- Netprobes don't use port 0 by default, as this causes issues with
Little Snitch and FreeBSD.
# Version 2.0.13
* Version 2.0.13
- This version fixes a crash when using DoH for queries whose size
was a multiple of the block size. Reported by @char101, thanks!
# Version 2.0.12
* Version 2.0.12
- Further compatibility fixes for Alpine Linux/i386 and Android/i386
have been made. Thanks to @aead for his help!
- The proxy will now wait for network connectivity before starting.
@ -547,7 +162,7 @@ before the network is fully configured.
- The IPv6 blocking module now returns synthetic SOA records to
improve compatibility with downstream resolvers and stub resolvers.
# Version 2.0.11
* Version 2.0.11
- This release fixes a long-standing bug that caused the proxy to
block or crash when Position-Independent Executables were produced.
This bug only showed up when compiled on (not for) Alpine Linux and
@ -555,13 +170,13 @@ Android, for some CPU architectures.
- New configuration settings: cache_neg_min_ttl and
cache_neg_max_ttl, to clamp the negative caching TTL.
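A sketch of clamping the negative-cache TTL with the two new settings; the values are arbitrary examples, in seconds:

```toml
# Keep negative (NXDOMAIN / empty) answers cached for at least 60 s
cache_neg_min_ttl = 60
# ... and at most 600 s
cache_neg_max_ttl = 600
```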
# Version 2.0.10
* Version 2.0.10
- This version fixes a crash when an incomplete size is sent by a
local client for a query over TCP.
- Slight performance improvement of DNSCrypt on non-Intel CPUs such
as Raspberry Pi.
# Version 2.0.9
* Version 2.0.9
- Whitelists have been implemented: once a name matches a pattern in
the whitelist, rules from the name-based and IP-based blacklists will
be bypassed. Whitelists support the same patterns as blacklists, as
@ -587,7 +202,7 @@ especially on Mips and ARM systems.
- The ephemeral keys mode of dnscrypt-proxy v1.x was reimplemented: this
creates a new unique key for every single query.
# Version 2.0.8
* Version 2.0.8
- Multiple URLs can be defined for a source in order to improve
resiliency when servers are temporarily unreachable.
- Connections over IPv6 will be preferred over IPv4 for DoH servers
@ -601,41 +216,41 @@ Android/x86.
- `dnscrypt-proxy -list -json` and `-list-all -json` now include the
remote server names and IP addresses.
# Version 2.0.7
* Version 2.0.7
- Bug fix: optional ports were not properly parsed with IPv6
addresses -- thanks to @bleeee for the report and fix.
- Bug fix: truncate TCP queries to the prefixed length.
- Certificates are force-refreshed after a time jump (e.g. when a
system resumes from hibernation).
# Version 2.0.6
* Version 2.0.6
- Automatic log files rotation was finally implemented.
- A new -pidfile command-line option to write the PID file was added.
# Version 2.0.5
* Version 2.0.5
- Fixes a crash occasionally happening when using DoH servers, with
stamps not containing any IP addresses, a DNSSEC-signed name, a
non-working system DNS configuration, and a fallback server supporting
DNSSEC.
# Version 2.0.4
* Version 2.0.4
- Fixes a regression with truncated packets. Thanks to @mazesy and
@the-w1nd for spotting a case triggering this!
# Version 2.0.3
* Version 2.0.3
- Load balancing: resolvers that respond promptly but with bogus
responses are now gradually removed from the preferred pool.
- Due to popular request, Android binaries are now available! Thanks
to @sporif for his help on getting these built.
- Binaries are built using Go 1.10-final.
# Version 2.0.2
* Version 2.0.2
- Properly error out on FreeBSD and other platforms where built-in
service installation is not supported yet.
- Improved load-balancing algorithm, which should result in lower
latency.
# Version 2.0.1
* Version 2.0.1
- Cached source data were not redownloaded if the proxy was used
without interruption. This has been fixed.
- If the network is down at startup time, fall back to cached source

LICENSE
View file

@ -1,15 +1,18 @@
ISC License
Copyright (c) 2018-2025, Frank Denis <j at pureftpd dot org>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
/*
* ISC License
*
* Copyright (c) 2018
* Frank Denis <j at pureftpd dot org>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/

View file

@ -1,16 +1,13 @@
# ![dnscrypt-proxy 2](https://raw.github.com/dnscrypt/dnscrypt-proxy/master/logo.png?3)
[![Financial Contributors on Open Collective](https://opencollective.com/dnscrypt/all/badge.svg?label=financial+contributors)](https://opencollective.com/dnscrypt)
[![DNSCrypt-Proxy Release](https://img.shields.io/github/release/dnscrypt/dnscrypt-proxy.svg?label=Latest%20Release&style=popout)](https://github.com/dnscrypt/dnscrypt-proxy/releases/latest)
[![Build Status](https://github.com/DNSCrypt/dnscrypt-proxy/actions/workflows/releases.yml/badge.svg)](https://github.com/DNSCrypt/dnscrypt-proxy/actions/workflows/releases.yml)
[![Financial Contributors on Open Collective](https://opencollective.com/dnscrypt/all/badge.svg?label=financial+contributors)](https://opencollective.com/dnscrypt) [![DNSCrypt-Proxy Release](https://img.shields.io/github/release/dnscrypt/dnscrypt-proxy.svg?label=Latest%20Release&style=popout)](https://github.com/dnscrypt/dnscrypt-proxy/releases/latest) [![Build Status](https://travis-ci.org/dnscrypt/dnscrypt-proxy.svg?branch=master)](https://travis-ci.org/dnscrypt/dnscrypt-proxy?branch=master) [![#dnscrypt-proxy:matrix.org](https://img.shields.io/matrix/dnscrypt-proxy:matrix.org.svg?label=DNSCrypt-Proxy%20Matrix%20Chat&server_fqdn=matrix.org&style=popout)](https://matrix.to/#/#dnscrypt-proxy:matrix.org)
## Overview
A flexible DNS proxy, with support for modern encrypted DNS protocols such as [DNSCrypt v2](https://dnscrypt.info/protocol), [DNS-over-HTTPS](https://www.rfc-editor.org/rfc/rfc8484.txt), [Anonymized DNSCrypt](https://github.com/DNSCrypt/dnscrypt-protocol/blob/master/ANONYMIZED-DNSCRYPT.txt) and [ODoH (Oblivious DoH)](https://github.com/DNSCrypt/dnscrypt-resolvers/blob/master/v3/odoh-servers.md).
A flexible DNS proxy, with support for modern encrypted DNS protocols such as [DNSCrypt v2](https://dnscrypt.info/protocol) and [DNS-over-HTTPS](https://www.rfc-editor.org/rfc/rfc8484.txt).
* **[dnscrypt-proxy documentation](https://dnscrypt.info/doc) ← Start here**
* [dnscrypt-proxy documentation](https://dnscrypt.info/doc) – This project's documentation (Wiki)
* [DNSCrypt project home page](https://dnscrypt.info/)
* [Discussions](https://github.com/DNSCrypt/dnscrypt-proxy/discussions)
* [DNS-over-HTTPS and DNSCrypt resolvers](https://dnscrypt.info/public-servers)
* [Server and client implementations](https://dnscrypt.info/implementations)
* [DNS stamps](https://dnscrypt.info/stamps)
@ -18,12 +15,12 @@ A flexible DNS proxy, with support for modern encrypted DNS protocols such as [D
## [Download the latest release](https://github.com/dnscrypt/dnscrypt-proxy/releases/latest)
Available as source code and pre-built binaries for most operating systems and architectures (see below).
Available as source code and pre-built binaries for most operating
systems and architectures (see below).
## Features
* DNS traffic encryption and authentication. Supports DNS-over-HTTPS (DoH) using TLS 1.3 and QUIC, DNSCrypt, Anonymized DNS and ODoH
* Client IP addresses can be hidden using Tor, SOCKS proxies or Anonymized DNS relays
* DNS traffic encryption and authentication. Supports DNS-over-HTTPS (DoH) using TLS 1.3, and DNSCrypt.
* DNS query monitoring, with separate log files for regular and suspicious queries
* Filtering: block ads, malware, and other unwanted content. Compatible with all DNS services
* Time-based filtering, with a flexible weekly schedule
@ -31,11 +28,11 @@ Available as source code and pre-built binaries for most operating systems and a
* DNS caching, to reduce latency and improve privacy
* Local IPv6 blocking to reduce latency on IPv4-only networks
* Load balancing: pick a set of resolvers, dnscrypt-proxy will automatically measure and keep track of their speed, and balance the traffic across the fastest available ones.
* Cloaking: like a `HOSTS` file on steroids, that can return preconfigured addresses for specific names, or resolve and return the IP address of other names. This can be used for local development as well as to enforce safe search results on Google, Yahoo, DuckDuckGo and Bing
* Cloaking: like a `HOSTS` file on steroids, that can return preconfigured addresses for specific names, or resolve and return the IP address of other names. This can be used for local development as well as to enforce safe search results on Google, Yahoo and Bing.
* Automatic background updates of resolvers lists
* Can force outgoing connections to use TCP
* Supports SOCKS proxies
* Compatible with DNSSEC
* Includes a local DoH server in order to support ECH (ESNI)
## Pre-built binaries
@ -57,15 +54,13 @@ Up-to-date, pre-built binaries are available for:
* Linux/mips64le
* Linux/x86
* Linux/x86_64
* macOS/arm64
* macOS/x86_64
* MacOS X
* NetBSD/x86
* NetBSD/x86_64
* OpenBSD/x86
* OpenBSD/x86_64
* Windows
* Windows 64 bit
* Windows ARM
How to use these files, as well as how to verify their signatures, are documented in the [installation instructions](https://github.com/dnscrypt/dnscrypt-proxy/wiki/installation).
@ -73,7 +68,7 @@ How to use these files, as well as how to verify their signatures, are documente
### Code Contributors
This project exists thanks to all the people who contribute.
This project exists thanks to all the people who contribute. [[Contribute](CONTRIBUTING.md)].
<a href="https://github.com/dnscrypt/dnscrypt-proxy/graphs/contributors"><img src="https://opencollective.com/dnscrypt/contributors.svg?width=890&button=false" /></a>
### Financial Contributors

View file

@ -1,21 +0,0 @@
FROM ubuntu:latest
MAINTAINER dnscrypt-authors
RUN apt-get update && \
apt-get install -y wget wine dotnet-sdk-6.0 && \
dpkg --add-architecture i386 && apt-get update && apt-get install -y wine32
ENV WINEPREFIX=/root/.wine32 WINEARCH=win32 WINEDEBUG=-all
RUN wget https://dl.winehq.org/wine/wine-mono/8.1.0/wine-mono-8.1.0-x86.msi && \
WINEPREFIX="$HOME/.wine32" WINEARCH=win32 wineboot --init && \
WINEPREFIX="$HOME/.wine32" WINEARCH=win32 wine msiexec /i wine-mono-8.1.0-x86.msi && \
mkdir $WINEPREFIX/drive_c/temp && \
apt-get install -y unzip && \
wget https://github.com/wixtoolset/wix3/releases/download/wix3112rtm/wix311-binaries.zip -nv -O wix.zip && \
unzip wix.zip -d /wix && \
rm -f wix.zip
WORKDIR /wix

View file

@ -1,13 +0,0 @@
# Scripts and utilities related to building an .msi (Microsoft Standard Installer) file.
## Docker test image for building an MSI locally
```sh
docker build . -f Dockerfile -t ubuntu:dnscrypt-msi
```
## Test building msi files for intel win32 & win64
```sh
./build.sh
```

View file

@ -1,30 +0,0 @@
#! /bin/sh
version=0.0.0
gitver=$(git describe --tags --always --match="[0-9]*.[0-9]*.[0-9]*" --exclude='*[^0-9.]*')
if [ "$gitver" != "" ]; then
version=$gitver
fi
# build the image by running: docker build . -f Dockerfile -t ubuntu:dnscrypt-msi
if [ "$(docker image list -q ubuntu:dnscrypt-msi)" = "" ]; then
docker build . -f Dockerfile -t ubuntu:dnscrypt-msi
fi
image=ubuntu:dnscrypt-msi
for arch in x64 x86; do
binpath="win32"
if [ "$arch" = "x64" ]; then
binpath="win64"
fi
src=$(
cd ../../dnscrypt-proxy/$binpath || exit
pwd
)
echo "$src"
docker run --rm -v "$(pwd)":/wixproj -v "$src":/src $image wine candle.exe -dVersion="$version" -dPlatform=$arch -dPath=\\src -arch $arch \\wixproj\\dnscrypt.wxs -out \\wixproj\\dnscrypt-$arch.wixobj
docker run --rm -v "$(pwd)":/wixproj -v "$src":/src $image wine light.exe -out \\wixproj\\dnscrypt-proxy-$arch-"$version".msi \\wixproj\\dnscrypt-$arch.wixobj -sval
done

View file

@ -1,60 +0,0 @@
<?xml version="1.0"?>
<?if $(var.Platform)="x64" ?>
<?define Program_Files="ProgramFiles64Folder"?>
<?else ?>
<?define Program_Files="ProgramFilesFolder"?>
<?endif ?>
<?ifndef var.Version?>
<?error Undefined Version variable?>
<?endif ?>
<?ifndef var.Path?>
<?error Undefined Path variable?>
<?endif ?>
<Wix xmlns="http://schemas.microsoft.com/wix/2006/wi">
<Product Id="*"
UpgradeCode="fbf99dd8-c21e-4f9b-a632-de53bb64c45e"
Name="dnscrypt-proxy"
Version="$(var.Version)"
Manufacturer="DNSCrypt"
Language="1033">
<Package InstallerVersion="200" Compressed="yes" Comments="Windows Installer Package" InstallScope="perMachine" />
<Media Id="1" Cabinet="product.cab" EmbedCab="yes" />
<MajorUpgrade DowngradeErrorMessage="A later version of [ProductName] is already installed. Setup will now exit." />
<Upgrade Id="fbf99dd8-c21e-4f9b-a632-de53bb64c45e">
<UpgradeVersion Minimum="$(var.Version)" OnlyDetect="yes" Property="NEWERVERSIONDETECTED" />
<UpgradeVersion Minimum="2.1.0" Maximum="$(var.Version)" IncludeMinimum="yes" IncludeMaximum="no" Property="OLDERVERSIONBEINGUPGRADED" />
</Upgrade>
<Condition Message="A newer version of this software is already installed.">NOT NEWERVERSIONDETECTED</Condition>
<Directory Id="TARGETDIR" Name="SourceDir">
<Directory Id="$(var.Program_Files)">
<Directory Id="INSTALLDIR" Name="DNSCrypt">
<Component Id="ApplicationFiles" Guid="7d693c0b-71d8-436a-9c84-60a11dc74092">
<File Id="dnscryptproxy.exe" KeyPath="yes" Source="$(var.Path)\dnscrypt-proxy.exe" DiskId="1"/>
<File Source="$(var.Path)\LICENSE"></File>
<File Source="$(var.Path)\service-install.bat"></File>
<File Source="$(var.Path)\service-restart.bat"></File>
<File Source="$(var.Path)\service-uninstall.bat"></File>
<File Source="$(var.Path)\example-dnscrypt-proxy.toml"></File>
</Component>
<Component Id="ConfigInstall" Guid="db7b691e-f7c7-4c9a-92e1-c6f21ce6430f" KeyPath="yes">
<Condition><![CDATA[CONFIGFILE]]></Condition>
<CopyFile Id="dnscryptproxytoml" DestinationDirectory="INSTALLDIR" DestinationName="dnscrypt-proxy.toml" SourceProperty="CONFIGFILE">
</CopyFile>
<RemoveFile Id="RemoveConfig" Directory="INSTALLDIR" Name="dnscrypt-proxy.toml" On="uninstall" />
</Component>
</Directory>
</Directory>
</Directory>
<Feature Id="Complete" Level="1">
<ComponentRef Id="ApplicationFiles" />
<ComponentRef Id="ConfigInstall" />
</Feature>
</Product>
</Wix>

View file

@ -0,0 +1,12 @@
version = 1
test_patterns = [ ]
exclude_patterns = [ ]
[[analyzers]]
name = 'go'
enabled = true
[analyzers.meta]
import_path = 'github.com/dnscrypt/dnscrypt-proxy/dnscrypt-proxy'

View file

@ -1,209 +0,0 @@
package main
import (
"fmt"
"net"
"strings"
"sync"
"time"
"github.com/jedisct1/dlog"
"github.com/miekg/dns"
)
type CaptivePortalEntryIPs []net.IP
type CaptivePortalMap map[string]CaptivePortalEntryIPs
type CaptivePortalHandler struct {
wg sync.WaitGroup
cancelChannel chan struct{}
}
func (captivePortalHandler *CaptivePortalHandler) Stop() {
close(captivePortalHandler.cancelChannel)
captivePortalHandler.wg.Wait()
}
func (ipsMap *CaptivePortalMap) GetEntry(msg *dns.Msg) (*dns.Question, *CaptivePortalEntryIPs) {
if len(msg.Question) != 1 {
return nil, nil
}
question := &msg.Question[0]
name, err := NormalizeQName(question.Name)
if err != nil {
return nil, nil
}
ips, ok := (*ipsMap)[name]
if !ok {
return nil, nil
}
if question.Qclass != dns.ClassINET {
return nil, nil
}
return question, &ips
}
func HandleCaptivePortalQuery(msg *dns.Msg, question *dns.Question, ips *CaptivePortalEntryIPs) *dns.Msg {
respMsg := EmptyResponseFromMessage(msg)
ttl := uint32(1)
if question.Qtype == dns.TypeA {
for _, xip := range *ips {
if ip := xip.To4(); ip != nil {
rr := new(dns.A)
rr.Hdr = dns.RR_Header{Name: question.Name, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: ttl}
rr.A = ip
respMsg.Answer = append(respMsg.Answer, rr)
}
}
} else if question.Qtype == dns.TypeAAAA {
for _, xip := range *ips {
if xip.To4() == nil {
if ip := xip.To16(); ip != nil {
rr := new(dns.AAAA)
rr.Hdr = dns.RR_Header{Name: question.Name, Rrtype: dns.TypeAAAA, Class: dns.ClassINET, Ttl: ttl}
rr.AAAA = ip
respMsg.Answer = append(respMsg.Answer, rr)
}
}
}
}
qType, ok := dns.TypeToString[question.Qtype]
if !ok {
qType = fmt.Sprint(question.Qtype)
}
dlog.Infof("Query for captive portal detection: [%v] (%v)", question.Name, qType)
return respMsg
}
func handleColdStartClient(clientPc *net.UDPConn, cancelChannel chan struct{}, ipsMap *CaptivePortalMap) bool {
buffer := make([]byte, MaxDNSPacketSize-1)
clientPc.SetDeadline(time.Now().Add(time.Duration(1) * time.Second))
length, clientAddr, err := clientPc.ReadFrom(buffer)
exit := false
select {
case <-cancelChannel:
exit = true
default:
}
if exit {
return true
}
if neterr, ok := err.(net.Error); ok && neterr.Timeout() {
return false
}
if err != nil {
dlog.Warn(err)
return true
}
packet := buffer[:length]
msg := &dns.Msg{}
if err := msg.Unpack(packet); err != nil {
return false
}
question, ips := ipsMap.GetEntry(msg)
if ips == nil {
return false
}
respMsg := HandleCaptivePortalQuery(msg, question, ips)
if respMsg == nil {
return false
}
if response, err := respMsg.Pack(); err == nil {
clientPc.WriteTo(response, clientAddr)
}
return false
}
func addColdStartListener(
ipsMap *CaptivePortalMap,
listenAddrStr string,
captivePortalHandler *CaptivePortalHandler,
) error {
network := "udp"
isIPv4 := isDigit(listenAddrStr[0])
if isIPv4 {
network = "udp4"
}
listenUDPAddr, err := net.ResolveUDPAddr(network, listenAddrStr)
if err != nil {
return err
}
clientPc, err := net.ListenUDP(network, listenUDPAddr)
if err != nil {
return err
}
captivePortalHandler.wg.Add(1)
go func() {
for !handleColdStartClient(clientPc, captivePortalHandler.cancelChannel, ipsMap) {
}
clientPc.Close()
captivePortalHandler.wg.Done()
}()
return nil
}
func ColdStart(proxy *Proxy) (*CaptivePortalHandler, error) {
if len(proxy.captivePortalMapFile) == 0 {
return nil, nil
}
lines, err := ReadTextFile(proxy.captivePortalMapFile)
if err != nil {
dlog.Warn(err)
return nil, err
}
ipsMap := make(CaptivePortalMap)
for lineNo, line := range strings.Split(lines, "\n") {
line = TrimAndStripInlineComments(line)
if len(line) == 0 {
continue
}
name, ipsStr, ok := StringTwoFields(line)
if !ok {
return nil, fmt.Errorf(
"Syntax error for a captive portal rule at line %d",
1+lineNo,
)
}
name, err = NormalizeQName(name)
if err != nil {
continue
}
if strings.Contains(ipsStr, "*") {
return nil, fmt.Errorf(
"A captive portal rule must use an exact host name at line %d",
1+lineNo,
)
}
var ips []net.IP
for _, ip := range strings.Split(ipsStr, ",") {
ipStr := strings.TrimSpace(ip)
if ip := net.ParseIP(ipStr); ip != nil {
ips = append(ips, ip)
} else {
return nil, fmt.Errorf(
"Syntax error for a captive portal rule at line %d",
1+lineNo,
)
}
}
ipsMap[name] = ips
}
listenAddrStrs := proxy.listenAddresses
captivePortalHandler := CaptivePortalHandler{
cancelChannel: make(chan struct{}),
}
ok := false
for _, listenAddrStr := range listenAddrStrs {
err = addColdStartListener(&ipsMap, listenAddrStr, &captivePortalHandler)
if err == nil {
ok = true
}
}
if ok {
err = nil
}
proxy.captivePortalMap = &ipsMap
return &captivePortalHandler, err
}

View file

@ -4,6 +4,7 @@ import (
"bytes"
"encoding/binary"
"errors"
"io/ioutil"
"net"
"os"
"strconv"
@ -24,7 +25,7 @@ const (
)
const (
MaxHTTPBodyLength = 1000000
MaxHTTPBodyLength = 4000000
)
var (
@ -39,11 +40,7 @@ var (
var (
FileDescriptors = make([]*os.File, 0)
FileDescriptorNum = uintptr(0)
)
const (
InheritedDescriptorsBase = uintptr(50)
FileDescriptorNum = 0
)
func PrefixWithSize(packet []byte) ([]byte, error) {
@ -95,6 +92,20 @@ func Max(a, b int) int {
return b
}
func MinF(a, b float64) float64 {
if a < b {
return a
}
return b
}
func MaxF(a, b float64) float64 {
if a > b {
return a
}
return b
}
func StringReverse(s string) string {
r := []rune(s)
for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 {
@ -111,7 +122,7 @@ func StringTwoFields(str string) (string, string, bool) {
if pos == -1 {
return "", "", false
}
a, b := strings.TrimSpace(str[:pos]), strings.TrimSpace(str[pos+1:])
a, b := strings.TrimFunc(str[:pos], unicode.IsSpace), strings.TrimFunc(str[pos+1:], unicode.IsSpace)
if len(a) == 0 || len(b) == 0 {
return a, b, false
}
@ -132,16 +143,23 @@ func StringStripSpaces(str string) string {
}, str)
}
func TrimAndStripInlineComments(str string) string {
if idx := strings.LastIndexByte(str, '#'); idx >= 0 {
if idx == 0 || str[0] == '#' {
return ""
}
if prev := str[idx-1]; prev == ' ' || prev == '\t' {
str = str[:idx-1]
func ExtractPort(str string, defaultPort int) int {
port := defaultPort
if idx := strings.LastIndex(str, ":"); idx >= 0 && idx < len(str)-1 {
if portX, err := strconv.Atoi(str[idx+1:]); err == nil {
port = portX
}
}
return strings.TrimSpace(str)
return port
}
func ExtractHost(str string) string {
if idx := strings.LastIndex(str, ":"); idx >= 0 && idx < len(str)-1 {
if _, err := strconv.Atoi(str[idx+1:]); err == nil {
str = str[:idx]
}
}
return str
}
func ExtractHostAndPort(str string, defaultPort int) (host string, port int) {
@ -155,12 +173,10 @@ func ExtractHostAndPort(str string, defaultPort int) (host string, port int) {
}
func ReadTextFile(filename string) (string, error) {
bin, err := os.ReadFile(filename)
bin, err := ioutil.ReadFile(filename)
if err != nil {
return "", err
}
bin = bytes.TrimPrefix(bin, []byte{0xef, 0xbb, 0xbf})
return string(bin), nil
}
func isDigit(b byte) bool { return b >= '0' && b <= '9' }

File diff suppressed because it is too large

View file

@ -5,6 +5,7 @@ import (
crypto_rand "crypto/rand"
"crypto/sha512"
"errors"
"math/rand"
"github.com/jedisct1/dlog"
"github.com/jedisct1/xsecretbox"
@ -44,43 +45,22 @@ func unpad(packet []byte) ([]byte, error) {
}
}
func ComputeSharedKey(
cryptoConstruction CryptoConstruction,
secretKey *[32]byte,
serverPk *[32]byte,
providerName *string,
) (sharedKey [32]byte) {
func ComputeSharedKey(cryptoConstruction CryptoConstruction, secretKey *[32]byte, serverPk *[32]byte, providerName *string) (sharedKey [32]byte) {
if cryptoConstruction == XChacha20Poly1305 {
var err error
sharedKey, err = xsecretbox.SharedKey(*secretKey, *serverPk)
if err != nil {
dlog.Criticalf("[%v] Weak XChaCha20 public key", providerName)
dlog.Criticalf("[%v] Weak public key", providerName)
}
} else {
box.Precompute(&sharedKey, serverPk, secretKey)
c := byte(0)
for i := 0; i < 32; i++ {
c |= sharedKey[i]
}
if c == 0 {
dlog.Criticalf("[%v] Weak XSalsa20 public key", providerName)
if _, err := crypto_rand.Read(sharedKey[:]); err != nil {
dlog.Fatal(err)
}
}
}
return
}
func (proxy *Proxy) Encrypt(
serverInfo *ServerInfo,
packet []byte,
proto string,
) (sharedKey *[32]byte, encrypted []byte, clientNonce []byte, err error) {
func (proxy *Proxy) Encrypt(serverInfo *ServerInfo, packet []byte, proto string) (sharedKey *[32]byte, encrypted []byte, clientNonce []byte, err error) {
nonce, clientNonce := make([]byte, NonceSize), make([]byte, HalfNonceSize)
if _, err := crypto_rand.Read(clientNonce); err != nil {
return nil, nil, nil, err
}
crypto_rand.Read(clientNonce)
copy(nonce, clientNonce)
var publicKey *[PublicKeySize]byte
if proxy.ephemeralKeys {
@ -103,17 +83,10 @@ func (proxy *Proxy) Encrypt(
minQuestionSize = Max(proxy.questionSizeEstimator.MinQuestionSize(), minQuestionSize)
} else {
var xpad [1]byte
if _, err := crypto_rand.Read(xpad[:]); err != nil {
return nil, nil, nil, err
}
rand.Read(xpad[:])
minQuestionSize += int(xpad[0])
}
paddedLength := Min(MaxDNSUDPPacketSize, (Max(minQuestionSize, QueryOverhead)+1+63) & ^63)
if serverInfo.knownBugs.fragmentsBlocked && proto == "udp" {
paddedLength = MaxDNSUDPSafePacketSize
} else if serverInfo.Relay != nil && proto == "tcp" {
paddedLength = MaxDNSPacketSize
}
paddedLength := Min(MaxDNSUDPPacketSize, (Max(minQuestionSize, QueryOverhead)+63) & ^63)
if QueryOverhead+len(packet)+1 > paddedLength {
err = errors.New("Question too large; cannot be padded")
return
@ -131,12 +104,7 @@ func (proxy *Proxy) Encrypt(
return
}
func (proxy *Proxy) Decrypt(
serverInfo *ServerInfo,
sharedKey *[32]byte,
encrypted []byte,
nonce []byte,
) ([]byte, error) {
func (proxy *Proxy) Decrypt(serverInfo *ServerInfo, sharedKey *[32]byte, encrypted []byte, nonce []byte) ([]byte, error) {
serverMagicLen := len(ServerMagic)
responseHeaderLen := serverMagicLen + NonceSize
if len(encrypted) < responseHeaderLen+TagSize+int(MinDNSPacketSize) ||

View file

@ -20,121 +20,75 @@ type CertInfo struct {
ForwardSecurity bool
}
func FetchCurrentDNSCryptCert(
proxy *Proxy,
serverName *string,
proto string,
pk ed25519.PublicKey,
serverAddress string,
providerName string,
isNew bool,
relay *DNSCryptRelay,
knownBugs ServerBugs,
) (CertInfo, int, bool, error) {
func FetchCurrentDNSCryptCert(proxy *Proxy, serverName *string, proto string, pk ed25519.PublicKey, serverAddress string, providerName string, isNew bool) (CertInfo, int, error) {
if len(pk) != ed25519.PublicKeySize {
return CertInfo{}, 0, false, errors.New("Invalid public key length")
return CertInfo{}, 0, errors.New("Invalid public key length")
}
if !strings.HasSuffix(providerName, ".") {
providerName += "."
providerName = providerName + "."
}
if serverName == nil {
serverName = &providerName
}
query := dns.Msg{}
query := new(dns.Msg)
query.SetQuestion(providerName, dns.TypeTXT)
if !strings.HasPrefix(providerName, "2.dnscrypt-cert.") {
if relay != nil && !proxy.anonDirectCertFallback {
dlog.Warnf(
"[%v] uses a non-standard provider name, enable direct cert fallback to use with a relay ('%v' doesn't start with '2.dnscrypt-cert.')",
*serverName,
providerName,
)
} else {
dlog.Warnf("[%v] uses a non-standard provider name ('%v' doesn't start with '2.dnscrypt-cert.')", *serverName, providerName)
relay = nil
}
}
tryFragmentsSupport := true
if knownBugs.fragmentsBlocked {
tryFragmentsSupport = false
}
in, rtt, fragmentsBlocked, err := DNSExchange(
proxy,
proto,
&query,
serverAddress,
relay,
serverName,
tryFragmentsSupport,
)
client := dns.Client{Net: proto, UDPSize: uint16(MaxDNSUDPPacketSize)}
in, rtt, err := client.Exchange(query, serverAddress)
if err != nil {
dlog.Noticef("[%s] TIMEOUT", *serverName)
return CertInfo{}, 0, fragmentsBlocked, err
return CertInfo{}, 0, err
}
now := uint32(time.Now().Unix())
certInfo := CertInfo{CryptoConstruction: UndefinedConstruction}
highestSerial := uint32(0)
var certCountStr string
for _, answerRr := range in.Answer {
var txt string
if t, ok := answerRr.(*dns.TXT); !ok {
dlog.Noticef("[%v] Extra record of type [%v] found in certificate", *serverName, answerRr.Header().Rrtype)
binCert, err := packTxtString(strings.Join(answerRr.(*dns.TXT).Txt, ""))
if err != nil {
dlog.Warnf("[%v] Unable to unpack the certificate", providerName)
continue
} else {
txt = strings.Join(t.Txt, "")
}
binCert := PackTXTRR(txt)
if len(binCert) < 124 {
dlog.Warnf("[%v] Certificate too short", *serverName)
dlog.Warnf("[%v] Certificate too short", providerName)
continue
}
if !bytes.Equal(binCert[:4], CertMagic[:4]) {
dlog.Warnf("[%v] Invalid cert magic", *serverName)
dlog.Warnf("[%v] Invalid cert magic", providerName)
continue
}
cryptoConstruction := CryptoConstruction(0)
switch esVersion := binary.BigEndian.Uint16(binCert[4:6]); esVersion {
case 0x0001:
cryptoConstruction = XSalsa20Poly1305
dlog.Noticef("[%v] should upgrade to XChaCha20 for encryption", *serverName)
case 0x0002:
cryptoConstruction = XChacha20Poly1305
default:
dlog.Debugf("[%v] uses an unsupported encryption system", *serverName)
dlog.Noticef("[%v] Unsupported crypto construction", providerName)
continue
}
signature := binCert[8:72]
signed := binCert[72:]
if !ed25519.Verify(pk, signed, signature) {
dlog.Warnf("[%v] Incorrect signature for provider name: [%v]", *serverName, providerName)
dlog.Warnf("[%v] Incorrect signature", providerName)
continue
}
serial := binary.BigEndian.Uint32(binCert[112:116])
tsBegin := binary.BigEndian.Uint32(binCert[116:120])
tsEnd := binary.BigEndian.Uint32(binCert[120:124])
if tsBegin >= tsEnd {
dlog.Warnf("[%v] certificate ends before it starts (%v >= %v)", *serverName, tsBegin, tsEnd)
dlog.Warnf("[%v] certificate ends before it starts (%v >= %v)", providerName, tsBegin, tsEnd)
continue
}
ttl := tsEnd - tsBegin
if ttl > 86400*7 {
dlog.Infof(
"[%v] the key validity period for this server is excessively long (%d days), significantly reducing reliability and forward security.",
*serverName,
ttl/86400,
)
dlog.Infof("[%v] the key validity period for this server is excessively long (%d days), significantly reducing reliability and forward security.", providerName, ttl/86400)
daysLeft := (tsEnd - now) / 86400
if daysLeft < 1 {
dlog.Criticalf(
"[%v] certificate will expire today -- Switch to a different resolver as soon as possible",
*serverName,
)
dlog.Criticalf("[%v] certificate will expire today -- Switch to a different resolver as soon as possible", providerName)
} else if daysLeft <= 7 {
dlog.Warnf("[%v] certificate is about to expire -- if you don't manage this server, tell the server operator about it", *serverName)
dlog.Warnf("[%v] certificate is about to expire -- if you don't manage this server, tell the server operator about it", providerName)
} else if daysLeft <= 30 {
dlog.Infof("[%v] certificate will expire in %d days", *serverName, daysLeft)
} else {
dlog.Debugf("[%v] certificate still valid for %d days", *serverName, daysLeft)
dlog.Infof("[%v] certificate will expire in %d days", providerName, daysLeft)
}
certInfo.ForwardSecurity = false
} else {
@ -142,30 +96,24 @@ func FetchCurrentDNSCryptCert(
}
if !proxy.certIgnoreTimestamp {
if now > tsEnd || now < tsBegin {
dlog.Debugf(
"[%v] Certificate not valid at the current date (now: %v is not in [%v..%v])",
*serverName,
now,
tsBegin,
tsEnd,
)
dlog.Debugf("[%v] Certificate not valid at the current date (now: %v is not in [%v..%v])", providerName, now, tsBegin, tsEnd)
continue
}
}
if serial < highestSerial {
dlog.Debugf("[%v] Superseded by a previous certificate", *serverName)
dlog.Debugf("[%v] Superseded by a previous certificate", providerName)
continue
}
if serial == highestSerial {
if cryptoConstruction < certInfo.CryptoConstruction {
dlog.Debugf("[%v] Keeping the previous, preferred crypto construction", *serverName)
dlog.Debugf("[%v] Keeping the previous, preferred crypto construction", providerName)
continue
} else {
dlog.Debugf("[%v] Upgrading the construction from %v to %v", *serverName, certInfo.CryptoConstruction, cryptoConstruction)
dlog.Debugf("[%v] Upgrading the construction from %v to %v", providerName, certInfo.CryptoConstruction, cryptoConstruction)
}
}
if cryptoConstruction != XChacha20Poly1305 && cryptoConstruction != XSalsa20Poly1305 {
dlog.Noticef("[%v] Cryptographic construction %v not supported", *serverName, cryptoConstruction)
dlog.Noticef("[%v] Cryptographic construction %v not supported", providerName, cryptoConstruction)
continue
}
var serverPk [32]byte
@ -184,7 +132,42 @@ func FetchCurrentDNSCryptCert(
certCountStr = " - additional certificate"
}
if certInfo.CryptoConstruction == UndefinedConstruction {
return certInfo, 0, fragmentsBlocked, errors.New("No usable certificate found")
return certInfo, 0, errors.New("No useable certificate found")
}
return certInfo, int(rtt.Nanoseconds() / 1000000), fragmentsBlocked, nil
return certInfo, int(rtt.Nanoseconds() / 1000000), nil
}
func isDigit(b byte) bool { return b >= '0' && b <= '9' }
func dddToByte(s []byte) byte {
return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0'))
}
func packTxtString(s string) ([]byte, error) {
bs := make([]byte, len(s))
msg := make([]byte, 0)
copy(bs, s)
for i := 0; i < len(bs); i++ {
if bs[i] == '\\' {
i++
if i == len(bs) {
break
}
if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) {
msg = append(msg, dddToByte(bs[i:]))
i += 2
} else if bs[i] == 't' {
msg = append(msg, '\t')
} else if bs[i] == 'r' {
msg = append(msg, '\r')
} else if bs[i] == 'n' {
msg = append(msg, '\n')
} else {
msg = append(msg, bs[i])
}
} else {
msg = append(msg, bs[i])
}
}
return msg, nil
}

View file

@ -2,93 +2,79 @@ package main
import (
"encoding/binary"
"errors"
"net"
"strings"
"time"
"unicode/utf8"
"github.com/jedisct1/dlog"
"github.com/miekg/dns"
)
func EmptyResponseFromMessage(srcMsg *dns.Msg) *dns.Msg {
dstMsg := dns.Msg{MsgHdr: srcMsg.MsgHdr, Compress: true}
dstMsg.Question = srcMsg.Question
dstMsg.Response = true
dstMsg.RecursionAvailable = true
dstMsg.RecursionDesired = srcMsg.RecursionDesired
dstMsg.CheckingDisabled = false
dstMsg.AuthenticatedData = false
if edns0 := srcMsg.IsEdns0(); edns0 != nil {
dstMsg.SetEdns0(edns0.UDPSize(), edns0.Do())
}
return &dstMsg
}
func TruncatedResponse(packet []byte) ([]byte, error) {
srcMsg := dns.Msg{}
srcMsg := new(dns.Msg)
if err := srcMsg.Unpack(packet); err != nil {
return nil, err
}
dstMsg := EmptyResponseFromMessage(&srcMsg)
dstMsg := srcMsg
dstMsg.Response = true
dstMsg.Answer = make([]dns.RR, 0)
dstMsg.Ns = make([]dns.RR, 0)
dstMsg.Extra = make([]dns.RR, 0)
dstMsg.Truncated = true
return dstMsg.Pack()
}
func RefusedResponseFromMessage(srcMsg *dns.Msg, refusedCode bool, ipv4 net.IP, ipv6 net.IP, ttl uint32) *dns.Msg {
dstMsg := EmptyResponseFromMessage(srcMsg)
ede := new(dns.EDNS0_EDE)
if edns0 := dstMsg.IsEdns0(); edns0 != nil {
edns0.Option = append(edns0.Option, ede)
func EmptyResponseFromMessage(srcMsg *dns.Msg) (*dns.Msg, error) {
dstMsg := srcMsg
dstMsg.Response = true
dstMsg.Answer = make([]dns.RR, 0)
dstMsg.Ns = make([]dns.RR, 0)
dstMsg.Extra = make([]dns.RR, 0)
return dstMsg, nil
}
func RefusedResponseFromMessage(srcMsg *dns.Msg, refusedCode bool, ipv4 net.IP, ipv6 net.IP, ttl uint32) (*dns.Msg, error) {
dstMsg, err := EmptyResponseFromMessage(srcMsg)
if err != nil {
return dstMsg, err
}
ede.InfoCode = dns.ExtendedErrorCodeFiltered
if refusedCode {
dstMsg.Rcode = dns.RcodeRefused
} else {
dstMsg.Rcode = dns.RcodeSuccess
questions := srcMsg.Question
if len(questions) == 0 {
return dstMsg
}
question := questions[0]
sendHInfoResponse := true
if len(questions) > 0 {
question := questions[0]
sendHInfoResponse := true
if ipv4 != nil && question.Qtype == dns.TypeA {
rr := new(dns.A)
rr.Hdr = dns.RR_Header{Name: question.Name, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: ttl}
rr.A = ipv4.To4()
if rr.A != nil {
dstMsg.Answer = []dns.RR{rr}
sendHInfoResponse = false
ede.InfoCode = dns.ExtendedErrorCodeForgedAnswer
if ipv4 != nil && question.Qtype == dns.TypeA {
rr := new(dns.A)
rr.Hdr = dns.RR_Header{Name: question.Name, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: ttl}
rr.A = ipv4.To4()
if rr.A != nil {
dstMsg.Answer = []dns.RR{rr}
sendHInfoResponse = false
}
} else if ipv6 != nil && question.Qtype == dns.TypeAAAA {
rr := new(dns.AAAA)
rr.Hdr = dns.RR_Header{Name: question.Name, Rrtype: dns.TypeAAAA, Class: dns.ClassINET, Ttl: ttl}
rr.AAAA = ipv6.To16()
if rr.AAAA != nil {
dstMsg.Answer = []dns.RR{rr}
sendHInfoResponse = false
}
}
} else if ipv6 != nil && question.Qtype == dns.TypeAAAA {
rr := new(dns.AAAA)
rr.Hdr = dns.RR_Header{Name: question.Name, Rrtype: dns.TypeAAAA, Class: dns.ClassINET, Ttl: ttl}
rr.AAAA = ipv6.To16()
if rr.AAAA != nil {
dstMsg.Answer = []dns.RR{rr}
sendHInfoResponse = false
ede.InfoCode = dns.ExtendedErrorCodeForgedAnswer
}
}
if sendHInfoResponse {
hinfo := new(dns.HINFO)
hinfo.Hdr = dns.RR_Header{
Name: question.Name, Rrtype: dns.TypeHINFO,
Class: dns.ClassINET, Ttl: ttl,
if sendHInfoResponse {
hinfo := new(dns.HINFO)
hinfo.Hdr = dns.RR_Header{Name: question.Name, Rrtype: dns.TypeHINFO,
Class: dns.ClassINET, Ttl: 1}
hinfo.Cpu = "This query has been locally blocked"
hinfo.Os = "by dnscrypt-proxy"
dstMsg.Answer = []dns.RR{hinfo}
}
hinfo.Cpu = "This query has been locally blocked"
hinfo.Os = "by dnscrypt-proxy"
dstMsg.Answer = []dns.RR{hinfo}
} else {
ede.ExtraText = "This query has been locally blocked by dnscrypt-proxy"
}
}
return dstMsg
return dstMsg, nil
}
func HasTCFlag(packet []byte) bool {
@ -107,7 +93,7 @@ func Rcode(packet []byte) uint8 {
return packet[3] & 0xf
}
func NormalizeRawQName(name *[]byte) {
func NormalizeName(name *[]byte) {
for i, c := range *name {
if c >= 65 && c <= 90 {
(*name)[i] = c + 32
@ -115,38 +101,15 @@ func NormalizeRawQName(name *[]byte) {
}
}
func NormalizeQName(str string) (string, error) {
if len(str) == 0 || str == "." {
return ".", nil
func StripTrailingDot(str string) string {
if len(str) > 1 && strings.HasSuffix(str, ".") {
str = str[:len(str)-1]
}
hasUpper := false
str = strings.TrimSuffix(str, ".")
strLen := len(str)
for i := 0; i < strLen; i++ {
c := str[i]
if c >= utf8.RuneSelf {
return str, errors.New("Query name is not an ASCII string")
}
hasUpper = hasUpper || ('A' <= c && c <= 'Z')
}
if !hasUpper {
return str, nil
}
var b strings.Builder
b.Grow(len(str))
for i := 0; i < strLen; i++ {
c := str[i]
if 'A' <= c && c <= 'Z' {
c += 'a' - 'A'
}
b.WriteByte(c)
}
return b.String(), nil
return str
}
func getMinTTL(msg *dns.Msg, minTTL uint32, maxTTL uint32, cacheNegMinTTL uint32, cacheNegMaxTTL uint32) time.Duration {
if (msg.Rcode != dns.RcodeSuccess && msg.Rcode != dns.RcodeNameError) ||
(len(msg.Answer) <= 0 && len(msg.Ns) <= 0) {
if (msg.Rcode != dns.RcodeSuccess && msg.Rcode != dns.RcodeNameError) || (len(msg.Answer) <= 0 && len(msg.Ns) <= 0) {
return time.Duration(cacheNegMinTTL) * time.Second
}
var ttl uint32
@ -203,14 +166,8 @@ func setMaxTTL(msg *dns.Msg, ttl uint32) {
}
func updateTTL(msg *dns.Msg, expiration time.Time) {
until := time.Until(expiration)
ttl := uint32(0)
if until > 0 {
ttl = uint32(until / time.Second)
if until-time.Duration(ttl)*time.Second >= time.Second/2 {
ttl += 1
}
}
ttl := uint32(time.Until(expiration) / time.Second)
for _, rr := range msg.Answer {
rr.Header().Ttl = ttl
}
@ -218,291 +175,10 @@ func updateTTL(msg *dns.Msg, expiration time.Time) {
rr.Header().Ttl = ttl
}
for _, rr := range msg.Extra {
if rr.Header().Rrtype != dns.TypeOPT {
rr.Header().Ttl = ttl
header := rr.Header()
if header.Rrtype == dns.TypeOPT {
continue
}
rr.Header().Ttl = ttl
}
}
func hasEDNS0Padding(packet []byte) (bool, error) {
msg := dns.Msg{}
if err := msg.Unpack(packet); err != nil {
return false, err
}
if edns0 := msg.IsEdns0(); edns0 != nil {
for _, option := range edns0.Option {
if option.Option() == dns.EDNS0PADDING {
return true, nil
}
}
}
return false, nil
}
func addEDNS0PaddingIfNoneFound(msg *dns.Msg, unpaddedPacket []byte, paddingLen int) ([]byte, error) {
edns0 := msg.IsEdns0()
if edns0 == nil {
msg.SetEdns0(uint16(MaxDNSPacketSize), false)
edns0 = msg.IsEdns0()
if edns0 == nil {
return unpaddedPacket, nil
}
}
for _, option := range edns0.Option {
if option.Option() == dns.EDNS0PADDING {
return unpaddedPacket, nil
}
}
ext := new(dns.EDNS0_PADDING)
padding := make([]byte, paddingLen)
for i := range padding {
padding[i] = 'X'
}
ext.Padding = padding[:paddingLen]
edns0.Option = append(edns0.Option, ext)
return msg.Pack()
}
func removeEDNS0Options(msg *dns.Msg) bool {
edns0 := msg.IsEdns0()
if edns0 == nil {
return false
}
edns0.Option = []dns.EDNS0{}
return true
}
func dddToByte(s []byte) byte {
return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0'))
}
func PackTXTRR(s string) []byte {
bs := make([]byte, len(s))
msg := make([]byte, 0)
copy(bs, s)
for i := 0; i < len(bs); i++ {
if bs[i] == '\\' {
i++
if i == len(bs) {
break
}
if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) {
msg = append(msg, dddToByte(bs[i:]))
i += 2
} else if bs[i] == 't' {
msg = append(msg, '\t')
} else if bs[i] == 'r' {
msg = append(msg, '\r')
} else if bs[i] == 'n' {
msg = append(msg, '\n')
} else {
msg = append(msg, bs[i])
}
} else {
msg = append(msg, bs[i])
}
}
return msg
}
type DNSExchangeResponse struct {
response *dns.Msg
rtt time.Duration
priority int
fragmentsBlocked bool
err error
}
func DNSExchange(
proxy *Proxy,
proto string,
query *dns.Msg,
serverAddress string,
relay *DNSCryptRelay,
serverName *string,
tryFragmentsSupport bool,
) (*dns.Msg, time.Duration, bool, error) {
for {
cancelChannel := make(chan struct{})
maxTries := 3
channel := make(chan DNSExchangeResponse, 2*maxTries)
var err error
options := 0
for tries := 0; tries < maxTries; tries++ {
if tryFragmentsSupport {
queryCopy := query.Copy()
queryCopy.Id += uint16(options)
go func(query *dns.Msg, delay time.Duration) {
time.Sleep(delay)
option := DNSExchangeResponse{err: errors.New("Canceled")}
select {
case <-cancelChannel:
default:
option = _dnsExchange(proxy, proto, query, serverAddress, relay, 1500)
}
option.fragmentsBlocked = false
option.priority = 0
channel <- option
}(queryCopy, time.Duration(200*tries)*time.Millisecond)
options++
}
queryCopy := query.Copy()
queryCopy.Id += uint16(options)
go func(query *dns.Msg, delay time.Duration) {
time.Sleep(delay)
option := DNSExchangeResponse{err: errors.New("Canceled")}
select {
case <-cancelChannel:
default:
option = _dnsExchange(proxy, proto, query, serverAddress, relay, 480)
}
option.fragmentsBlocked = true
option.priority = 1
channel <- option
}(queryCopy, time.Duration(250*tries)*time.Millisecond)
options++
}
var bestOption *DNSExchangeResponse
for i := 0; i < options; i++ {
if dnsExchangeResponse := <-channel; dnsExchangeResponse.err == nil {
if bestOption == nil || dnsExchangeResponse.priority < bestOption.priority ||
(dnsExchangeResponse.priority == bestOption.priority && dnsExchangeResponse.rtt < bestOption.rtt) {
bestOption = &dnsExchangeResponse
if bestOption.priority == 0 {
close(cancelChannel)
break
}
}
} else {
err = dnsExchangeResponse.err
}
}
if bestOption != nil {
if bestOption.fragmentsBlocked {
dlog.Debugf("[%v] public key retrieval succeeded but server is blocking fragments", *serverName)
} else {
dlog.Debugf("[%v] public key retrieval succeeded", *serverName)
}
return bestOption.response, bestOption.rtt, bestOption.fragmentsBlocked, nil
}
if relay == nil || !proxy.anonDirectCertFallback {
if err == nil {
err = errors.New("Unable to reach the server")
}
return nil, 0, false, err
}
dlog.Infof(
"Unable to get the public key for [%v] via relay [%v], retrying over a direct connection",
*serverName,
relay.RelayUDPAddr.IP,
)
relay = nil
}
}
func _dnsExchange(
proxy *Proxy,
proto string,
query *dns.Msg,
serverAddress string,
relay *DNSCryptRelay,
paddedLen int,
) DNSExchangeResponse {
var packet []byte
var rtt time.Duration
if proto == "udp" {
qNameLen, padding := len(query.Question[0].Name), 0
if qNameLen < paddedLen {
padding = paddedLen - qNameLen
}
if padding > 0 {
opt := new(dns.OPT)
opt.Hdr.Name = "."
ext := new(dns.EDNS0_PADDING)
ext.Padding = make([]byte, padding)
opt.Option = append(opt.Option, ext)
query.Extra = []dns.RR{opt}
}
binQuery, err := query.Pack()
if err != nil {
return DNSExchangeResponse{err: err}
}
udpAddr, err := net.ResolveUDPAddr("udp", serverAddress)
if err != nil {
return DNSExchangeResponse{err: err}
}
upstreamAddr := udpAddr
if relay != nil {
proxy.prepareForRelay(udpAddr.IP, udpAddr.Port, &binQuery)
upstreamAddr = relay.RelayUDPAddr
}
now := time.Now()
pc, err := net.DialUDP("udp", nil, upstreamAddr)
if err != nil {
return DNSExchangeResponse{err: err}
}
defer pc.Close()
if err := pc.SetDeadline(time.Now().Add(proxy.timeout)); err != nil {
return DNSExchangeResponse{err: err}
}
if _, err := pc.Write(binQuery); err != nil {
return DNSExchangeResponse{err: err}
}
packet = make([]byte, MaxDNSPacketSize)
length, err := pc.Read(packet)
if err != nil {
return DNSExchangeResponse{err: err}
}
rtt = time.Since(now)
packet = packet[:length]
} else {
binQuery, err := query.Pack()
if err != nil {
return DNSExchangeResponse{err: err}
}
tcpAddr, err := net.ResolveTCPAddr("tcp", serverAddress)
if err != nil {
return DNSExchangeResponse{err: err}
}
upstreamAddr := tcpAddr
if relay != nil {
proxy.prepareForRelay(tcpAddr.IP, tcpAddr.Port, &binQuery)
upstreamAddr = relay.RelayTCPAddr
}
now := time.Now()
var pc net.Conn
proxyDialer := proxy.xTransport.proxyDialer
if proxyDialer == nil {
pc, err = net.DialTCP("tcp", nil, upstreamAddr)
} else {
pc, err = (*proxyDialer).Dial("tcp", tcpAddr.String())
}
if err != nil {
return DNSExchangeResponse{err: err}
}
defer pc.Close()
if err := pc.SetDeadline(time.Now().Add(proxy.timeout)); err != nil {
return DNSExchangeResponse{err: err}
}
binQuery, err = PrefixWithSize(binQuery)
if err != nil {
return DNSExchangeResponse{err: err}
}
if _, err := pc.Write(binQuery); err != nil {
return DNSExchangeResponse{err: err}
}
packet, err = ReadPrefixed(&pc)
if err != nil {
return DNSExchangeResponse{err: err}
}
rtt = time.Since(now)
}
msg := dns.Msg{}
if err := msg.Unpack(packet); err != nil {
return DNSExchangeResponse{err: err}
}
return DNSExchangeResponse{response: &msg, rtt: rtt, err: nil}
}

View file

@ -6,6 +6,10 @@ import (
"github.com/VividCortex/ewma"
)
const (
SizeEstimatorEwmaDecay = 100.0
)
type QuestionSizeEstimator struct {
sync.RWMutex
minQuestionSize int
@ -13,10 +17,7 @@ type QuestionSizeEstimator struct {
}
func NewQuestionSizeEstimator() QuestionSizeEstimator {
return QuestionSizeEstimator{
minQuestionSize: InitialMinQuestionSize,
ewma: &ewma.SimpleEWMA{},
}
return QuestionSizeEstimator{minQuestionSize: InitialMinQuestionSize, ewma: ewma.NewMovingAverage(SizeEstimatorEwmaDecay)}
}
func (questionSizeEstimator *QuestionSizeEstimator) MinQuestionSize() int {

View file

@ -1,7 +0,0 @@
##############################
# Allowed IPs List #
##############################
#192.168.0.*
#fe80:53:* # IPv6 prefix example
#81.169.145.105

View file

@ -1,6 +1,6 @@
###########################
# Blocklist #
# Blacklist #
###########################
## Rules for name-based query blocking, one per line
@ -21,7 +21,7 @@ banner.*
banners.*
creatives.*
oas.*
oascentral.* # inline comments are allowed after a pound sign
oascentral.*
stats.*
tag.*
telemetry.*
@ -31,13 +31,6 @@ eth0.me
*.workgroup
## Prevent usage of Apple private relay, that bypasses DNS
# mask.apple-dns.net
# mask.icloud.com
# mask-api.icloud.com
# doh.dns.apple.com
## Time-based rules

View file

@ -1,17 +0,0 @@
##############################
# IP blocklist #
##############################
## Rules for blocking DNS responses if they contain
## IP addresses matching patterns.
##
## Sample feeds of suspect IP addresses:
## - https://github.com/stamparm/ipsum
## - https://github.com/tg12/bad_packets_blocklist
## - https://isc.sans.edu/block.txt
## - https://block.energized.pro/extensions/ips/formats/list.txt
## - https://www.iblocklist.com/lists
163.5.1.4
94.46.118.*
fe80:53:* # IPv6 prefix example

View file

@ -1,27 +0,0 @@
###########################################
# Captive portal test names #
###########################################
## Some operating systems send queries to these names after a network change,
## in order to check if connectivity beyond the router is possible without
## going through a captive portal.
##
## This is a list of hard-coded IP addresses that will be returned when queries
## for these names are received, even before the operating system reports an interface
## as usable for reaching the Internet.
##
## Note that IPv6 addresses don't need to be specified within brackets,
## as there are no port numbers.
captive.apple.com 17.253.109.201, 17.253.113.202
connectivitycheck.gstatic.com 64.233.162.94, 64.233.164.94, 64.233.165.94, 64.233.177.94, 64.233.185.94, 74.125.132.94, 74.125.136.94, 74.125.20.94, 74.125.21.94, 74.125.28.94
connectivitycheck.android.com 64.233.162.100, 64.233.162.101, 64.233.162.102, 64.233.162.113, 64.233.162.138, 64.233.162.139
www.msftncsi.com 2.16.106.89, 2.16.106.91, 23.0.175.137, 23.0.175.146, 23.192.47.155, 23.192.47.203, 23.199.63.160, 23.199.63.184, 23.199.63.208, 23.204.146.160, 23.204.146.163, 23.46.238.243, 23.46.239.24, 23.48.39.16, 23.48.39.48, 23.55.38.139, 23.55.38.146, 23.59.190.185, 23.59.190.195
dns.msftncsi.com 131.107.255.255, fd3e:4f5a:5b81::1
www.msftconnecttest.com 13.107.4.52
ipv6.msftconnecttest.com 2a01:111:2003::52
ipv4only.arpa 192.0.0.170, 192.0.0.171
## Adding IP addresses of NTP servers is also a good idea
time.google.com 216.239.35.0, 2001:4860:4806::

View file

@ -13,7 +13,7 @@ www.google.* forcesafesearch.google.com
www.bing.com strict.bing.com
yandex.ru familysearch.yandex.ru # inline comments are allowed after a pound sign
yandex.ru familysearch.yandex.ru
=duckduckgo.com safe.duckduckgo.com
@ -35,10 +35,3 @@ localhost ::1
# ads.* 192.168.100.1
# ads.* 192.168.100.2
# ads.* ::1
# PTR records can be created by setting cloak_ptr in the main configuration file
# Entries with wild cards will not have PTR records created, but multiple
# names for the same IP are supported
# example.com 192.168.100.1
# my.example.com 192.168.100.1

View file

@ -21,25 +21,18 @@
## Servers from the "public-resolvers" source (see down below) can
## be viewed here: https://dnscrypt.info/public-servers
##
## The proxy will automatically pick working servers from this list.
## Note that the require_* filters do NOT apply when using this setting.
##
## By default, this list is empty and all registered servers matching the
## require_* filters will be used instead.
## If this line is commented, all registered servers matching the require_* filters
## will be used.
##
## The proxy will automatically pick the fastest, working servers from the list.
## Remove the leading # first to enable this; lines starting with # are ignored.
# server_names = ['scaleway-fr', 'google', 'yandex', 'cloudflare']
## List of local addresses and ports to listen to. Can be IPv4 and/or IPv6.
## Example with both IPv4 and IPv6:
## listen_addresses = ['127.0.0.1:53', '[::1]:53']
##
## To listen to all IPv4 addresses, use `listen_addresses = ['0.0.0.0:53']`
## To listen to all IPv4+IPv6 addresses, use `listen_addresses = ['[::]:53']`
listen_addresses = ['127.0.0.1:53']
listen_addresses = ['127.0.0.1:53', '[::1]:53']
## Maximum number of simultaneous client connections to accept
@ -55,7 +48,7 @@ max_clients = 250
# user_name = 'nobody'
## Require servers (from remote sources) to satisfy specific properties
## Require servers (from static + remote sources) to satisfy specific properties
# Use servers reachable over IPv4
ipv4_servers = true
@ -69,9 +62,6 @@ dnscrypt_servers = true
# Use servers implementing the DNS-over-HTTPS protocol
doh_servers = true
# Use servers implementing the Oblivious DoH protocol
odoh_servers = false
## Require servers defined by remote sources to satisfy specific properties
@ -81,7 +71,7 @@ require_dnssec = false
# Server must not log user queries (declarative)
require_nolog = true
# Server must not enforce its own blocklist (for parental control, ads blocking...)
# Server must not enforce its own blacklist (for parental control, ads blocking...)
require_nofilter = true
# Server names to avoid even if they match all criteria
@ -97,13 +87,6 @@ disabled_server_names = []
force_tcp = false
## Enable *experimental* support for HTTP/3 (DoH3, HTTP over QUIC)
## Note that, like DNSCrypt but unlike other HTTP versions, this uses
## UDP and (usually) port 443 instead of TCP.
http3 = false
## SOCKS proxy
## Uncomment the following line to route all TCP connections to a local Tor node
## Tor doesn't support UDP, so set `force_tcp` to `true` as well.
@ -117,45 +100,30 @@ http3 = false
# http_proxy = 'http://127.0.0.1:8888'
## How long a DNS query will wait for a response, in milliseconds.
## If you have a network with *a lot* of latency, you may need to
## increase this. Startup may be slower if you do so.
## Don't increase it too much. 10000 is the highest reasonable value.
## A timeout below 5000 is not recommended.
## How long a DNS query will wait for a response, in milliseconds
timeout = 5000
timeout = 2500
## Keepalive for HTTP (HTTPS, HTTP/2, HTTP/3) queries, in seconds
## Keepalive for HTTP (HTTPS, HTTP/2) queries, in seconds
keepalive = 30
## Add EDNS-client-subnet information to outgoing queries
##
## Multiple networks can be listed; they will be randomly chosen.
## These networks don't have to match your actual networks.
# edns_client_subnet = ['0.0.0.0/0', '2001:db8::/32']
## Response for blocked queries. Options are `refused`, `hinfo` (default) or
## an IP response. To give an IP response, use the format `a:<IPv4>,aaaa:<IPv6>`.
## Response for blocked queries. Options are `refused`, `hinfo` (default) or
## an IP response. To give an IP response, use the format `a:<IPv4>,aaaa:<IPv6>`.
## Using the `hinfo` option means that some responses will be lies.
## Unfortunately, the `hinfo` option appears to be required for Android 8+
# blocked_query_response = 'refused'
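## Purely illustrative example (not part of the original file), using documentation
## addresses, of the `a:<IPv4>,aaaa:<IPv6>` form described above:
# blocked_query_response = 'a:192.0.2.1,aaaa:2001:db8::1'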
## Load-balancing strategy: 'p2' (default), 'ph', 'p<n>', 'first' or 'random'
## Randomly choose 1 of the fastest 2, half, n, 1 or all live servers by latency.
## The response quality still depends on the server itself.
## Load-balancing strategy: 'p2' (default), 'ph', 'first' or 'random'
# lb_strategy = 'p2'
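## Purely illustrative example (not part of the original file) of the 'p<n>' form,
## picking one of the 4 fastest live servers:
# lb_strategy = 'p4'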
## Set to `true` to constantly try to estimate the latency of all the resolvers
## and adjust the load-balancing parameters accordingly, or to `false` to disable.
## Default is `true` that makes 'p2' `lb_strategy` work well.
# lb_estimator = true
@ -165,43 +133,21 @@ keepalive = 30
# log_level = 2
## Log file for the application, as an alternative to sending logs to
## the standard system logging service (syslog/Windows event log).
##
## This file is different from other log files, and will not be
## automatically rotated by the application.
## log file for the application
# log_file = 'dnscrypt-proxy.log'
## When using a log file, only keep logs from the most recent launch.
# log_file_latest = true
## Use the system logger (syslog on Unix, Event Log on Windows)
# use_syslog = true
## The maximum concurrency to reload certificates from the resolvers.
## Default is 10.
# cert_refresh_concurrency = 10
## Delay, in minutes, after which certificates are reloaded
cert_refresh_delay = 240
## Initially don't check DNSCrypt server certificates for expiration, and
## only start checking them after a first successful connection to a resolver.
## This can be useful on routers with no battery-backed clock.
# cert_ignore_timestamp = false
## DNSCrypt: Create a new, unique key for every single DNS query
## This may improve privacy but can also have a significant impact on CPU usage
## Only enable if you don't have a lot of network load
@ -214,77 +160,45 @@ cert_refresh_delay = 240
# tls_disable_session_tickets = false
## DoH: Use TLS 1.2 and specific cipher suite instead of the server preference
## DoH: Use a specific cipher suite instead of the server preference
## 49199 = TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
## 49195 = TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
## 52392 = TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305
## 52393 = TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
## 4865 = TLS_AES_128_GCM_SHA256
## 4867 = TLS_CHACHA20_POLY1305_SHA256
##
## On non-Intel CPUs such as MIPS routers and ARM systems (Android, Raspberry Pi...),
## uncommenting the following line may improve performance.
## the following suite improves performance.
## This may also help on Intel CPUs running 32-bit operating systems.
## However, this can cause issues fetching sources or connecting to some HTTP servers,
## and should not be set on regular CPUs.
##
## Keep tls_cipher_suite undefined to let the app automatically choose secure parameters.
## Keep tls_cipher_suite empty if you have issues fetching sources or
## connecting to some DoH servers. Google and Cloudflare are fine with it.
# tls_cipher_suite = [52392, 49199]
## Log TLS key material to a file, for debugging purposes only.
## This file will contain the TLS master key, which can be used to decrypt
## all TLS traffic to/from DoH servers.
## Never ever enable except for debugging purposes with a tool such as mitmproxy.
## Fallback resolver
## This is a normal, non-encrypted DNS resolver, that will be only used
## for one-shot queries when retrieving the initial resolvers list, and
## only if the system DNS configuration doesn't work.
## No user application queries will ever be leaked through this resolver,
## and it will not be used after IP addresses of resolvers URLs have been found.
## It will never be used if lists have already been cached, and if stamps
## don't include host names without IP addresses.
## It will not be used if the configured system DNS works.
## A resolver supporting DNSSEC is recommended. This may become mandatory.
##
## People in China may need to use 114.114.114.114:53 here.
## Other popular options include 8.8.8.8 and 1.1.1.1.
# tls_key_log_file = '/tmp/keylog.txt'
fallback_resolver = '9.9.9.9:53'
## Bootstrap resolvers
##
## These are normal, non-encrypted DNS resolvers, that will be only used
## for one-shot queries when retrieving the initial resolvers list and if
## the system DNS configuration doesn't work.
##
## No user queries will ever be leaked through these resolvers, and they will
## not be used after IP addresses of DoH resolvers have been found (if you are
## using DoH).
##
## They will never be used if lists have already been cached, and if the stamps
## of the configured servers already include IP addresses (which is the case for
## most of DoH servers, and for all DNSCrypt servers and relays).
##
## They will not be used if the configured system DNS works, or after the
## proxy already has at least one usable secure resolver.
##
## Resolvers supporting DNSSEC are recommended, and, if you are using
## DoH, bootstrap resolvers should ideally be operated by a different entity
## than the DoH servers you will be using, especially if you have IPv6 enabled.
##
## People in China may want to use 114.114.114.114:53 here.
## Other popular options include 8.8.8.8, 9.9.9.9 and 1.1.1.1.
##
## If more than one resolver is specified, they will be tried in sequence.
##
## TL;DR: put valid standard resolver addresses here. Your actual queries will
## not be sent there. If you're using DNSCrypt or Anonymized DNS and your
## lists are up to date, these resolvers will not even be used.
## Never let dnscrypt-proxy try to use the system DNS settings;
## unconditionally use the fallback resolver.
bootstrap_resolvers = ['9.9.9.11:53', '8.8.8.8:53']
## When internal DNS resolution is required, for example to retrieve
## the resolvers list:
##
## - queries will be sent to dnscrypt-proxy itself, if it is already
## running with active servers (*)
## - or else, queries will be sent to fallback servers
## - finally, if `ignore_system_dns` is `false`, queries will be sent
## to the system DNS
##
## (*) this is incompatible with systemd sockets.
## `listen_addresses` must not be empty.
ignore_system_dns = true
ignore_system_dns = false
## Maximum time (in seconds) to wait for network connectivity before
@ -319,10 +233,8 @@ netprobe_address = '9.9.9.9:53'
## These strings will be added as TXT records to queries.
## Do not use, except on servers explicitly asking for extra data
## to be present.
## encrypted-dns-server can be configured to use this for access control
## in the [access_control] section
# query_meta = ['key1:value1', 'key2:value2', 'token:MySecretToken']
# query_meta = ["key1:value1", "key2:value2", "key3:value3"]
## Automatic log files rotation
@ -342,43 +254,23 @@ log_files_max_backups = 1
# Filters #
#########################
## Note: if you are using dnsmasq, disable the `dnssec` option in dnsmasq if you
## configure dnscrypt-proxy to do any kind of filtering (including the filters
## below and blocklists).
## You can still choose resolvers that do DNSSEC validation.
## Immediately respond to IPv6-related queries with an empty response
## This makes things faster when there is no IPv6 connectivity, but can
## also cause reliability issues with some stub resolvers.
## Do not enable if you added a validating resolver such as dnsmasq in front
## of the proxy.
block_ipv6 = false
## Immediately respond to A and AAAA queries for host names without a domain name
## This also prevents "dotless domain names" from being resolved upstream.
block_unqualified = true
## Immediately respond to queries for local zones instead of leaking them to
## upstream resolvers (always causing errors or timeouts).
block_undelegated = true
## TTL for synthetic responses sent when a request has been blocked (due to
## IPv6 or blocklists).
reject_ttl = 10
##################################################################################
# Route queries for specific domains to a dedicated set of servers #
##################################################################################
## See the `example-forwarding-rules.txt` file for an example
## Example map entries (one entry per line):
## example.com 9.9.9.9
## example.net 9.9.9.9,8.8.8.8,1.1.1.1
# forwarding_rules = 'forwarding-rules.txt'
@ -391,18 +283,13 @@ reject_ttl = 10
## Cloaking returns a predefined address for a specific name.
## In addition to acting as a HOSTS file, it can also return the IP address
## of a different name. It will also do CNAME flattening.
## If 'cloak_ptr' is set, then PTR (reverse lookups) are enabled
## for cloaking rules that do not contain wild cards.
##
## See the `example-cloaking-rules.txt` file for an example
## Example map entries (one entry per line)
## example.com 10.1.1.1
## www.google.com forcesafesearch.google.com
# cloaking_rules = 'cloaking-rules.txt'
## TTL used when serving entries in cloaking-rules.txt
# cloak_ttl = 600
# cloak_ptr = false
###########################
@ -416,12 +303,12 @@ cache = true
## Cache size
cache_size = 4096
cache_size = 512
## Minimum TTL for cached entries
cache_min_ttl = 2400
cache_min_ttl = 600
## Maximum TTL for cached entries
@ -440,53 +327,6 @@ cache_neg_max_ttl = 600
########################################
# Captive portal handling #
########################################
[captive_portals]
## A file that contains a set of names used by operating systems to
## check for connectivity and captive portals, along with hard-coded
## IP addresses to return.
# map_file = 'example-captive-portals.txt'
##################################
# Local DoH server #
##################################
[local_doh]
## dnscrypt-proxy can act as a local DoH server. By doing so, web browsers
## requiring a direct connection to a DoH server in order to enable some
## features will enable these, without bypassing your DNS proxy.
## Addresses that the local DoH server should listen to
# listen_addresses = ['127.0.0.1:3000']
## Path of the DoH URL. This is not a file, but the part after the hostname
## in the URL. By convention, `/dns-query` is frequently chosen.
## For each `listen_address` the complete URL to access the server will be:
## `https://<listen_address><path>` (ex: `https://127.0.0.1/dns-query`)
# path = '/dns-query'
## Certificate file and key - Note that the certificate has to be trusted.
## Can be generated using the following command:
## openssl req -x509 -nodes -newkey rsa:2048 -days 5000 -sha256 -keyout localhost.pem -out localhost.pem
## See the documentation (wiki) for more information.
# cert_file = 'localhost.pem'
# cert_key_file = 'localhost.pem'
###############################
# Query logging #
###############################
@ -495,20 +335,20 @@ cache_neg_max_ttl = 600
[query_log]
## Path to the query log file (absolute, or relative to the same directory as the config file)
## Can be set to /dev/stdout in order to log to the standard output.
## Path to the query log file (absolute, or relative to the same directory as the executable file)
## Can be /dev/stdout to log to the standard output (and set log_files_max_size to 0)
# file = 'query.log'
# file = 'query.log'
## Query log format (currently supported: tsv and ltsv)
## Query log format (currently supported: tsv and ltsv)
format = 'tsv'
format = 'tsv'
## Do not log these query types, to reduce verbosity. Keep empty to log everything.
## Do not log these query types, to reduce verbosity. Keep empty to log everything.
# ignored_qtypes = ['DNSKEY', 'NS']
# ignored_qtypes = ['DNSKEY', 'NS']
@ -522,22 +362,22 @@ format = 'tsv'
[nx_log]
## Path to the query log file (absolute, or relative to the same directory as the config file)
## Path to the query log file (absolute, or relative to the same directory as the executable file)
# file = 'nx.log'
# file = 'nx.log'
## Query log format (currently supported: tsv and ltsv)
## Query log format (currently supported: tsv and ltsv)
format = 'tsv'
format = 'tsv'
######################################################
# Pattern-based blocking (blocklists) #
# Pattern-based blocking (blacklists) #
######################################################
## Blocklists are made of one pattern per line. Example of valid patterns:
## Blacklists are made of one pattern per line. Example of valid patterns:
##
## example.com
## =example.com
@ -546,108 +386,81 @@ format = 'tsv'
## ads*.example.*
## ads*.example[0-9]*.com
##
## Example blocklist files can be found at https://download.dnscrypt.info/blocklists/
## A script to build blocklists from public feeds can be found in the
## `utils/generate-domains-blocklists` directory of the dnscrypt-proxy source code.
## Example blacklist files can be found at https://download.dnscrypt.info/blacklists/
## A script to build blacklists from public feeds can be found in the
## `utils/generate-domains-blacklists` directory of the dnscrypt-proxy source code.
[blocked_names]
[blacklist]
## Path to the file of blocking rules (absolute, or relative to the same directory as the config file)
## Path to the file of blocking rules (absolute, or relative to the same directory as the executable file)
# blocked_names_file = 'blocked-names.txt'
# blacklist_file = 'blacklist.txt'
## Optional path to a file logging blocked queries
## Optional path to a file logging blocked queries
# log_file = 'blocked-names.log'
# log_file = 'blocked.log'
## Optional log format: tsv or ltsv (default: tsv)
## Optional log format: tsv or ltsv (default: tsv)
# log_format = 'tsv'
# log_format = 'tsv'
###########################################################
# Pattern-based IP blocking (IP blocklists) #
# Pattern-based IP blocking (IP blacklists) #
###########################################################
## IP blocklists are made of one pattern per line. Example of valid patterns:
## IP blacklists are made of one pattern per line. Example of valid patterns:
##
## 127.*
## fe80:abcd:*
## 192.168.1.4
[blocked_ips]
[ip_blacklist]
## Path to the file of blocking rules (absolute, or relative to the same directory as the config file)
## Path to the file of blocking rules (absolute, or relative to the same directory as the executable file)
# blocked_ips_file = 'blocked-ips.txt'
# blacklist_file = 'ip-blacklist.txt'
## Optional path to a file logging blocked queries
## Optional path to a file logging blocked queries
# log_file = 'blocked-ips.log'
# log_file = 'ip-blocked.log'
## Optional log format: tsv or ltsv (default: tsv)
## Optional log format: tsv or ltsv (default: tsv)
# log_format = 'tsv'
# log_format = 'tsv'
######################################################
# Pattern-based allow lists (blocklists bypass) #
# Pattern-based whitelisting (blacklists bypass) #
######################################################
## Allowlists support the same patterns as blocklists
## If a name matches an allowlist entry, the corresponding session
## Whitelists support the same patterns as blacklists
## If a name matches a whitelist entry, the corresponding session
## will bypass names and IP filters.
##
## Time-based rules are also supported to make some websites only accessible at specific times of the day.
[allowed_names]
[whitelist]
## Path to the file of allow list rules (absolute, or relative to the same directory as the config file)
## Path to the file of whitelisting rules (absolute, or relative to the same directory as the executable file)
# allowed_names_file = 'allowed-names.txt'
# whitelist_file = 'whitelist.txt'
## Optional path to a file logging allowed queries
## Optional path to a file logging whitelisted queries
# log_file = 'allowed-names.log'
# log_file = 'whitelisted.log'
## Optional log format: tsv or ltsv (default: tsv)
## Optional log format: tsv or ltsv (default: tsv)
# log_format = 'tsv'
#########################################################
# Pattern-based allowed IPs lists (blocklists bypass) #
#########################################################
## Allowed IP lists support the same patterns as IP blocklists
## If an IP response matches an allowed entry, the corresponding session
## will bypass IP filters.
##
## Time-based rules are also supported to make some websites only accessible at specific times of the day.
[allowed_ips]
## Path to the file of allowed ip rules (absolute, or relative to the same directory as the config file)
# allowed_ips_file = 'allowed-ips.txt'
## Optional path to a file logging allowed queries
# log_file = 'allowed-ips.log'
## Optional log format: tsv or ltsv (default: tsv)
# log_format = 'tsv'
# log_format = 'tsv'
@ -656,33 +469,34 @@ format = 'tsv'
##########################################
## One or more weekly schedules can be defined here.
## Patterns in the name-based blocked_names file can optionally be followed with @schedule_name
## Patterns in the name-based blocklist can optionally be followed with @schedule_name
## to apply the pattern 'schedule_name' only when it matches a time range of that schedule.
##
## For example, the following rule in a blocklist file:
## For example, the following rule in a blacklist file:
## *.youtube.* @time-to-sleep
## would block access to YouTube during the times defined by the 'time-to-sleep' schedule.
## would block access to YouTube only during the days, and periods of the day,
## defined by the 'time-to-sleep' schedule.
##
## {after='21:00', before= '7:00'} matches 0:00-7:00 and 21:00-0:00
## {after= '9:00', before='18:00'} matches 9:00-18:00
[schedules]
# [schedules.time-to-sleep]
# mon = [{after='21:00', before='7:00'}]
# tue = [{after='21:00', before='7:00'}]
# wed = [{after='21:00', before='7:00'}]
# thu = [{after='21:00', before='7:00'}]
# fri = [{after='23:00', before='7:00'}]
# sat = [{after='23:00', before='7:00'}]
# sun = [{after='21:00', before='7:00'}]
# [schedules.'time-to-sleep']
# mon = [{after='21:00', before='7:00'}]
# tue = [{after='21:00', before='7:00'}]
# wed = [{after='21:00', before='7:00'}]
# thu = [{after='21:00', before='7:00'}]
# fri = [{after='23:00', before='7:00'}]
# sat = [{after='23:00', before='7:00'}]
# sun = [{after='21:00', before='7:00'}]
# [schedules.work]
# mon = [{after='9:00', before='18:00'}]
# tue = [{after='9:00', before='18:00'}]
# wed = [{after='9:00', before='18:00'}]
# thu = [{after='9:00', before='18:00'}]
# fri = [{after='9:00', before='17:00'}]
# [schedules.'work']
# mon = [{after='9:00', before='18:00'}]
# tue = [{after='9:00', before='18:00'}]
# wed = [{after='9:00', before='18:00'}]
# thu = [{after='9:00', before='18:00'}]
# fri = [{after='9:00', before='17:00'}]
@ -702,209 +516,41 @@ format = 'tsv'
## must include the prefixes.
##
## If the `urls` property is missing, cache files and valid signatures
## must already be present. This doesn't prevent these cache files from
## must be already present; This doesn't prevent these cache files from
## expiring after `refresh_delay` hours.
## `refresh_delay` must be in the [24..168] interval.
## The minimum delay of 24 hours (1 day) avoids unnecessary requests to servers.
## The maximum delay of 168 hours (1 week) ensures cache freshness.
[sources]
### An example of a remote source from https://github.com/DNSCrypt/dnscrypt-resolvers
## An example of a remote source from https://github.com/DNSCrypt/dnscrypt-resolvers
[sources.public-resolvers]
urls = ['https://raw.githubusercontent.com/DNSCrypt/dnscrypt-resolvers/master/v3/public-resolvers.md', 'https://download.dnscrypt.info/resolvers-list/v3/public-resolvers.md']
cache_file = 'public-resolvers.md'
minisign_key = 'RWQf6LRCGA9i53mlYecO4IzT51TGPpvWucNSCh1CBM0QTaLn73Y7GFO3'
refresh_delay = 73
prefix = ''
[sources.'public-resolvers']
urls = ['https://raw.githubusercontent.com/DNSCrypt/dnscrypt-resolvers/master/v2/public-resolvers.md', 'https://download.dnscrypt.info/resolvers-list/v2/public-resolvers.md']
cache_file = 'public-resolvers.md'
minisign_key = 'RWQf6LRCGA9i53mlYecO4IzT51TGPpvWucNSCh1CBM0QTaLn73Y7GFO3'
prefix = ''
### Anonymized DNS relays
[sources.relays]
urls = ['https://raw.githubusercontent.com/DNSCrypt/dnscrypt-resolvers/master/v3/relays.md', 'https://download.dnscrypt.info/resolvers-list/v3/relays.md']
cache_file = 'relays.md'
minisign_key = 'RWQf6LRCGA9i53mlYecO4IzT51TGPpvWucNSCh1CBM0QTaLn73Y7GFO3'
refresh_delay = 73
prefix = ''
### ODoH (Oblivious DoH) servers and relays
# [sources.odoh-servers]
# urls = ['https://raw.githubusercontent.com/DNSCrypt/dnscrypt-resolvers/master/v3/odoh-servers.md', 'https://download.dnscrypt.info/resolvers-list/v3/odoh-servers.md']
# cache_file = 'odoh-servers.md'
# minisign_key = 'RWQf6LRCGA9i53mlYecO4IzT51TGPpvWucNSCh1CBM0QTaLn73Y7GFO3'
# refresh_delay = 73
# prefix = ''
# [sources.odoh-relays]
# urls = ['https://raw.githubusercontent.com/DNSCrypt/dnscrypt-resolvers/master/v3/odoh-relays.md', 'https://download.dnscrypt.info/resolvers-list/v3/odoh-relays.md']
# cache_file = 'odoh-relays.md'
# minisign_key = 'RWQf6LRCGA9i53mlYecO4IzT51TGPpvWucNSCh1CBM0QTaLn73Y7GFO3'
# refresh_delay = 73
# prefix = ''
### Quad9
## Quad9 over DNSCrypt - https://quad9.net/
# [sources.quad9-resolvers]
# urls = ['https://www.quad9.net/quad9-resolvers.md']
# minisign_key = 'RWQBphd2+f6eiAqBsvDZEBXBGHQBJfeG6G+wJPPKxCZMoEQYpmoysKUN'
# cache_file = 'quad9-resolvers.md'
# prefix = 'quad9-'
# urls = ['https://www.quad9.net/quad9-resolvers.md']
# minisign_key = 'RWQBphd2+f6eiAqBsvDZEBXBGHQBJfeG6G+wJPPKxCZMoEQYpmoysKUN'
# cache_file = 'quad9-resolvers.md'
# prefix = 'quad9-'
### Another example source, with resolvers censoring some websites not appropriate for children
### This is a subset of the `public-resolvers` list, so enabling both is useless.
## Another example source, with resolvers censoring some websites not appropriate for children
## This is a subset of the `public-resolvers` list, so enabling both is useless
# [sources.parental-control]
# urls = ['https://raw.githubusercontent.com/DNSCrypt/dnscrypt-resolvers/master/v3/parental-control.md', 'https://download.dnscrypt.info/resolvers-list/v3/parental-control.md']
# cache_file = 'parental-control.md'
# minisign_key = 'RWQf6LRCGA9i53mlYecO4IzT51TGPpvWucNSCh1CBM0QTaLn73Y7GFO3'
### dnscry.pt servers - See https://www.dnscry.pt
# [sources.dnscry-pt-resolvers]
# urls = ["https://www.dnscry.pt/resolvers.md"]
# minisign_key = "RWQM31Nwkqh01x88SvrBL8djp1NH56Rb4mKLHz16K7qsXgEomnDv6ziQ"
# cache_file = "dnscry.pt-resolvers.md"
# refresh_delay = 73
# prefix = "dnscry.pt-"
# [sources.'parental-control']
# urls = ['https://raw.githubusercontent.com/DNSCrypt/dnscrypt-resolvers/master/v2/parental-control.md', 'https://download.dnscrypt.info/resolvers-list/v2/parental-control.md']
# cache_file = 'parental-control.md'
# minisign_key = 'RWQf6LRCGA9i53mlYecO4IzT51TGPpvWucNSCh1CBM0QTaLn73Y7GFO3'
#########################################
# Servers with known bugs #
#########################################
[broken_implementations]
## Cisco servers currently cannot handle queries larger than 1472 bytes, and don't
## truncate responses larger than questions as expected by the DNSCrypt protocol.
## This prevents large responses from being received over UDP and over relays.
##
## Older versions of the `dnsdist` server software had a bug with queries larger
## than 1500 bytes. This is fixed since `dnsdist` version 1.5.0, but
## some servers may still run an outdated version.
##
## The list below enables workarounds to make non-relayed usage more reliable
## until the servers are fixed.
fragments_blocked = ['cisco', 'cisco-ipv6', 'cisco-familyshield', 'cisco-familyshield-ipv6', 'cisco-sandbox', 'cleanbrowsing-adult', 'cleanbrowsing-adult-ipv6', 'cleanbrowsing-family', 'cleanbrowsing-family-ipv6', 'cleanbrowsing-security', 'cleanbrowsing-security-ipv6']
#################################################################
# Certificate-based client authentication for DoH #
#################################################################
## Use an X509 certificate to authenticate yourself when connecting to DoH servers.
## This is only useful if you are operating your own, private DoH server(s).
## 'creds' maps servers to certificates, and supports multiple entries.
## If you are not using the standard root CA, an optional "root_ca"
## property set to the path to a root CRT file can be added to a server entry.
[doh_client_x509_auth]
# creds = [
# { server_name='*', client_cert='client.crt', client_key='client.key' }
# ]
################################
# Anonymized DNS #
################################
[anonymized_dns]
## Routes are indirect ways to reach DNSCrypt servers.
##
## A route maps a server name ("server_name") to one or more relays that will be
## used to connect to that server.
##
## A relay can be specified as a DNS Stamp (either a relay stamp, or a
## DNSCrypt stamp) or a server name.
##
## The following example routes "example-server-1" via `anon-example-1` or `anon-example-2`,
## and "example-server-2" via the relay whose relay DNS stamp is
## "sdns://gRIxMzcuNzQuMjIzLjIzNDo0NDM".
##
## !!! THESE ARE JUST EXAMPLES !!!
##
## Review the list of available relays from the "relays.md" file, and, for each
## server you want to use, define the relays you want connections to go through.
##
## Carefully choose relays and servers so that they are run by different entities.
##
## "server_name" can also be set to "*" to define a default route, for all servers:
## { server_name='*', via=['anon-example-1', 'anon-example-2'] }
##
## If a route is ["*"], the proxy automatically picks a relay on a distinct network.
## { server_name='*', via=['*'] } is also an option, but is likely to be suboptimal.
##
## Manual selection is always recommended over automatic selection, so that you can
## select (relay,server) pairs that work well and fit your own criteria (close by or
## in different countries, operated by different entities, on distinct ISPs...)
# routes = [
# { server_name='example-server-1', via=['anon-example-1', 'anon-example-2'] },
# { server_name='example-server-2', via=['sdns://gRIxMzcuNzQuMjIzLjIzNDo0NDM'] }
# ]
## Skip resolvers incompatible with anonymization instead of using them directly
skip_incompatible = false
## If public server certificates for a non-conformant server cannot be
## retrieved via a relay, try getting them directly. Actual queries
## will then always go through relays.
# direct_cert_fallback = false
###############################
# DNS64 #
###############################
## DNS64 is a mechanism for synthesizing AAAA records from A records.
## It is used with an IPv6/IPv4 translator to enable client-server
## communication between an IPv6-only client and an IPv4-only server,
## without requiring any changes to either the IPv6 or the IPv4 node,
## for the class of applications that work through NATs.
##
## There are two options to synthesize such records:
## Option 1: Using a set of static IPv6 prefixes;
## Option 2: By discovering the IPv6 prefix from a DNS64-enabled resolver.
##
## If both options are configured, only static prefixes are used.
## (Ref. RFC6147, RFC6052, RFC7050)
##
## Do not enable unless you know what DNS64 is and why you need it, or else
## you won't be able to connect to anything at all.
[dns64]
## Static prefix(es) as Pref64::/n CIDRs
# prefix = ['64:ff9b::/96']
## DNS64-enabled resolver(s) to discover Pref64::/n CIDRs
## These resolvers are only used to query the Well-Known IPv4-only Name (WKN) "ipv4only.arpa." for prefix discovery.
## Set this to your ISP's resolvers if custom prefixes (other than the Well-Known Prefix 64:ff9b::/96) are in use.
## IMPORTANT: Default resolvers listed below support Well-Known Prefix 64:ff9b::/96 only.
# resolver = ['[2606:4700:4700::64]:53', '[2001:4860:4860::64]:53']
########################################
# Static entries #
########################################
## Optional, local, static list of additional servers
## Mostly useful for testing your own servers.
[static]
# [static.myserver]
# stamp = 'sdns://AQcAAAAAAAAAAAAQMi5kbnNjcnlwdC1jZXJ0Lg'
# [static.'myserver']
# stamp = 'sdns:AQcAAAAAAAAAAAAQMi5kbnNjcnlwdC1jZXJ0Lg'

View file

@ -7,44 +7,8 @@
## <domain> <server address>[:port] [, <server address>[:port]...]
## IPv6 addresses can be specified by enclosing the address in square brackets.
## The following keywords can also be used instead of a server address:
## $BOOTSTRAP to use the default bootstrap resolvers
## $DHCP to use the default DNS resolvers provided by the DHCP server
## In order to enable this feature, the "forwarding_rules" property needs to
## be set to this file name inside the main configuration file.
## Blocking IPv6 may prevent local devices from being discovered.
## If this happens, set `block_ipv6` to `false` in the main config file.
## Forward *.lan, *.home, *.home.arpa, and *.localdomain to 192.168.1.1
# lan 192.168.1.1
# home 192.168.1.1
# home.arpa 192.168.1.1
# localdomain 192.168.1.1
# 192.in-addr.arpa 192.168.1.1
## Forward *.local to the resolvers provided by the DHCP server
# local $DHCP
## Forward *.internal to 192.168.1.1, and if it doesn't work, to the
## DNS from the local DHCP server, and if it still doesn't work, to the
## bootstrap resolvers
# internal 192.168.1.1,$DHCP,$BOOTSTRAP
## Forward queries for example.com and *.example.com to 9.9.9.9 and 8.8.8.8
# example.com 9.9.9.9,8.8.8.8
## Forward queries to a resolver using IPv6
# ipv6.example.com [2001:DB8::42]
## Forward to a non-standard port number
# x.example.com 192.168.0.1:1053
# y.example.com [2001:DB8::42]:1053
## Forward queries for .onion names to a local Tor client
## Tor must be configured with the following in the torrc file:
## DNSPort 9053
## AutomapHostsOnResolve 1
# onion 127.0.0.1:9053
# example.com 9.9.9.9,8.8.8.8

View file

@ -1,28 +1,22 @@
###########################
# Allowlist #
# Whitelist #
###########################
## Rules for allowing queries based on name, one per line
## Rules for name-based query whitelisting, one per line
##
## Example of valid patterns:
##
## ads.* | matches anything with an "ads." prefix
## *.example.com | matches example.com and all names within that zone such as www.example.com
## example.com | identical to the above
## =example.com | allows example.com but not *.example.com
## =example.com | whitelists example.com but not *.example.com
## *sex* | matches any name containing that substring
## ads[0-9]* | matches "ads" followed by one or more digits
## ads*.example* | *, ? and [] can be used anywhere, but prefixes/suffixes are faster
# That one may be blocked due to 'tracker' being in the name.
tracker.debian.org
# That one may be blocked due to 'ads' being in the name.
# However, blocking it prevents all sponsored links from the Google
# search engine from being opened.
googleadservices.com
## Time-based rules

View file

@ -1,40 +0,0 @@
//go:build gofuzzbeta
package main
import (
"encoding/hex"
"testing"
stamps "github.com/jedisct1/go-dnsstamps"
)
func FuzzParseODoHTargetConfigs(f *testing.F) {
configs_hex := "0020000100010020aacc53b3df0c6eb2d7d5ce4ddf399593376c9903ba6a52a52c3a2340f97bb764"
configs, _ := hex.DecodeString(configs_hex)
f.Add(configs)
f.Fuzz(func(t *testing.T, configs []byte) {
if _, err := parseODoHTargetConfigs(configs); err != nil {
t.Skip()
}
})
}
func FuzzParseStampParser(f *testing.F) {
f.Add("sdns://AgcAAAAAAAAACzEwNC4yMS42Ljc4AA1kb2guY3J5cHRvLnN4Ci9kbnMtcXVlcnk")
f.Add("sdns://AgcAAAAAAAAAGlsyNjA2OjQ3MDA6MzAzNzo6NjgxNTo2NGVdABJkb2gtaXB2Ni5jcnlwdG8uc3gKL2Rucy1xdWVyeQ")
f.Add(
"sdns://AQcAAAAAAAAADTUxLjE1LjEyMi4yNTAg6Q3ZfapcbHgiHKLF7QFoli0Ty1Vsz3RXs1RUbxUrwZAcMi5kbnNjcnlwdC1jZXJ0LnNjYWxld2F5LWFtcw",
)
f.Add(
"sdns://AQcAAAAAAAAAFlsyMDAxOmJjODoxODIwOjUwZDo6MV0g6Q3ZfapcbHgiHKLF7QFoli0Ty1Vsz3RXs1RUbxUrwZAcMi5kbnNjcnlwdC1jZXJ0LnNjYWxld2F5LWFtcw",
)
f.Add("sdns://gQ8xNjMuMTcyLjE4MC4xMjU")
f.Add("sdns://BQcAAAAAAAAADm9kb2guY3J5cHRvLnN4Ci9kbnMtcXVlcnk")
f.Add("sdns://hQcAAAAAAAAAACCi3jNJDEdtNW4tvHN8J3lpIklSa2Wrj7qaNCgEgci9_BpvZG9oLXJlbGF5LmVkZ2Vjb21wdXRlLmFwcAEv")
f.Fuzz(func(t *testing.T, stamp string) {
if _, err := stamps.NewServerStampFromString(stamp); err != nil {
t.Skip()
}
})
}

View file

@ -1,141 +0,0 @@
package main
import (
"encoding/base64"
"fmt"
"io"
"net"
"net/http"
"strings"
"time"
"github.com/jedisct1/dlog"
"github.com/miekg/dns"
)
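// localDoHHandler serves DNS-over-HTTPS queries on the local DoH listener.
// It accepts POST bodies of type application/dns-message and GET requests with a
// base64url-encoded "dns" query parameter, forwards the decoded packet through the
// regular query pipeline, and pads the response before sending it back.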
type localDoHHandler struct {
proxy *Proxy
}
func (handler localDoHHandler) ServeHTTP(writer http.ResponseWriter, request *http.Request) {
proxy := handler.proxy
if !proxy.clientsCountInc() {
dlog.Warnf("Too many incoming connections (max=%d)", proxy.maxClients)
return
}
defer proxy.clientsCountDec()
dataType := "application/dns-message"
writer.Header().Set("Server", "dnscrypt-proxy")
if request.URL.Path != proxy.localDoHPath {
writer.WriteHeader(404)
return
}
packet := []byte{}
var err error
start := time.Now()
if request.Method == "POST" &&
request.Header.Get("Content-Type") == dataType {
packet, err = io.ReadAll(io.LimitReader(request.Body, int64(MaxDNSPacketSize)))
if err != nil {
dlog.Warnf("No body in a local DoH query")
return
}
} else if request.Method == "GET" && request.Header.Get("Accept") == dataType {
encodedPacket := request.URL.Query().Get("dns")
if len(encodedPacket) >= MinDNSPacketSize*4/3 && len(encodedPacket) <= MaxDNSPacketSize*4/3 {
packet, err = base64.RawURLEncoding.DecodeString(encodedPacket)
if err != nil {
dlog.Warnf("Invalid base64 in a local DoH query")
return
}
}
}
if len(packet) < MinDNSPacketSize {
writer.Header().Set("Content-Type", "text/plain")
writer.WriteHeader(400)
writer.Write([]byte("dnscrypt-proxy local DoH server\n"))
return
}
clientAddr, err := net.ResolveTCPAddr("tcp", request.RemoteAddr)
if err != nil {
dlog.Errorf("Unable to get the client address: [%v]", err)
return
}
xClientAddr := net.Addr(clientAddr)
hasEDNS0Padding, err := hasEDNS0Padding(packet)
if err != nil {
writer.WriteHeader(400)
return
}
response := proxy.processIncomingQuery("local_doh", proxy.mainProto, packet, &xClientAddr, nil, start, false)
if len(response) == 0 {
writer.WriteHeader(500)
return
}
msg := dns.Msg{}
if err := msg.Unpack(packet); err != nil {
writer.WriteHeader(500)
return
}
responseLen := len(response)
paddedLen := dohPaddedLen(responseLen)
padLen := paddedLen - responseLen
if hasEDNS0Padding {
response, err = addEDNS0PaddingIfNoneFound(&msg, response, padLen)
if err != nil {
dlog.Critical(err)
return
}
} else {
pad := strings.Repeat("X", padLen)
writer.Header().Set("X-Pad", pad)
}
writer.Header().Set("Content-Type", dataType)
writer.Header().Set("Content-Length", fmt.Sprint(len(response)))
writer.WriteHeader(200)
writer.Write(response)
}
func (proxy *Proxy) localDoHListener(acceptPc *net.TCPListener) {
defer acceptPc.Close()
if len(proxy.localDoHCertFile) == 0 || len(proxy.localDoHCertKeyFile) == 0 {
dlog.Fatal("A certificate and a key are required to start a local DoH service")
}
httpServer := &http.Server{
ReadTimeout: proxy.timeout,
WriteTimeout: proxy.timeout,
Handler: localDoHHandler{proxy: proxy},
}
httpServer.SetKeepAlivesEnabled(true)
if err := httpServer.ServeTLS(acceptPc, proxy.localDoHCertFile, proxy.localDoHCertKeyFile); err != nil {
dlog.Fatal(err)
}
}
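// dohPaddedLen returns the smallest padding boundary greater than or equal to
// unpaddedLen, so that response sizes only take a small set of values; lengths
// above the largest boundary are returned unchanged.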
func dohPaddedLen(unpaddedLen int) int {
boundaries := [...]int{
64,
128,
192,
256,
320,
384,
512,
704,
768,
896,
960,
1024,
1088,
1152,
2688,
4080,
MaxDNSPacketSize,
}
for _, boundary := range boundaries {
if boundary >= unpaddedLen {
return boundary
}
}
return unpaddedLen
}

View file

@ -1,47 +0,0 @@
-----BEGIN PRIVATE KEY-----
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDb7g6EQhbfby97
k4oMbZTzdi2TWFBs7qK/QwgOu+L6EhNHPO1ZEU29v0APFBFJO5zyyAk9bZ9k9tPB
bCuVVI9jEUfLH3UCjEQPG6XI2w++uVh0yALvc/uurCvRHVlle/V7cAoikndc2SjE
RQUALbACIqwD5g0F77BYwcsreB4GH253/R6Q2/CJZ4jNHPjkocOJiVr3ejA0kkoN
MXpGUXWcrVVk20M2A1CeO7HAulLRcklEdoHE3v46pjp0iZK0F9LyZX1U1ql+4QL3
iQttoZ4tMg83lFHSt4G9PrpIhzXr9W4NW822faSvrIwwN/JbItUmRa7n/3+MkuJQ
IGGNDayXAgMBAAECggEBANs0fmGSocuXvYL1Pi4+9qxnCOwIpTi97Zam0BwnZwcL
Bw4FCyiwV4UdX1LoFIailT9i49rHLYzre4oZL6OKgdQjQCSTuQOOHLPWQbpdpWba
w/C5/jr+pkemMZIfJ6BAGiArPt7Qj4oKpFhj1qUj5H9sYXkNTcOx8Fm25rLv6TT9
O7wg0oCpyG+iBSbCYBp9mDMz8pfo4P3BhcFiyKCKeiAC6KuHU81dvuKeFB4XQK+X
no2NqDqe6MBkmTqjNNy+wi1COR7lu34LPiWU5Hq5PdIEqBBUMjlMI6oYlhlgNTdx
SvsqFz3Xs6kpAhJTrSiAqscPYosgaMQxo+LI26PJnikCgYEA9n0OERkm0wSBHnHY
Kx8jaxNYg93jEzVnEgI/MBTJZqEyCs9fF6Imv737VawEN/BhesZZX7bGZQfDo8AT
aiSa5upkkSGXEqTu5ytyoKFTb+dJ/qmx3+zP6dPVzDnc8WPYMoUg7vvjZkXXJgZX
+oMlMUW1wWiDNI3wP19W9Is6xssCgYEA5GqkUBEns6eTFJV0JKqbEORJJ7lx5NZe
cIx+jPpLkILG4mOKOg1TBx0wkxa9cELtsNsM+bPtu9OqRMhsfPBmsXDHhJwg0Z6G
eDTfYYPkpRhwZvl6jBZn9sLVR9wfg2hE+n0lfV3mceg336KOkwAehDU84SWZ2e0S
esqkpbHJa+UCgYA7PY0O8POSzcdWkNf6bS5vAqRIdSCpMjGGc4HKRYSuJNnJHVPm
czNK7Bcm3QPaiexzvI4oYd5G09niVjyUSx3rl7P56Y/MjFVau+d90agjAfyXtyMo
BVtnAGGnBtUiMvP4GGT06xcZMnnmCqpEbBaZQ/7N8Bdwnxh5sqlMdtX2hwKBgAhL
hyQRO2vezgyVUN50A6WdZLq4lVZGIq/bqkzcWhopZaebDc4F5doASV9OGBsXkyI1
EkePLTcA/NH6pVX0NQaEnfpG4To7k46R/PrBm3ATbyGONdEYjzX65VvytoJDKx4d
pVrkKhZA5KaOdLcJ7hHHDSrv/qJXZbBn44rQ5guxAoGBAJ6oeUsUUETakxlmIhmK
xuQmWqLf97BKt8r6Z8CqHKWK7vpG2OmgFYCQGaR7angQ8hmAOv6jM56XhoagDBoc
UoaoEyo9/uCk6NRUkUMj7Tk/5UQSiWLceVH27w+icMFhf1b7EmmNfk+APsiathO5
j4edf1AinVCPwRVVu1dtLL5P
-----END PRIVATE KEY-----
-----BEGIN CERTIFICATE-----
MIIDAjCCAeoCCQCptj0+TjjIJjANBgkqhkiG9w0BAQsFADBDMREwDwYDVQQKDAhE
TlNDcnlwdDEaMBgGA1UECwwRTG9jYWwgdGVzdCBzZXJ2ZXIxEjAQBgNVBAMMCWxv
Y2FsaG9zdDAeFw0xOTExMTgxNDA2MzBaFw0zMzA3MjcxNDA2MzBaMEMxETAPBgNV
BAoMCEROU0NyeXB0MRowGAYDVQQLDBFMb2NhbCB0ZXN0IHNlcnZlcjESMBAGA1UE
AwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2+4O
hEIW328ve5OKDG2U83Ytk1hQbO6iv0MIDrvi+hITRzztWRFNvb9ADxQRSTuc8sgJ
PW2fZPbTwWwrlVSPYxFHyx91AoxEDxulyNsPvrlYdMgC73P7rqwr0R1ZZXv1e3AK
IpJ3XNkoxEUFAC2wAiKsA+YNBe+wWMHLK3geBh9ud/0ekNvwiWeIzRz45KHDiYla
93owNJJKDTF6RlF1nK1VZNtDNgNQnjuxwLpS0XJJRHaBxN7+OqY6dImStBfS8mV9
VNapfuEC94kLbaGeLTIPN5RR0reBvT66SIc16/VuDVvNtn2kr6yMMDfyWyLVJkWu
5/9/jJLiUCBhjQ2slwIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQA6Vz5HnGuy8jZz
5i8ipbcDMCZNdpYYnxgD53hEKOfoSv7LaF0ztD8Kmg3s5LHv9EHlkK3+G6FWRGiP
9f6IbtRITaiVQP3M13T78hpN5Qq5jgsqjR7ZcN7Etr6ZFd7G/0+mzqbyBuW/3szt
RdX/YLy1csvjbZoNNuXGWRohXjg0Mjko2tRLmARvxA/gZV5zWycv3BD2BPzyCdS9
MDMYSF0RPiL8+alfwLNqLcqMA5liHlmZa85uapQyoUI3ksKJkEgU53aD8cYhH9Yn
6mVpsrvrcRLBiHlbi24QBolhFkCSRK8bXes8XDIPuD8iYRwlrVBwOakMFQWMqNfI
IMOKJomU
-----END CERTIFICATE-----

View file

@ -1,40 +0,0 @@
package main
import (
"io"
"os"
"github.com/jedisct1/dlog"
"gopkg.in/natefinch/lumberjack.v2"
)
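// Logger returns a writer for log output: os.Stdout for "/dev/stdout", a plain
// append-only handle for existing non-regular files (e.g. FIFOs), and otherwise a
// lumberjack logger that handles size-based rotation and compression.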
func Logger(logMaxSize int, logMaxAge int, logMaxBackups int, fileName string) io.Writer {
if fileName == "/dev/stdout" {
return os.Stdout
}
if st, _ := os.Stat(fileName); st != nil && !st.Mode().IsRegular() {
if st.Mode().IsDir() {
dlog.Fatalf("[%v] is a directory", fileName)
}
fp, err := os.OpenFile(fileName, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0o644)
if err != nil {
dlog.Fatalf("Unable to access [%v]: [%v]", fileName, err)
}
return fp
}
if fp, err := os.OpenFile(fileName, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0o644); err == nil {
fp.Close()
} else {
dlog.Errorf("Unable to create [%v]: [%v]", fileName, err)
}
logger := &lumberjack.Logger{
LocalTime: true,
MaxSize: logMaxSize,
MaxAge: logMaxAge,
MaxBackups: logMaxBackups,
Filename: fileName,
Compress: true,
}
return logger
}

View file

@ -7,88 +7,54 @@ import (
"fmt"
"math/rand"
"os"
"runtime"
"sync"
"github.com/facebookgo/pidfile"
"github.com/jedisct1/dlog"
"github.com/kardianos/service"
)
const (
AppVersion = "2.1.8"
AppVersion = "2.0.28"
DefaultConfigFileName = "dnscrypt-proxy.toml"
)
type App struct {
wg sync.WaitGroup
quit chan struct{}
proxy *Proxy
flags *ConfigFlags
proxy Proxy
}
func main() {
tzErr := TimezoneSetup()
dlog.Init("dnscrypt-proxy", dlog.SeverityNotice, "DAEMON")
if tzErr != nil {
dlog.Warnf("Timezone setup failed: [%v]", tzErr)
}
runtime.MemProfileRate = 0
os.Setenv("GODEBUG", os.Getenv("GODEBUG")+",tls13=1")
seed := make([]byte, 8)
if _, err := crypto_rand.Read(seed); err != nil {
dlog.Fatal(err)
}
rand.Seed(int64(binary.LittleEndian.Uint64(seed)))
crypto_rand.Read(seed)
rand.Seed(int64(binary.LittleEndian.Uint64(seed[:])))
pwd, err := os.Getwd()
if err != nil {
dlog.Fatal("Unable to find the path to the current directory")
}
svcFlag := flag.String("service", "", fmt.Sprintf("Control the system service: %q", service.ControlAction))
version := flag.Bool("version", false, "print current proxy version")
flags := ConfigFlags{}
flags.Resolve = flag.String("resolve", "", "resolve a DNS name (string can be <name> or <name>,<resolver address>)")
flags.List = flag.Bool("list", false, "print the list of available resolvers for the enabled filters")
flags.ListAll = flag.Bool("list-all", false, "print the complete list of available resolvers, ignoring filters")
flags.IncludeRelays = flag.Bool("include-relays", false, "include the list of available relays in the output of -list and -list-all")
flags.JSONOutput = flag.Bool("json", false, "output list as JSON")
flags.Check = flag.Bool("check", false, "check the configuration file and exit")
flags.ConfigFile = flag.String("config", DefaultConfigFileName, "Path to the configuration file")
flags.Child = flag.Bool("child", false, "Invokes program as a child process")
flags.NetprobeTimeoutOverride = flag.Int("netprobe-timeout", 60, "Override the netprobe timeout")
flags.ShowCerts = flag.Bool("show-certs", false, "print DoH certificate chain hashes")
flag.Parse()
if *version {
fmt.Println(AppVersion)
os.Exit(0)
}
if fullexecpath, err := os.Executable(); err == nil {
WarnIfMaybeWritableByOtherUsers(fullexecpath)
}
app := &App{
flags: &flags,
}
svcConfig := &service.Config{
Name: "dnscrypt-proxy",
DisplayName: "DNSCrypt client proxy",
Description: "Encrypted/authenticated DNS proxy",
WorkingDirectory: pwd,
Arguments: []string{"-config", *flags.ConfigFile},
}
svcFlag := flag.String("service", "", fmt.Sprintf("Control the system service: %q", service.ControlAction))
app := &App{}
svc, err := service.New(app, svcConfig)
if err != nil {
svc = nil
dlog.Debug(err)
}
app.proxy = NewProxy()
_ = ServiceManagerStartNotify()
if err := ConfigLoad(&app.proxy, svcFlag); err != nil {
dlog.Fatal(err)
}
if len(*svcFlag) != 0 {
if svc == nil {
dlog.Fatal("Built-in service installation is not supported on this platform")
@ -110,7 +76,7 @@ func main() {
return
}
if svc != nil {
if err := svc.Run(); err != nil {
if err = svc.Run(); err != nil {
dlog.Fatal(err)
}
} else {
@ -119,38 +85,34 @@ func main() {
}
func (app *App) Start(service service.Service) error {
if service != nil {
go func() {
app.AppMain()
}()
} else {
app.AppMain()
}
return nil
}
func (app *App) AppMain() {
if err := ConfigLoad(app.proxy, app.flags); err != nil {
dlog.Fatal(err)
}
if err := PidFileCreate(); err != nil {
dlog.Errorf("Unable to create the PID file: [%v]", err)
}
if err := app.proxy.InitPluginsGlobals(); err != nil {
proxy := &app.proxy
if err := InitPluginsGlobals(&proxy.pluginsGlobals, proxy); err != nil {
dlog.Fatal(err)
}
app.quit = make(chan struct{})
app.wg.Add(1)
app.proxy.StartProxy()
runtime.GC()
if service != nil {
go func() {
app.AppMain(proxy)
}()
} else {
app.AppMain(proxy)
}
return nil
}
func (app *App) AppMain(proxy *Proxy) {
pidfile.Write()
proxy.StartProxy()
<-app.quit
dlog.Notice("Quit signal received...")
app.wg.Done()
}
func (app *App) Stop(service service.Service) error {
if err := PidFileRemove(); err != nil {
dlog.Warnf("Failed to remove the PID file: [%v]", err)
if pidFilePath := pidfile.GetPidfilePath(); len(pidFilePath) > 1 {
os.Remove(pidFilePath)
}
dlog.Notice("Stopped.")
return nil

View file

@ -1,25 +1,19 @@
//go:build !windows
// +build !windows
package main
import (
"errors"
"net"
"time"
"github.com/jedisct1/dlog"
)
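// NetProbe waits, up to roughly `timeout` seconds, for the given UDP address to
// become reachable; it is used to detect basic network connectivity at startup.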
func NetProbe(proxy *Proxy, address string, timeout int) error {
if len(address) <= 0 || timeout == 0 {
func NetProbe(address string, timeout int) error {
if len(address) <= 0 || timeout <= 0 {
return nil
}
if captivePortalHandler, err := ColdStart(proxy); err == nil {
if captivePortalHandler != nil {
defer captivePortalHandler.Stop()
}
} else {
dlog.Critical(err)
}
remoteUDPAddr, err := net.ResolveUDPAddr("udp", address)
if err != nil {
return err
@ -28,7 +22,7 @@ func NetProbe(proxy *Proxy, address string, timeout int) error {
if timeout < 0 {
timeout = MaxTimeout
} else {
timeout = Min(MaxTimeout, timeout)
timeout = Max(MaxTimeout, timeout)
}
for tries := timeout; tries > 0; tries-- {
pc, err := net.DialUDP("udp", nil, remoteUDPAddr)
@ -45,6 +39,7 @@ func NetProbe(proxy *Proxy, address string, timeout int) error {
dlog.Notice("Network connectivity detected")
return nil
}
dlog.Error("Timeout while waiting for network connectivity")
return nil
es := "Timeout while waiting for network connectivity"
dlog.Error(es)
return errors.New(es)
}

View file

@ -1,23 +1,17 @@
package main
import (
"errors"
"net"
"time"
"time"
"github.com/jedisct1/dlog"
)
func NetProbe(proxy *Proxy, address string, timeout int) error {
func NetProbe(address string, timeout int) error {
if len(address) <= 0 || timeout == 0 {
return nil
}
if captivePortalHandler, err := ColdStart(proxy); err == nil {
if captivePortalHandler != nil {
defer captivePortalHandler.Stop()
}
} else {
dlog.Critical(err)
}
remoteUDPAddr, err := net.ResolveUDPAddr("udp", address)
if err != nil {
return err
@ -26,15 +20,14 @@ func NetProbe(proxy *Proxy, address string, timeout int) error {
if timeout < 0 {
timeout = MaxTimeout
} else {
timeout = Min(MaxTimeout, timeout)
timeout = Max(MaxTimeout, timeout)
}
for tries := timeout; tries > 0; tries-- {
pc, err := net.DialUDP("udp", nil, remoteUDPAddr)
if err == nil {
// Write at least 1 byte. This ensures that sockets are ready to use for writing.
// Windows specific: during the system startup, sockets can be created but the underlying buffers may not be
// setup yet. If this is the case Write fails with WSAENOBUFS: "An operation on a socket could not be
// performed because the system lacked sufficient buffer space or because a queue was full"
// Windows specific: during the system startup, sockets can be created but the underlying buffers may not be setup yet. If this is the case
// Write fails with WSAENOBUFS: "An operation on a socket could not be performed because the system lacked sufficient buffer space or because a queue was full"
_, err = pc.Write([]byte{0})
}
if err != nil {
@ -50,6 +43,7 @@ func NetProbe(proxy *Proxy, address string, timeout int) error {
dlog.Notice("Network connectivity detected")
return nil
}
dlog.Error("Timeout while waiting for network connectivity")
return nil
es := "Timeout while waiting for network connectivity"
dlog.Error(es)
return errors.New(es)
}

View file

@ -1,191 +0,0 @@
package main
import (
"crypto/subtle"
"encoding/binary"
"fmt"
"github.com/jedisct1/dlog"
hpkecompact "github.com/jedisct1/go-hpke-compact"
)
const (
odohVersion = uint16(0x0001)
odohTestVersion = uint16(0xff06)
maxODoHConfigs = 10
)
type ODoHTargetConfig struct {
suite *hpkecompact.Suite
keyID []byte
publicKey []byte
}
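// encodeLengthValue prepends a 2-byte big-endian length prefix to b, as used by
// the ODoH wire format.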
func encodeLengthValue(b []byte) []byte {
lengthBuffer := make([]byte, 2)
binary.BigEndian.PutUint16(lengthBuffer, uint16(len(b)))
return append(lengthBuffer, b...)
}
func parseODoHTargetConfig(config []byte) (ODoHTargetConfig, error) {
if len(config) < 8 {
return ODoHTargetConfig{}, fmt.Errorf("Malformed config")
}
kemID := binary.BigEndian.Uint16(config[0:2])
kdfID := binary.BigEndian.Uint16(config[2:4])
aeadID := binary.BigEndian.Uint16(config[4:6])
publicKeyLength := binary.BigEndian.Uint16(config[6:8])
publicKey := config[8:]
if len(publicKey) != int(publicKeyLength) {
return ODoHTargetConfig{}, fmt.Errorf("Malformed config")
}
suite, err := hpkecompact.NewSuite(hpkecompact.KemID(kemID), hpkecompact.KdfID(kdfID), hpkecompact.AeadID(aeadID))
if err != nil {
return ODoHTargetConfig{}, err
}
_, _, err = suite.NewClientContext(publicKey, []byte("odoh query"), nil)
if err != nil {
return ODoHTargetConfig{}, err
}
keyID, err := suite.Expand(suite.Extract(config, nil), []byte("odoh key id"), uint16(suite.Hash().Size()))
if err != nil {
return ODoHTargetConfig{}, err
}
return ODoHTargetConfig{
suite: suite,
publicKey: publicKey,
keyID: encodeLengthValue(keyID),
}, nil
}
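// parseODoHTargetConfigs parses a length-prefixed list of ODoH target
// configurations, keeping at most maxODoHConfigs entries with a supported version
// and skipping entries that fail to parse.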
func parseODoHTargetConfigs(configs []byte) ([]ODoHTargetConfig, error) {
if len(configs) <= 2 {
return nil, fmt.Errorf("Server didn't return any ODoH configurations")
}
length := binary.BigEndian.Uint16(configs)
if len(configs) != int(length)+2 {
return nil, fmt.Errorf("Malformed configs")
}
targets := make([]ODoHTargetConfig, 0)
offset := 2
for {
if offset+4 > len(configs) || len(targets) >= maxODoHConfigs {
break
}
configVersion := binary.BigEndian.Uint16(configs[offset : offset+2])
configLength := binary.BigEndian.Uint16(configs[offset+2 : offset+4])
if configVersion == odohVersion || configVersion == odohTestVersion {
if configVersion != odohVersion {
dlog.Debugf("Server still uses the legacy 0x%x ODoH version", configVersion)
}
target, err := parseODoHTargetConfig(configs[offset+4 : offset+4+int(configLength)])
if err == nil {
targets = append(targets, target)
}
}
offset = offset + int(configLength) + 4
}
return targets, nil
}
type ODoHQuery struct {
suite *hpkecompact.Suite
ctx hpkecompact.ClientContext
odohPlaintext []byte
odohMessage []byte
}
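// encryptQuery seals a DNS query for this target: the query is wrapped into an
// ODoH plaintext (length prefix, query, empty padding), encrypted to the target
// public key with HPKE, and packaged into an ODoH query message carrying the key
// identifier.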
func (t ODoHTargetConfig) encryptQuery(query []byte) (ODoHQuery, error) {
clientCtx, encryptedSharedSecret, err := t.suite.NewClientContext(t.publicKey, []byte("odoh query"), nil)
if err != nil {
return ODoHQuery{}, err
}
odohPlaintext := make([]byte, 4+len(query))
binary.BigEndian.PutUint16(odohPlaintext[0:2], uint16(len(query)))
copy(odohPlaintext[2:], query)
aad := append([]byte{0x01}, t.keyID...)
ciphertext, err := clientCtx.EncryptToServer(odohPlaintext, aad)
if err != nil {
return ODoHQuery{}, err
}
encryptedMessage := encodeLengthValue(append(encryptedSharedSecret, ciphertext...))
odohMessage := append(append([]byte{0x01}, t.keyID...), encryptedMessage...)
return ODoHQuery{
suite: t.suite,
odohPlaintext: odohPlaintext,
odohMessage: odohMessage,
ctx: clientCtx,
}, nil
}
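// decryptResponse opens an ODoH response: a response key and nonce are derived
// from the exported secret and the response nonce, the ciphertext is decrypted,
// and the zero padding is verified before the embedded DNS message is returned.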
func (q ODoHQuery) decryptResponse(response []byte) ([]byte, error) {
if len(response) < 3 {
return nil, fmt.Errorf("Malformed response")
}
messageType := response[0]
if messageType != uint8(0x02) {
return nil, fmt.Errorf("Malformed response")
}
responseNonceLength := binary.BigEndian.Uint16(response[1:3])
if len(response) < 5+int(responseNonceLength) {
return nil, fmt.Errorf("Malformed response")
}
responseNonceEnc := response[1 : 3+responseNonceLength]
secret, err := q.ctx.Export([]byte("odoh response"), q.suite.KeyBytes)
if err != nil {
return nil, err
}
salt := append(q.odohPlaintext, responseNonceEnc...)
prk := q.suite.Extract(secret, salt)
key, err := q.suite.Expand(prk, []byte("odoh key"), q.suite.KeyBytes)
if err != nil {
return nil, err
}
nonce, err := q.suite.Expand(prk, []byte("odoh nonce"), q.suite.NonceBytes)
if err != nil {
return nil, err
}
cipher, err := q.suite.NewRawCipher(key)
if err != nil {
return nil, err
}
ctLength := binary.BigEndian.Uint16(response[3+int(responseNonceLength) : 5+int(responseNonceLength)])
if int(ctLength) != len(response[5+int(responseNonceLength):]) {
return nil, fmt.Errorf("Malformed response")
}
ct := response[5+int(responseNonceLength):]
aad := response[0 : 3+int(responseNonceLength)]
responsePlaintext, err := cipher.Open(nil, nonce, ct, aad)
if err != nil {
return nil, err
}
responseLength := binary.BigEndian.Uint16(responsePlaintext[0:2])
valid := 1
for i := 4 + int(responseLength); i < len(responsePlaintext); i++ {
valid &= subtle.ConstantTimeByteEq(response[i], 0x00)
}
if valid != 1 {
return nil, fmt.Errorf("Malformed response")
}
return responsePlaintext[2 : 2+int(responseLength)], nil
}

View file

@ -30,7 +30,7 @@ type PatternMatcher struct {
indirectVals map[string]interface{}
}
func NewPatternMatcher() *PatternMatcher {
func NewPatternPatcher() *PatternMatcher {
patternMatcher := PatternMatcher{
blockedPrefixes: critbitgo.NewTrie(),
blockedSuffixes: critbitgo.NewTrie(),
@ -51,7 +51,7 @@ func isGlobCandidate(str string) bool {
return false
}
func (patternMatcher *PatternMatcher) Add(pattern string, val interface{}, position int) error {
func (patternMatcher *PatternMatcher) Add(pattern string, val interface{}, position int) (PatternType, error) {
leadingStar := strings.HasPrefix(pattern, "*")
trailingStar := strings.HasSuffix(pattern, "*")
exact := strings.HasPrefix(pattern, "=")
@ -60,24 +60,24 @@ func (patternMatcher *PatternMatcher) Add(pattern string, val interface{}, posit
patternType = PatternTypePattern
_, err := filepath.Match(pattern, "example.com")
if len(pattern) < 2 || err != nil {
return fmt.Errorf("Syntax error in block rules at pattern %d", position)
return patternType, fmt.Errorf("Syntax error in block rules at pattern %d", position)
}
} else if leadingStar && trailingStar {
patternType = PatternTypeSubstring
if len(pattern) < 3 {
return fmt.Errorf("Syntax error in block rules at pattern %d", position)
return patternType, fmt.Errorf("Syntax error in block rules at pattern %d", position)
}
pattern = pattern[1 : len(pattern)-1]
} else if trailingStar {
patternType = PatternTypePrefix
if len(pattern) < 2 {
return fmt.Errorf("Syntax error in block rules at pattern %d", position)
return patternType, fmt.Errorf("Syntax error in block rules at pattern %d", position)
}
pattern = pattern[:len(pattern)-1]
} else if exact {
patternType = PatternTypeExact
if len(pattern) < 2 {
return fmt.Errorf("Syntax error in block rules at pattern %d", position)
return patternType, fmt.Errorf("Syntax error in block rules at pattern %d", position)
}
pattern = pattern[1:]
} else {
@ -112,7 +112,7 @@ func (patternMatcher *PatternMatcher) Add(pattern string, val interface{}, posit
default:
dlog.Fatal("Unexpected block type")
}
return nil
return patternType, nil
}
func (patternMatcher *PatternMatcher) Eval(qName string) (reject bool, reason string, val interface{}) {
@ -120,13 +120,9 @@ func (patternMatcher *PatternMatcher) Eval(qName string) (reject bool, reason st
return false, "", nil
}
if xval := patternMatcher.blockedExact[qName]; xval != nil {
return true, qName, xval
}
revQname := StringReverse(qName)
if match, xval, found := patternMatcher.blockedSuffixes.LongestPrefix([]byte(revQname)); found {
if len(match) == len(revQname) || revQname[len(match)] == '.' {
if len(match) == len(qName) || revQname[len(match)] == '.' {
return true, "*." + StringReverse(string(match)), xval
}
if len(match) < len(revQname) && len(revQname) > 0 {
@ -157,5 +153,9 @@ func (patternMatcher *PatternMatcher) Eval(qName string) (reject bool, reason st
}
}
if xval := patternMatcher.blockedExact[qName]; xval != nil {
return true, qName, xval
}
return false, "", nil
}

View file

@ -1,7 +0,0 @@
//go:build !unix
package main
func WarnIfMaybeWritableByOtherUsers(p string) {
// No-op
}

View file

@ -1,38 +0,0 @@
//go:build unix
package main
import (
"os"
"path"
"github.com/jedisct1/dlog"
)
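// maybeWritableByOtherUsers walks from p up to the filesystem root and reports
// whether any path component is writable by other users (world-writable sticky
// directories are tolerated), returning the first offending component.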
func maybeWritableByOtherUsers(p string) (bool, string, error) {
p = path.Clean(p)
for p != "/" && p != "." {
st, err := os.Stat(p)
if err != nil {
return false, p, err
}
mode := st.Mode()
if mode.Perm()&2 != 0 && !(st.IsDir() && mode&os.ModeSticky == os.ModeSticky) {
return true, p, nil
}
p = path.Dir(p)
}
return false, "", nil
}
func WarnIfMaybeWritableByOtherUsers(p string) {
if ok, px, err := maybeWritableByOtherUsers(p); ok {
if px == p {
dlog.Criticalf("[%s] is writable by other system users - If this is not intentional, it is recommended to fix the access permissions", p)
} else {
dlog.Warnf("[%s] can be modified by other system users because [%s] is writable by other users - If this is not intentional, it is recommended to fix the access permissions", p, px)
}
} else if err != nil {
dlog.Warnf("Error while checking if [%s] is accessible: [%s] : [%s]", p, px, err)
}
}

View file

@ -1,29 +0,0 @@
package main
import (
"flag"
"os"
"path/filepath"
"strconv"
"github.com/dchest/safefile"
)
var pidFile = flag.String("pidfile", "", "Store the PID into a file")
func PidFileCreate() error {
if pidFile == nil || len(*pidFile) == 0 {
return nil
}
if err := os.MkdirAll(filepath.Dir(*pidFile), 0o755); err != nil {
return err
}
return safefile.WriteFile(*pidFile, []byte(strconv.Itoa(os.Getpid())), 0o644)
}
func PidFileRemove() error {
if pidFile == nil || len(*pidFile) == 0 {
return nil
}
return os.Remove(*pidFile)
}

View file

@ -1,157 +0,0 @@
package main
import (
"errors"
"fmt"
"io"
"net"
"strings"
"time"
iradix "github.com/hashicorp/go-immutable-radix"
"github.com/jedisct1/dlog"
"github.com/miekg/dns"
)
type PluginAllowedIP struct {
allowedPrefixes *iradix.Tree
allowedIPs map[string]interface{}
logger io.Writer
format string
}
func (plugin *PluginAllowedIP) Name() string {
return "allow_ip"
}
func (plugin *PluginAllowedIP) Description() string {
return "Allows DNS queries containing specific IP addresses"
}
func (plugin *PluginAllowedIP) Init(proxy *Proxy) error {
dlog.Noticef("Loading the set of allowed IP rules from [%s]", proxy.allowedIPFile)
lines, err := ReadTextFile(proxy.allowedIPFile)
if err != nil {
return err
}
plugin.allowedPrefixes = iradix.New()
plugin.allowedIPs = make(map[string]interface{})
for lineNo, line := range strings.Split(lines, "\n") {
line = TrimAndStripInlineComments(line)
if len(line) == 0 {
continue
}
ip := net.ParseIP(line)
trailingStar := strings.HasSuffix(line, "*")
if len(line) < 2 || (ip != nil && trailingStar) {
dlog.Errorf("Suspicious allowed IP rule [%s] at line %d", line, lineNo)
continue
}
if trailingStar {
line = line[:len(line)-1]
}
if strings.HasSuffix(line, ":") || strings.HasSuffix(line, ".") {
line = line[:len(line)-1]
}
if len(line) == 0 {
dlog.Errorf("Empty allowed IP rule at line %d", lineNo)
continue
}
if strings.Contains(line, "*") {
dlog.Errorf("Invalid rule: [%s] - wildcards can only be used as a suffix at line %d", line, lineNo)
continue
}
line = strings.ToLower(line)
if trailingStar {
plugin.allowedPrefixes, _, _ = plugin.allowedPrefixes.Insert([]byte(line), 0)
} else {
plugin.allowedIPs[line] = true
}
}
if len(proxy.allowedIPLogFile) == 0 {
return nil
}
plugin.logger = Logger(proxy.logMaxSize, proxy.logMaxAge, proxy.logMaxBackups, proxy.allowedIPLogFile)
plugin.format = proxy.allowedIPFormat
return nil
}
func (plugin *PluginAllowedIP) Drop() error {
return nil
}
func (plugin *PluginAllowedIP) Reload() error {
return nil
}
func (plugin *PluginAllowedIP) Eval(pluginsState *PluginsState, msg *dns.Msg) error {
answers := msg.Answer
if len(answers) == 0 {
return nil
}
allowed, reason, ipStr := false, "", ""
for _, answer := range answers {
header := answer.Header()
Rrtype := header.Rrtype
if header.Class != dns.ClassINET || (Rrtype != dns.TypeA && Rrtype != dns.TypeAAAA) {
continue
}
if Rrtype == dns.TypeA {
ipStr = answer.(*dns.A).A.String()
} else if Rrtype == dns.TypeAAAA {
ipStr = answer.(*dns.AAAA).AAAA.String() // IPv4-mapped IPv6 addresses are converted to IPv4
}
if _, found := plugin.allowedIPs[ipStr]; found {
allowed, reason = true, ipStr
break
}
match, _, found := plugin.allowedPrefixes.Root().LongestPrefix([]byte(ipStr))
if found {
if len(match) == len(ipStr) || (ipStr[len(match)] == '.' || ipStr[len(match)] == ':') {
allowed, reason = true, string(match)+"*"
break
}
}
}
if allowed {
pluginsState.sessionData["whitelisted"] = true
if plugin.logger != nil {
qName := pluginsState.qName
var clientIPStr string
switch pluginsState.clientProto {
case "udp":
clientIPStr = (*pluginsState.clientAddr).(*net.UDPAddr).IP.String()
case "tcp", "local_doh":
clientIPStr = (*pluginsState.clientAddr).(*net.TCPAddr).IP.String()
default:
// Ignore internal flow.
return nil
}
var line string
if plugin.format == "tsv" {
now := time.Now()
year, month, day := now.Date()
hour, minute, second := now.Clock()
tsStr := fmt.Sprintf("[%d-%02d-%02d %02d:%02d:%02d]", year, int(month), day, hour, minute, second)
line = fmt.Sprintf(
"%s\t%s\t%s\t%s\t%s\n",
tsStr,
clientIPStr,
StringQuote(qName),
StringQuote(ipStr),
StringQuote(reason),
)
} else if plugin.format == "ltsv" {
line = fmt.Sprintf("time:%d\thost:%s\tqname:%s\tip:%s\tmessage:%s\n", time.Now().Unix(), clientIPStr, StringQuote(qName), StringQuote(ipStr), StringQuote(reason))
} else {
dlog.Fatalf("Unexpected log format: [%s]", plugin.format)
}
if plugin.logger == nil {
return errors.New("Log file not initialized")
}
_, _ = plugin.logger.Write([]byte(line))
}
}
return nil
}
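
Prefix rules such as 192.168.* are stored with the trailing star (and any trailing dot or colon) removed, and a candidate IP only matches when it ends exactly at the stored prefix or at an address separator, so 192.168.* cannot match 192.1688.1.1. A small stdlib-only sketch of that boundary rule; ipMatchesPrefix is a hypothetical helper and a direct string test replaces the radix-tree lookup.

package main

import (
	"fmt"
	"strings"
)

// ipMatchesPrefix applies the same boundary rule as the plugin: the candidate
// must start with the stored prefix, and the next character, if any, must be
// an address separator ('.' for IPv4, ':' for IPv6).
func ipMatchesPrefix(ipStr, prefix string) bool {
	if !strings.HasPrefix(ipStr, prefix) {
		return false
	}
	return len(ipStr) == len(prefix) || ipStr[len(prefix)] == '.' || ipStr[len(prefix)] == ':'
}

func main() {
	fmt.Println(ipMatchesPrefix("192.168.1.10", "192.168")) // true
	fmt.Println(ipMatchesPrefix("192.1688.1.1", "192.168")) // false: no separator after prefix
	fmt.Println(ipMatchesPrefix("fd00:1234::1", "fd00"))    // true
}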

View file

@ -3,20 +3,21 @@ package main
import (
"errors"
"fmt"
"io"
"net"
"strings"
"time"
"unicode"
iradix "github.com/hashicorp/go-immutable-radix"
"github.com/hashicorp/go-immutable-radix"
"github.com/jedisct1/dlog"
"github.com/miekg/dns"
"gopkg.in/natefinch/lumberjack.v2"
)
type PluginBlockIP struct {
blockedPrefixes *iradix.Tree
blockedIPs map[string]interface{}
logger io.Writer
logger *lumberjack.Logger
format string
}
@ -30,15 +31,15 @@ func (plugin *PluginBlockIP) Description() string {
func (plugin *PluginBlockIP) Init(proxy *Proxy) error {
dlog.Noticef("Loading the set of IP blocking rules from [%s]", proxy.blockIPFile)
lines, err := ReadTextFile(proxy.blockIPFile)
bin, err := ReadTextFile(proxy.blockIPFile)
if err != nil {
return err
}
plugin.blockedPrefixes = iradix.New()
plugin.blockedIPs = make(map[string]interface{})
for lineNo, line := range strings.Split(lines, "\n") {
line = TrimAndStripInlineComments(line)
if len(line) == 0 {
for lineNo, line := range strings.Split(string(bin), "\n") {
line = strings.TrimFunc(line, unicode.IsSpace)
if len(line) == 0 || strings.HasPrefix(line, "#") {
continue
}
ip := net.ParseIP(line)
@ -71,7 +72,7 @@ func (plugin *PluginBlockIP) Init(proxy *Proxy) error {
if len(proxy.blockIPLogFile) == 0 {
return nil
}
plugin.logger = Logger(proxy.logMaxSize, proxy.logMaxAge, proxy.logMaxBackups, proxy.blockIPLogFile)
plugin.logger = &lumberjack.Logger{LocalTime: true, MaxSize: proxy.logMaxSize, MaxAge: proxy.logMaxAge, MaxBackups: proxy.logMaxBackups, Filename: proxy.blockIPLogFile, Compress: true}
plugin.format = proxy.blockIPFormat
return nil
@ -121,31 +122,27 @@ func (plugin *PluginBlockIP) Eval(pluginsState *PluginsState, msg *dns.Msg) erro
pluginsState.action = PluginsActionReject
pluginsState.returnCode = PluginsReturnCodeReject
if plugin.logger != nil {
qName := pluginsState.qName
var clientIPStr string
switch pluginsState.clientProto {
case "udp":
clientIPStr = (*pluginsState.clientAddr).(*net.UDPAddr).IP.String()
case "tcp", "local_doh":
clientIPStr = (*pluginsState.clientAddr).(*net.TCPAddr).IP.String()
default:
// Ignore internal flow.
questions := msg.Question
if len(questions) != 1 {
return nil
}
qName := strings.ToLower(StripTrailingDot(questions[0].Name))
if len(qName) < 2 {
return nil
}
var clientIPStr string
if pluginsState.clientProto == "udp" {
clientIPStr = (*pluginsState.clientAddr).(*net.UDPAddr).IP.String()
} else {
clientIPStr = (*pluginsState.clientAddr).(*net.TCPAddr).IP.String()
}
var line string
if plugin.format == "tsv" {
now := time.Now()
year, month, day := now.Date()
hour, minute, second := now.Clock()
tsStr := fmt.Sprintf("[%d-%02d-%02d %02d:%02d:%02d]", year, int(month), day, hour, minute, second)
line = fmt.Sprintf(
"%s\t%s\t%s\t%s\t%s\n",
tsStr,
clientIPStr,
StringQuote(qName),
StringQuote(ipStr),
StringQuote(reason),
)
line = fmt.Sprintf("%s\t%s\t%s\t%s\t%s\n", tsStr, clientIPStr, StringQuote(qName), StringQuote(ipStr), StringQuote(reason))
} else if plugin.format == "ltsv" {
line = fmt.Sprintf("time:%d\thost:%s\tqname:%s\tip:%s\tmessage:%s\n", time.Now().Unix(), clientIPStr, StringQuote(qName), StringQuote(ipStr), StringQuote(reason))
} else {
@ -154,7 +151,7 @@ func (plugin *PluginBlockIP) Eval(pluginsState *PluginsState, msg *dns.Msg) erro
if plugin.logger == nil {
return errors.New("Log file not initialized")
}
_, _ = plugin.logger.Write([]byte(line))
plugin.logger.Write([]byte(line))
}
}
return nil

View file

@ -29,18 +29,23 @@ func (plugin *PluginBlockIPv6) Reload() error {
}
func (plugin *PluginBlockIPv6) Eval(pluginsState *PluginsState, msg *dns.Msg) error {
question := msg.Question[0]
questions := msg.Question
if len(questions) != 1 {
return nil
}
question := questions[0]
if question.Qclass != dns.ClassINET || question.Qtype != dns.TypeAAAA {
return nil
}
synth := EmptyResponseFromMessage(msg)
hinfo := new(dns.HINFO)
hinfo.Hdr = dns.RR_Header{
Name: question.Name, Rrtype: dns.TypeHINFO,
Class: dns.ClassINET, Ttl: 86400,
synth, err := EmptyResponseFromMessage(msg)
if err != nil {
return err
}
hinfo := new(dns.HINFO)
hinfo.Hdr = dns.RR_Header{Name: question.Name, Rrtype: dns.TypeHINFO,
Class: dns.ClassINET, Ttl: 86400}
hinfo.Cpu = "AAAA queries have been locally blocked by dnscrypt-proxy"
hinfo.Os = "Set block_ipv6 to false to disable that feature"
hinfo.Os = "Set block_ipv6 to false to disable this feature"
synth.Answer = []dns.RR{hinfo}
qName := question.Name
i := strings.Index(qName, ".")
@ -56,10 +61,8 @@ func (plugin *PluginBlockIPv6) Eval(pluginsState *PluginsState, msg *dns.Msg) er
soa.Minttl = 2400
soa.Expire = 604800
soa.Retry = 300
soa.Hdr = dns.RR_Header{
Name: parentZone, Rrtype: dns.TypeSOA,
Class: dns.ClassINET, Ttl: 60,
}
soa.Hdr = dns.RR_Header{Name: parentZone, Rrtype: dns.TypeSOA,
Class: dns.ClassINET, Ttl: 60}
synth.Ns = []dns.RR{soa}
pluginsState.synthResponse = synth
pluginsState.action = PluginsActionSynth

View file

@ -3,80 +3,23 @@ package main
import (
"errors"
"fmt"
"io"
"net"
"strings"
"time"
"unicode"
"github.com/jedisct1/dlog"
"github.com/miekg/dns"
lumberjack "gopkg.in/natefinch/lumberjack.v2"
)
type BlockedNames struct {
type PluginBlockName struct {
allWeeklyRanges *map[string]WeeklyRanges
patternMatcher *PatternMatcher
logger io.Writer
logger *lumberjack.Logger
format string
}
const aliasesLimit = 8
var blockedNames *BlockedNames
func (blockedNames *BlockedNames) check(pluginsState *PluginsState, qName string, aliasFor *string) (bool, error) {
reject, reason, xweeklyRanges := blockedNames.patternMatcher.Eval(qName)
if aliasFor != nil {
reason = reason + " (alias for [" + *aliasFor + "])"
}
var weeklyRanges *WeeklyRanges
if xweeklyRanges != nil {
weeklyRanges = xweeklyRanges.(*WeeklyRanges)
}
if reject {
if weeklyRanges != nil && !weeklyRanges.Match() {
reject = false
}
}
if !reject {
return false, nil
}
pluginsState.action = PluginsActionReject
pluginsState.returnCode = PluginsReturnCodeReject
if blockedNames.logger != nil {
var clientIPStr string
switch pluginsState.clientProto {
case "udp":
clientIPStr = (*pluginsState.clientAddr).(*net.UDPAddr).IP.String()
case "tcp", "local_doh":
clientIPStr = (*pluginsState.clientAddr).(*net.TCPAddr).IP.String()
default:
// Ignore internal flow.
return false, nil
}
var line string
if blockedNames.format == "tsv" {
now := time.Now()
year, month, day := now.Date()
hour, minute, second := now.Clock()
tsStr := fmt.Sprintf("[%d-%02d-%02d %02d:%02d:%02d]", year, int(month), day, hour, minute, second)
line = fmt.Sprintf("%s\t%s\t%s\t%s\n", tsStr, clientIPStr, StringQuote(qName), StringQuote(reason))
} else if blockedNames.format == "ltsv" {
line = fmt.Sprintf("time:%d\thost:%s\tqname:%s\tmessage:%s\n", time.Now().Unix(), clientIPStr, StringQuote(qName), StringQuote(reason))
} else {
dlog.Fatalf("Unexpected log format: [%s]", blockedNames.format)
}
if blockedNames.logger == nil {
return false, errors.New("Log file not initialized")
}
_, _ = blockedNames.logger.Write([]byte(line))
}
return true, nil
}
// ---
type PluginBlockName struct{}
func (plugin *PluginBlockName) Name() string {
return "block_name"
}
@ -87,48 +30,45 @@ func (plugin *PluginBlockName) Description() string {
func (plugin *PluginBlockName) Init(proxy *Proxy) error {
dlog.Noticef("Loading the set of blocking rules from [%s]", proxy.blockNameFile)
lines, err := ReadTextFile(proxy.blockNameFile)
bin, err := ReadTextFile(proxy.blockNameFile)
if err != nil {
return err
}
xBlockedNames := BlockedNames{
allWeeklyRanges: proxy.allWeeklyRanges,
patternMatcher: NewPatternMatcher(),
}
for lineNo, line := range strings.Split(lines, "\n") {
line = TrimAndStripInlineComments(line)
if len(line) == 0 {
plugin.allWeeklyRanges = proxy.allWeeklyRanges
plugin.patternMatcher = NewPatternPatcher()
for lineNo, line := range strings.Split(string(bin), "\n") {
line = strings.TrimFunc(line, unicode.IsSpace)
if len(line) == 0 || strings.HasPrefix(line, "#") {
continue
}
parts := strings.Split(line, "@")
timeRangeName := ""
if len(parts) == 2 {
line = strings.TrimSpace(parts[0])
timeRangeName = strings.TrimSpace(parts[1])
line = strings.TrimFunc(parts[0], unicode.IsSpace)
timeRangeName = strings.TrimFunc(parts[1], unicode.IsSpace)
} else if len(parts) > 2 {
dlog.Errorf("Syntax error in block rules at line %d -- Unexpected @ character", 1+lineNo)
continue
}
var weeklyRanges *WeeklyRanges
if len(timeRangeName) > 0 {
weeklyRangesX, ok := (*xBlockedNames.allWeeklyRanges)[timeRangeName]
weeklyRangesX, ok := (*plugin.allWeeklyRanges)[timeRangeName]
if !ok {
dlog.Errorf("Time range [%s] not found at line %d", timeRangeName, 1+lineNo)
} else {
weeklyRanges = &weeklyRangesX
}
}
if err := xBlockedNames.patternMatcher.Add(line, weeklyRanges, lineNo+1); err != nil {
if _, err := plugin.patternMatcher.Add(line, weeklyRanges, lineNo+1); err != nil {
dlog.Error(err)
continue
}
}
blockedNames = &xBlockedNames
if len(proxy.blockNameLogFile) == 0 {
return nil
}
blockedNames.logger = Logger(proxy.logMaxSize, proxy.logMaxAge, proxy.logMaxBackups, proxy.blockNameLogFile)
blockedNames.format = proxy.blockNameFormat
plugin.logger = &lumberjack.Logger{LocalTime: true, MaxSize: proxy.logMaxSize, MaxAge: proxy.logMaxAge, MaxBackups: proxy.logMaxBackups, Filename: proxy.blockNameLogFile, Compress: true}
plugin.format = proxy.blockNameFormat
return nil
}
@ -142,69 +82,50 @@ func (plugin *PluginBlockName) Reload() error {
}
func (plugin *PluginBlockName) Eval(pluginsState *PluginsState, msg *dns.Msg) error {
if blockedNames == nil || pluginsState.sessionData["whitelisted"] != nil {
if pluginsState.sessionData["whitelisted"] != nil {
return nil
}
_, err := blockedNames.check(pluginsState, pluginsState.qName, nil)
return err
}
// ---
type PluginBlockNameResponse struct{}
func (plugin *PluginBlockNameResponse) Name() string {
return "block_name"
}
func (plugin *PluginBlockNameResponse) Description() string {
return "Block DNS responses matching name patterns"
}
func (plugin *PluginBlockNameResponse) Init(proxy *Proxy) error {
return nil
}
func (plugin *PluginBlockNameResponse) Drop() error {
return nil
}
func (plugin *PluginBlockNameResponse) Reload() error {
return nil
}
func (plugin *PluginBlockNameResponse) Eval(pluginsState *PluginsState, msg *dns.Msg) error {
if blockedNames == nil || pluginsState.sessionData["whitelisted"] != nil {
questions := msg.Question
if len(questions) != 1 {
return nil
}
aliasFor := pluginsState.qName
aliasesLeft := aliasesLimit
answers := msg.Answer
for _, answer := range answers {
header := answer.Header()
if header.Class != dns.ClassINET {
continue
qName := strings.ToLower(StripTrailingDot(questions[0].Name))
reject, reason, xweeklyRanges := plugin.patternMatcher.Eval(qName)
var weeklyRanges *WeeklyRanges
if xweeklyRanges != nil {
weeklyRanges = xweeklyRanges.(*WeeklyRanges)
}
if reject {
if weeklyRanges != nil && !weeklyRanges.Match() {
reject = false
}
var target string
if header.Rrtype == dns.TypeCNAME {
target = answer.(*dns.CNAME).Target
} else if header.Rrtype == dns.TypeSVCB && answer.(*dns.SVCB).Priority == 0 {
target = answer.(*dns.SVCB).Target
} else if header.Rrtype == dns.TypeHTTPS && answer.(*dns.HTTPS).Priority == 0 {
target = answer.(*dns.HTTPS).Target
} else {
continue
}
target, err := NormalizeQName(target)
if err != nil {
return err
}
if blocked, err := blockedNames.check(pluginsState, target, &aliasFor); blocked || err != nil {
return err
}
aliasesLeft--
if aliasesLeft == 0 {
break
}
if reject {
pluginsState.action = PluginsActionReject
pluginsState.returnCode = PluginsReturnCodeReject
if plugin.logger != nil {
var clientIPStr string
if pluginsState.clientProto == "udp" {
clientIPStr = (*pluginsState.clientAddr).(*net.UDPAddr).IP.String()
} else {
clientIPStr = (*pluginsState.clientAddr).(*net.TCPAddr).IP.String()
}
var line string
if plugin.format == "tsv" {
now := time.Now()
year, month, day := now.Date()
hour, minute, second := now.Clock()
tsStr := fmt.Sprintf("[%d-%02d-%02d %02d:%02d:%02d]", year, int(month), day, hour, minute, second)
line = fmt.Sprintf("%s\t%s\t%s\t%s\n", tsStr, clientIPStr, StringQuote(qName), StringQuote(reason))
} else if plugin.format == "ltsv" {
line = fmt.Sprintf("time:%d\thost:%s\tqname:%s\tmessage:%s\n", time.Now().Unix(), clientIPStr, StringQuote(qName), StringQuote(reason))
} else {
dlog.Fatalf("Unexpected log format: [%s]", plugin.format)
}
if plugin.logger == nil {
return errors.New("Log file not initialized")
}
plugin.logger.Write([]byte(line))
}
}
return nil

View file

@ -1,202 +0,0 @@
package main
import (
"github.com/k-sone/critbitgo"
"github.com/miekg/dns"
)
var undelegatedSet = []string{
"0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa",
"0.in-addr.arpa",
"1",
"1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa",
"10.in-addr.arpa",
"100.100.in-addr.arpa",
"100.51.198.in-addr.arpa",
"101.100.in-addr.arpa",
"102.100.in-addr.arpa",
"103.100.in-addr.arpa",
"104.100.in-addr.arpa",
"105.100.in-addr.arpa",
"106.100.in-addr.arpa",
"107.100.in-addr.arpa",
"108.100.in-addr.arpa",
"109.100.in-addr.arpa",
"110.100.in-addr.arpa",
"111.100.in-addr.arpa",
"112.100.in-addr.arpa",
"113.0.203.in-addr.arpa",
"113.100.in-addr.arpa",
"114.100.in-addr.arpa",
"115.100.in-addr.arpa",
"116.100.in-addr.arpa",
"117.100.in-addr.arpa",
"118.100.in-addr.arpa",
"119.100.in-addr.arpa",
"120.100.in-addr.arpa",
"121.100.in-addr.arpa",
"122.100.in-addr.arpa",
"123.100.in-addr.arpa",
"124.100.in-addr.arpa",
"125.100.in-addr.arpa",
"126.100.in-addr.arpa",
"127.100.in-addr.arpa",
"127.in-addr.arpa",
"16.172.in-addr.arpa",
"168.192.in-addr.arpa",
"17.172.in-addr.arpa",
"18.172.in-addr.arpa",
"19.172.in-addr.arpa",
"2.0.192.in-addr.arpa",
"20.172.in-addr.arpa",
"21.172.in-addr.arpa",
"22.172.in-addr.arpa",
"23.172.in-addr.arpa",
"24.172.in-addr.arpa",
"25.172.in-addr.arpa",
"254.169.in-addr.arpa",
"255.255.255.255.in-addr.arpa",
"26.172.in-addr.arpa",
"27.172.in-addr.arpa",
"28.172.in-addr.arpa",
"29.172.in-addr.arpa",
"30.172.in-addr.arpa",
"31.172.in-addr.arpa",
"64.100.in-addr.arpa",
"65.100.in-addr.arpa",
"66.100.in-addr.arpa",
"67.100.in-addr.arpa",
"68.100.in-addr.arpa",
"69.100.in-addr.arpa",
"70.100.in-addr.arpa",
"71.100.in-addr.arpa",
"72.100.in-addr.arpa",
"73.100.in-addr.arpa",
"74.100.in-addr.arpa",
"75.100.in-addr.arpa",
"76.100.in-addr.arpa",
"77.100.in-addr.arpa",
"78.100.in-addr.arpa",
"79.100.in-addr.arpa",
"8.b.d.0.1.0.0.2.ip6.arpa",
"8.e.f.ip6.arpa",
"80.100.in-addr.arpa",
"81.100.in-addr.arpa",
"82.100.in-addr.arpa",
"83.100.in-addr.arpa",
"84.100.in-addr.arpa",
"85.100.in-addr.arpa",
"86.100.in-addr.arpa",
"87.100.in-addr.arpa",
"88.100.in-addr.arpa",
"89.100.in-addr.arpa",
"9.e.f.ip6.arpa",
"90.100.in-addr.arpa",
"91.100.in-addr.arpa",
"92.100.in-addr.arpa",
"93.100.in-addr.arpa",
"94.100.in-addr.arpa",
"95.100.in-addr.arpa",
"96.100.in-addr.arpa",
"97.100.in-addr.arpa",
"98.100.in-addr.arpa",
"99.100.in-addr.arpa",
"a.e.f.ip6.arpa",
"airdream",
"api",
"b.e.f.ip6.arpa",
"bbrouter",
"belkin",
"bind",
"blinkap",
"corp",
"d.f.ip6.arpa",
"davolink",
"dearmyrouter",
"dhcp",
"dlink",
"domain",
"envoy",
"example",
"f.f.ip6.arpa",
"fritz.box",
"grp",
"gw==",
"home",
"home.arpa",
"hub",
"internal",
"intra",
"intranet",
"invalid",
"ksyun",
"lan",
"loc",
"local",
"localdomain",
"localhost",
"localnet",
"mail",
"modem",
"mynet",
"myrouter",
"novalocal",
"onion",
"openstacklocal",
"priv",
"private",
"prv",
"router",
"telus",
"test",
"totolink",
"wlan_ap",
"workgroup",
"zghjccbob3n0",
}
type PluginBlockUndelegated struct {
suffixes *critbitgo.Trie
}
func (plugin *PluginBlockUndelegated) Name() string {
return "block_undelegated"
}
func (plugin *PluginBlockUndelegated) Description() string {
return "Block undelegated DNS names"
}
func (plugin *PluginBlockUndelegated) Init(proxy *Proxy) error {
suffixes := critbitgo.NewTrie()
for _, line := range undelegatedSet {
pattern := StringReverse(line)
suffixes.Insert([]byte(pattern), true)
}
plugin.suffixes = suffixes
return nil
}
func (plugin *PluginBlockUndelegated) Drop() error {
return nil
}
func (plugin *PluginBlockUndelegated) Reload() error {
return nil
}
func (plugin *PluginBlockUndelegated) Eval(pluginsState *PluginsState, msg *dns.Msg) error {
revQname := StringReverse(pluginsState.qName)
match, _, found := plugin.suffixes.LongestPrefix([]byte(revQname))
if !found {
return nil
}
if len(match) == len(revQname) || revQname[len(match)] == '.' {
synth := EmptyResponseFromMessage(msg)
synth.Rcode = dns.RcodeNameError
pluginsState.synthResponse = synth
pluginsState.action = PluginsActionSynth
pluginsState.returnCode = PluginsReturnCodeSynth
}
return nil
}

View file

@ -1,46 +0,0 @@
package main
import (
"strings"
"github.com/miekg/dns"
)
type PluginBlockUnqualified struct{}
func (plugin *PluginBlockUnqualified) Name() string {
return "block_unqualified"
}
func (plugin *PluginBlockUnqualified) Description() string {
return "Block unqualified DNS names"
}
func (plugin *PluginBlockUnqualified) Init(proxy *Proxy) error {
return nil
}
func (plugin *PluginBlockUnqualified) Drop() error {
return nil
}
func (plugin *PluginBlockUnqualified) Reload() error {
return nil
}
func (plugin *PluginBlockUnqualified) Eval(pluginsState *PluginsState, msg *dns.Msg) error {
question := msg.Question[0]
if question.Qclass != dns.ClassINET || (question.Qtype != dns.TypeA && question.Qtype != dns.TypeAAAA) {
return nil
}
if strings.IndexByte(pluginsState.qName, '.') >= 0 {
return nil
}
synth := EmptyResponseFromMessage(msg)
synth.Rcode = dns.RcodeNameError
pluginsState.synthResponse = synth
pluginsState.action = PluginsActionSynth
pluginsState.returnCode = PluginsReturnCodeSynth
return nil
}

View file

@ -3,15 +3,14 @@ package main
import (
"crypto/sha512"
"encoding/binary"
"errors"
"sync"
"time"
lru "github.com/hashicorp/golang-lru"
"github.com/miekg/dns"
sieve "github.com/opencoff/go-sieve"
)
const StaleResponseTTL = 30 * time.Second
type CachedResponse struct {
expiration time.Time
msg dns.Msg
@ -19,95 +18,15 @@ type CachedResponse struct {
type CachedResponses struct {
sync.RWMutex
cache *sieve.Sieve[[32]byte, CachedResponse]
cache *lru.ARCCache
}
var cachedResponses CachedResponses
func computeCacheKey(pluginsState *PluginsState, msg *dns.Msg) [32]byte {
question := msg.Question[0]
h := sha512.New512_256()
var tmp [5]byte
binary.LittleEndian.PutUint16(tmp[0:2], question.Qtype)
binary.LittleEndian.PutUint16(tmp[2:4], question.Qclass)
if pluginsState.dnssec {
tmp[4] = 1
}
h.Write(tmp[:])
normalizedRawQName := []byte(question.Name)
NormalizeRawQName(&normalizedRawQName)
h.Write(normalizedRawQName)
var sum [32]byte
h.Sum(sum[:0])
return sum
type PluginCacheResponse struct {
cachedResponses *CachedResponses
}
// ---
type PluginCache struct{}
func (plugin *PluginCache) Name() string {
return "cache"
}
func (plugin *PluginCache) Description() string {
return "DNS cache (reader)."
}
func (plugin *PluginCache) Init(proxy *Proxy) error {
return nil
}
func (plugin *PluginCache) Drop() error {
return nil
}
func (plugin *PluginCache) Reload() error {
return nil
}
func (plugin *PluginCache) Eval(pluginsState *PluginsState, msg *dns.Msg) error {
cacheKey := computeCacheKey(pluginsState, msg)
cachedResponses.RLock()
if cachedResponses.cache == nil {
cachedResponses.RUnlock()
return nil
}
cached, ok := cachedResponses.cache.Get(cacheKey)
if !ok {
cachedResponses.RUnlock()
return nil
}
expiration := cached.expiration
synth := cached.msg.Copy()
cachedResponses.RUnlock()
synth.Id = msg.Id
synth.Response = true
synth.Compress = true
synth.Question = msg.Question
if time.Now().After(expiration) {
expiration2 := time.Now().Add(StaleResponseTTL)
updateTTL(synth, expiration2)
pluginsState.sessionData["stale"] = synth
return nil
}
updateTTL(synth, expiration)
pluginsState.synthResponse = synth
pluginsState.action = PluginsActionSynth
pluginsState.cacheHit = true
return nil
}
// ---
type PluginCacheResponse struct{}
func (plugin *PluginCacheResponse) Name() string {
return "cache_response"
}
@ -129,36 +48,112 @@ func (plugin *PluginCacheResponse) Reload() error {
}
func (plugin *PluginCacheResponse) Eval(pluginsState *PluginsState, msg *dns.Msg) error {
plugin.cachedResponses = &cachedResponses
if msg.Rcode != dns.RcodeSuccess && msg.Rcode != dns.RcodeNameError && msg.Rcode != dns.RcodeNotAuth {
return nil
}
if msg.Truncated {
return nil
}
cacheKey := computeCacheKey(pluginsState, msg)
ttl := getMinTTL(
msg,
pluginsState.cacheMinTTL,
pluginsState.cacheMaxTTL,
pluginsState.cacheNegMinTTL,
pluginsState.cacheNegMaxTTL,
)
cacheKey, err := computeCacheKey(pluginsState, msg)
if err != nil {
return err
}
ttl := getMinTTL(msg, pluginsState.cacheMinTTL, pluginsState.cacheMaxTTL, pluginsState.cacheNegMinTTL, pluginsState.cacheNegMaxTTL)
cachedResponse := CachedResponse{
expiration: time.Now().Add(ttl),
msg: *msg,
}
cachedResponses.Lock()
if cachedResponses.cache == nil {
var err error
cachedResponses.cache = sieve.New[[32]byte, CachedResponse](pluginsState.cacheSize)
if cachedResponses.cache == nil {
cachedResponses.Unlock()
plugin.cachedResponses.Lock()
defer plugin.cachedResponses.Unlock()
if plugin.cachedResponses.cache == nil {
plugin.cachedResponses.cache, err = lru.NewARC(pluginsState.cacheSize)
if err != nil {
return err
}
}
cachedResponses.cache.Add(cacheKey, cachedResponse)
cachedResponses.Unlock()
plugin.cachedResponses.cache.Add(cacheKey, cachedResponse)
updateTTL(msg, cachedResponse.expiration)
return nil
}
type PluginCache struct {
cachedResponses *CachedResponses
}
func (plugin *PluginCache) Name() string {
return "cache"
}
func (plugin *PluginCache) Description() string {
return "DNS cache (reader)."
}
func (plugin *PluginCache) Init(proxy *Proxy) error {
return nil
}
func (plugin *PluginCache) Drop() error {
return nil
}
func (plugin *PluginCache) Reload() error {
return nil
}
func (plugin *PluginCache) Eval(pluginsState *PluginsState, msg *dns.Msg) error {
plugin.cachedResponses = &cachedResponses
cacheKey, err := computeCacheKey(pluginsState, msg)
if err != nil {
return nil
}
plugin.cachedResponses.RLock()
defer plugin.cachedResponses.RUnlock()
if plugin.cachedResponses.cache == nil {
return nil
}
cachedAny, ok := plugin.cachedResponses.cache.Get(cacheKey)
if !ok {
return nil
}
cached := cachedAny.(CachedResponse)
if time.Now().After(cached.expiration) {
return nil
}
updateTTL(&cached.msg, cached.expiration)
synth := cached.msg
synth.Id = msg.Id
synth.Response = true
synth.Compress = true
synth.Question = msg.Question
pluginsState.synthResponse = &synth
pluginsState.action = PluginsActionSynth
pluginsState.cacheHit = true
return nil
}
func computeCacheKey(pluginsState *PluginsState, msg *dns.Msg) ([32]byte, error) {
questions := msg.Question
if len(questions) != 1 {
return [32]byte{}, errors.New("No question present")
}
question := questions[0]
h := sha512.New512_256()
var tmp [5]byte
binary.LittleEndian.PutUint16(tmp[0:2], question.Qtype)
binary.LittleEndian.PutUint16(tmp[2:4], question.Qclass)
if pluginsState.dnssec {
tmp[4] = 1
}
h.Write(tmp[:])
normalizedName := []byte(question.Name)
NormalizeName(&normalizedName)
h.Write(normalizedName)
var sum [32]byte
h.Sum(sum[:0])
return sum, nil
}
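
Both versions of the cache derive the key the same way: a SHA-512/256 hash over the query type, the query class, one byte recording whether DNSSEC was requested, and the normalized owner name. A stand-alone sketch with the standard library only; cacheKey is a hypothetical helper, plain values replace the dns.Msg, and strings.ToLower stands in for the name-normalization helpers.

package main

import (
	"crypto/sha512"
	"encoding/binary"
	"fmt"
	"strings"
)

// cacheKey mirrors computeCacheKey: qtype and qclass are hashed in
// little-endian form, one byte records the DNSSEC flag, and the owner
// name is case-folded before hashing.
func cacheKey(qtype, qclass uint16, dnssec bool, name string) [32]byte {
	h := sha512.New512_256()
	var tmp [5]byte
	binary.LittleEndian.PutUint16(tmp[0:2], qtype)
	binary.LittleEndian.PutUint16(tmp[2:4], qclass)
	if dnssec {
		tmp[4] = 1
	}
	h.Write(tmp[:])
	h.Write([]byte(strings.ToLower(name)))
	var sum [32]byte
	h.Sum(sum[:0])
	return sum
}

func main() {
	a := cacheKey(1, 1, false, "Example.COM.") // qtype 1 = A, qclass 1 = IN
	b := cacheKey(1, 1, false, "example.com.")
	fmt.Println(a == b) // true: case differences do not split the cache
}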

View file

@ -1,44 +0,0 @@
package main
import (
"github.com/jedisct1/dlog"
"github.com/miekg/dns"
)
type PluginCaptivePortal struct {
captivePortalMap *CaptivePortalMap
}
func (plugin *PluginCaptivePortal) Name() string {
return "captive portal handlers"
}
func (plugin *PluginCaptivePortal) Description() string {
return "Handle test queries operating systems make to detect Wi-Fi captive portal"
}
func (plugin *PluginCaptivePortal) Init(proxy *Proxy) error {
plugin.captivePortalMap = proxy.captivePortalMap
dlog.Notice("Captive portals handler enabled")
return nil
}
func (plugin *PluginCaptivePortal) Drop() error {
return nil
}
func (plugin *PluginCaptivePortal) Reload() error {
return nil
}
func (plugin *PluginCaptivePortal) Eval(pluginsState *PluginsState, msg *dns.Msg) error {
question, ips := plugin.captivePortalMap.GetEntry(msg)
if ips == nil {
return nil
}
if synth := HandleCaptivePortalQuery(msg, question, ips); synth != nil {
pluginsState.synthResponse = synth
pluginsState.action = PluginsActionSynth
}
return nil
}

View file

@ -19,14 +19,12 @@ type CloakedName struct {
lastUpdate *time.Time
lineNo int
isIP bool
PTR []string
}
type PluginCloak struct {
sync.RWMutex
patternMatcher *PatternMatcher
ttl uint32
createPTR bool
}
func (plugin *PluginCloak) Name() string {
@ -39,24 +37,23 @@ func (plugin *PluginCloak) Description() string {
func (plugin *PluginCloak) Init(proxy *Proxy) error {
dlog.Noticef("Loading the set of cloaking rules from [%s]", proxy.cloakFile)
lines, err := ReadTextFile(proxy.cloakFile)
bin, err := ReadTextFile(proxy.cloakFile)
if err != nil {
return err
}
plugin.ttl = proxy.cloakTTL
plugin.createPTR = proxy.cloakedPTR
plugin.patternMatcher = NewPatternMatcher()
plugin.ttl = proxy.cacheMinTTL
plugin.patternMatcher = NewPatternPatcher()
cloakedNames := make(map[string]*CloakedName)
for lineNo, line := range strings.Split(lines, "\n") {
line = TrimAndStripInlineComments(line)
if len(line) == 0 {
for lineNo, line := range strings.Split(string(bin), "\n") {
line = strings.TrimFunc(line, unicode.IsSpace)
if len(line) == 0 || strings.HasPrefix(line, "#") {
continue
}
var target string
parts := strings.FieldsFunc(line, unicode.IsSpace)
if len(parts) == 2 {
line = strings.TrimSpace(parts[0])
target = strings.TrimSpace(parts[1])
line = strings.TrimFunc(parts[0], unicode.IsSpace)
target = strings.TrimFunc(parts[1], unicode.IsSpace)
} else if len(parts) > 2 {
dlog.Errorf("Syntax error in cloaking rules at line %d -- Unexpected space character", 1+lineNo)
continue
@ -70,12 +67,11 @@ func (plugin *PluginCloak) Init(proxy *Proxy) error {
if !found {
cloakedName = &CloakedName{}
}
ip := net.ParseIP(target)
if ip != nil {
if ip := net.ParseIP(target); ip != nil {
if ipv4 := ip.To4(); ipv4 != nil {
cloakedName.ipv4 = append(cloakedName.ipv4, ipv4)
cloakedName.ipv4 = append((*cloakedName).ipv4, ipv4)
} else if ipv6 := ip.To16(); ipv6 != nil {
cloakedName.ipv6 = append(cloakedName.ipv6, ipv6)
cloakedName.ipv6 = append((*cloakedName).ipv6, ipv6)
} else {
dlog.Errorf("Invalid IP address in cloaking rule at line %d", 1+lineNo)
continue
@ -86,46 +82,13 @@ func (plugin *PluginCloak) Init(proxy *Proxy) error {
}
cloakedName.lineNo = lineNo + 1
cloakedNames[line] = cloakedName
if !plugin.createPTR || strings.Contains(line, "*") || !cloakedName.isIP {
continue
}
var ptrLine string
if ipv4 := ip.To4(); ipv4 != nil {
reversed, _ := dns.ReverseAddr(ip.To4().String())
ptrLine = strings.TrimSuffix(reversed, ".")
} else {
reversed, _ := dns.ReverseAddr(cloakedName.ipv6[0].To16().String())
ptrLine = strings.TrimSuffix(reversed, ".")
}
ptrQueryLine := ptrEntryToQuery(ptrLine)
ptrCloakedName, found := cloakedNames[ptrQueryLine]
if !found {
ptrCloakedName = &CloakedName{}
}
ptrCloakedName.isIP = true
ptrCloakedName.PTR = append((*ptrCloakedName).PTR, ptrNameToFQDN(line))
ptrCloakedName.lineNo = lineNo + 1
cloakedNames[ptrQueryLine] = ptrCloakedName
}
for line, cloakedName := range cloakedNames {
if err := plugin.patternMatcher.Add(line, cloakedName, cloakedName.lineNo); err != nil {
return err
}
plugin.patternMatcher.Add(line, cloakedName, cloakedName.lineNo)
}
return nil
}
func ptrEntryToQuery(ptrEntry string) string {
return "=" + ptrEntry
}
func ptrNameToFQDN(ptrLine string) string {
ptrLine = strings.TrimPrefix(ptrLine, "=")
return ptrLine + "."
}
func (plugin *PluginCloak) Drop() error {
return nil
}
@ -135,23 +98,25 @@ func (plugin *PluginCloak) Reload() error {
}
func (plugin *PluginCloak) Eval(pluginsState *PluginsState, msg *dns.Msg) error {
question := msg.Question[0]
if question.Qclass != dns.ClassINET || question.Qtype == dns.TypeNS || question.Qtype == dns.TypeSOA {
questions := msg.Question
if len(questions) != 1 {
return nil
}
question := questions[0]
if question.Qclass != dns.ClassINET || (question.Qtype != dns.TypeA && question.Qtype != dns.TypeAAAA) {
return nil
}
qName := strings.ToLower(StripTrailingDot(questions[0].Name))
if len(qName) < 2 {
return nil
}
now := time.Now()
plugin.RLock()
_, _, xcloakedName := plugin.patternMatcher.Eval(pluginsState.qName)
_, _, xcloakedName := plugin.patternMatcher.Eval(qName)
if xcloakedName == nil {
plugin.RUnlock()
return nil
}
if question.Qtype != dns.TypeA && question.Qtype != dns.TypeAAAA && question.Qtype != dns.TypePTR {
plugin.RUnlock()
pluginsState.action = PluginsActionReject
pluginsState.returnCode = PluginsReturnCodeCloak
return nil
}
cloakedName := xcloakedName.(*CloakedName)
ttl, expired := plugin.ttl, false
if cloakedName.lastUpdate != nil {
@ -188,35 +153,36 @@ func (plugin *PluginCloak) Eval(pluginsState *PluginsState, msg *dns.Msg) error
plugin.Unlock()
plugin.RLock()
}
plugin.RUnlock()
synth := EmptyResponseFromMessage(msg)
synth.Answer = []dns.RR{}
var ip *net.IP
if question.Qtype == dns.TypeA {
for _, ip := range cloakedName.ipv4 {
rr := new(dns.A)
rr.Hdr = dns.RR_Header{Name: question.Name, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: ttl}
rr.A = ip
synth.Answer = append(synth.Answer, rr)
ipLen := len(cloakedName.ipv4)
if ipLen > 0 {
ip = &cloakedName.ipv4[rand.Intn(ipLen)]
}
} else if question.Qtype == dns.TypeAAAA {
for _, ip := range cloakedName.ipv6 {
rr := new(dns.AAAA)
rr.Hdr = dns.RR_Header{Name: question.Name, Rrtype: dns.TypeAAAA, Class: dns.ClassINET, Ttl: ttl}
rr.AAAA = ip
synth.Answer = append(synth.Answer, rr)
}
} else if question.Qtype == dns.TypePTR {
for _, ptr := range cloakedName.PTR {
rr := new(dns.PTR)
rr.Hdr = dns.RR_Header{Name: question.Name, Rrtype: dns.TypePTR, Class: dns.ClassINET, Ttl: ttl}
rr.Ptr = ptr
synth.Answer = append(synth.Answer, rr)
} else {
ipLen := len(cloakedName.ipv6)
if ipLen > 0 {
ip = &cloakedName.ipv6[rand.Intn(ipLen)]
}
}
rand.Shuffle(
len(synth.Answer),
func(i, j int) { synth.Answer[i], synth.Answer[j] = synth.Answer[j], synth.Answer[i] },
)
plugin.RUnlock()
synth, err := EmptyResponseFromMessage(msg)
if err != nil {
return err
}
if ip == nil {
synth.Answer = []dns.RR{}
} else if question.Qtype == dns.TypeA {
rr := new(dns.A)
rr.Hdr = dns.RR_Header{Name: question.Name, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: ttl}
rr.A = *ip
synth.Answer = []dns.RR{rr}
} else {
rr := new(dns.AAAA)
rr.Hdr = dns.RR_Header{Name: question.Name, Rrtype: dns.TypeAAAA, Class: dns.ClassINET, Ttl: ttl}
rr.AAAA = *ip
synth.Answer = []dns.RR{rr}
}
pluginsState.synthResponse = synth
pluginsState.action = PluginsActionSynth
pluginsState.returnCode = PluginsReturnCodeCloak
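
On the master side, cloaking rules can also synthesize PTR records: the rule's IP address is turned into its .arpa owner name with dns.ReverseAddr and indexed under a "=" prefix. A minimal sketch of that derivation, assuming the github.com/miekg/dns module is available; the rule values are made up for illustration.

package main

import (
	"fmt"
	"strings"

	"github.com/miekg/dns"
)

func main() {
	// A cloaking rule such as "example.com 192.0.2.10" also produces a
	// reverse entry so that PTR lookups return the cloaked name.
	name, ip := "example.com", "192.0.2.10"

	reversed, err := dns.ReverseAddr(ip) // "10.2.0.192.in-addr.arpa."
	if err != nil {
		panic(err)
	}
	ptrOwner := strings.TrimSuffix(reversed, ".")

	fmt.Printf("PTR owner: %s\n", ptrOwner) // 10.2.0.192.in-addr.arpa
	fmt.Printf("PTR target: %s.\n", name)   // example.com.
}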

View file

@ -1,265 +0,0 @@
package main
import (
"errors"
"net"
"sync"
"time"
"github.com/jedisct1/dlog"
"github.com/miekg/dns"
)
const rfc7050WKN = "ipv4only.arpa."
var (
rfc7050WKA1 = net.IPv4(192, 0, 0, 170)
rfc7050WKA2 = net.IPv4(192, 0, 0, 171)
)
type PluginDNS64 struct {
pref64Mutex *sync.RWMutex
pref64 []*net.IPNet
dns64Resolvers []string
ipv4Resolver string
proxy *Proxy
}
func (plugin *PluginDNS64) Name() string {
return "dns64"
}
func (plugin *PluginDNS64) Description() string {
return "Synthesize DNS64 AAAA responses"
}
func (plugin *PluginDNS64) Init(proxy *Proxy) error {
if len(proxy.listenAddresses) == 0 {
return errors.New("At least one listening IP address must be configured for the DNS64 plugin to work")
}
plugin.ipv4Resolver = proxy.listenAddresses[0] // query is sent to ourselves
plugin.pref64Mutex = new(sync.RWMutex)
plugin.proxy = proxy
if len(proxy.dns64Prefixes) != 0 {
plugin.pref64Mutex.Lock()
defer plugin.pref64Mutex.Unlock()
for _, prefStr := range proxy.dns64Prefixes {
_, pref, err := net.ParseCIDR(prefStr)
if err != nil {
return err
}
dlog.Noticef("Registered DNS64 prefix [%s]", pref.String())
plugin.pref64 = append(plugin.pref64, pref)
}
} else if len(proxy.dns64Resolvers) != 0 {
plugin.dns64Resolvers = proxy.dns64Resolvers
if err := plugin.refreshPref64(); err != nil {
return err
}
} else {
return nil
}
dlog.Notice("DNS64 map enabled")
return nil
}
func (plugin *PluginDNS64) Drop() error {
return nil
}
func (plugin *PluginDNS64) Reload() error {
return nil
}
func (plugin *PluginDNS64) Eval(pluginsState *PluginsState, msg *dns.Msg) error {
if hasAAAAAnswer(msg) {
return nil
}
question := pluginsState.questionMsg.Question[0]
if question.Qclass != dns.ClassINET || question.Qtype != dns.TypeAAAA {
return nil
}
msgA := pluginsState.questionMsg.Copy()
msgA.SetQuestion(question.Name, dns.TypeA)
msgAPacket, err := msgA.Pack()
if err != nil {
return err
}
if !plugin.proxy.clientsCountInc() {
return errors.New("Too many concurrent connections to handle DNS64 subqueries")
}
respPacket := plugin.proxy.processIncomingQuery(
"trampoline",
plugin.proxy.mainProto,
msgAPacket,
nil,
nil,
time.Now(),
false,
)
plugin.proxy.clientsCountDec()
resp := dns.Msg{}
if err := resp.Unpack(respPacket); err != nil {
return err
}
if resp.Rcode != dns.RcodeSuccess {
return nil
}
if len(resp.Answer) == 0 {
return nil
}
initialTTL := uint32(600)
for _, ns := range resp.Ns {
header := ns.Header()
if header.Rrtype == dns.TypeSOA {
initialTTL = header.Ttl
}
}
synth64 := make([]dns.RR, 0)
for _, answer := range resp.Answer {
header := answer.Header()
if header.Rrtype == dns.TypeCNAME {
synth64 = append(synth64, answer)
} else if header.Rrtype == dns.TypeA {
ttl := initialTTL
if ttl > header.Ttl {
ttl = header.Ttl
}
ipv4 := answer.(*dns.A).A.To4()
if ipv4 != nil {
plugin.pref64Mutex.RLock()
for _, prefix := range plugin.pref64 {
ipv6 := translateToIPv6(ipv4, prefix)
synthAAAA := new(dns.AAAA)
synthAAAA.Hdr = dns.RR_Header{
Name: header.Name,
Rrtype: dns.TypeAAAA,
Class: header.Class,
Ttl: ttl,
}
synthAAAA.AAAA = ipv6
synth64 = append(synth64, synthAAAA)
}
plugin.pref64Mutex.RUnlock()
}
}
}
msg.Answer = synth64
msg.AuthenticatedData = false
msg.SetEdns0(uint16(MaxDNSUDPSafePacketSize), false)
pluginsState.returnCode = PluginsReturnCodeCloak
return nil
}
func hasAAAAAnswer(msg *dns.Msg) bool {
for _, answer := range msg.Answer {
if answer.Header().Rrtype == dns.TypeAAAA {
return true
}
}
return false
}
func translateToIPv6(ipv4 net.IP, prefix *net.IPNet) net.IP {
ipv6 := make(net.IP, net.IPv6len)
copy(ipv6, prefix.IP)
n, _ := prefix.Mask.Size()
ipShift := n / 8
for i := 0; i < net.IPv4len; i++ {
if ipShift+i == 8 {
ipShift++
}
ipv6[ipShift+i] = ipv4[i]
}
return ipv6
}
func (plugin *PluginDNS64) fetchPref64(resolver string) error {
msg := new(dns.Msg)
msg.SetQuestion(rfc7050WKN, dns.TypeAAAA)
client := new(dns.Client)
resp, _, err := client.Exchange(msg, resolver)
if err != nil {
return err
}
if resp == nil || resp.Rcode != dns.RcodeSuccess {
return errors.New("Unable to fetch Pref64")
}
uniqPrefixes := make(map[string]struct{})
prefixes := make([]*net.IPNet, 0)
for _, answer := range resp.Answer {
if answer.Header().Rrtype == dns.TypeAAAA {
ipv6 := answer.(*dns.AAAA).AAAA
if ipv6 != nil && len(ipv6) == net.IPv6len {
prefEnd := 0
if wka := net.IPv4(ipv6[12], ipv6[13], ipv6[14], ipv6[15]); wka.Equal(rfc7050WKA1) ||
wka.Equal(rfc7050WKA2) { // 96
prefEnd = 12
} else if wka := net.IPv4(ipv6[9], ipv6[10], ipv6[11], ipv6[12]); wka.Equal(rfc7050WKA1) || wka.Equal(rfc7050WKA2) { // 64
prefEnd = 8
} else if wka := net.IPv4(ipv6[7], ipv6[9], ipv6[10], ipv6[11]); wka.Equal(rfc7050WKA1) || wka.Equal(rfc7050WKA2) { // 56
prefEnd = 7
} else if wka := net.IPv4(ipv6[6], ipv6[7], ipv6[9], ipv6[10]); wka.Equal(rfc7050WKA1) || wka.Equal(rfc7050WKA2) { // 48
prefEnd = 6
} else if wka := net.IPv4(ipv6[5], ipv6[6], ipv6[7], ipv6[9]); wka.Equal(rfc7050WKA1) || wka.Equal(rfc7050WKA2) { // 40
prefEnd = 5
} else if wka := net.IPv4(ipv6[4], ipv6[5], ipv6[6], ipv6[7]); wka.Equal(rfc7050WKA1) || wka.Equal(rfc7050WKA2) { // 32
prefEnd = 4
}
if prefEnd > 0 {
prefix := new(net.IPNet)
prefix.IP = append(ipv6[:prefEnd], net.IPv6zero[prefEnd:]...)
prefix.Mask = net.CIDRMask(prefEnd*8, 128)
if _, ok := uniqPrefixes[prefix.String()]; !ok {
prefixes = append(prefixes, prefix)
uniqPrefixes[prefix.String()] = struct{}{}
dlog.Infof("Registered DNS64 prefix [%s]", prefix.String())
}
}
}
}
}
if len(prefixes) == 0 {
return errors.New("Empty Pref64 list")
}
plugin.pref64Mutex.Lock()
defer plugin.pref64Mutex.Unlock()
plugin.pref64 = prefixes
return nil
}
func (plugin *PluginDNS64) refreshPref64() error {
for _, resolver := range plugin.dns64Resolvers {
if err := plugin.fetchPref64(resolver); err == nil {
break
}
}
plugin.pref64Mutex.RLock()
defer plugin.pref64Mutex.RUnlock()
if len(plugin.pref64) == 0 {
return errors.New("Empty Pref64 list")
}
return nil
}
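
translateToIPv6 embeds the IPv4 address into the configured Pref64 according to RFC 6052, skipping byte 8 (the reserved u octet) for prefixes shorter than /96. For the common /96 case, such as the well-known prefix 64:ff9b::/96, the embedding is a plain copy into the last four bytes. A stdlib-only sketch of that case; synthesizeAAAA is a hypothetical helper and does not handle shorter prefixes.

package main

import (
	"fmt"
	"net"
)

// synthesizeAAAA maps an IPv4 address into a /96 NAT64 prefix by copying it
// into the final four bytes of the IPv6 address (RFC 6052, /96 case only).
func synthesizeAAAA(ipv4 net.IP, prefix *net.IPNet) net.IP {
	ipv6 := make(net.IP, net.IPv6len)
	copy(ipv6, prefix.IP.To16())
	copy(ipv6[12:], ipv4.To4())
	return ipv6
}

func main() {
	_, pref64, _ := net.ParseCIDR("64:ff9b::/96")
	v4 := net.ParseIP("192.0.2.33")
	fmt.Println(synthesizeAAAA(v4, pref64)) // 64:ff9b::c000:221
}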

View file

@ -1,80 +0,0 @@
package main
import (
"math/rand"
"net"
"github.com/jedisct1/dlog"
"github.com/miekg/dns"
)
type PluginECS struct {
nets []*net.IPNet
}
func (plugin *PluginECS) Name() string {
return "ecs"
}
func (plugin *PluginECS) Description() string {
return "Set EDNS-client-subnet information in outgoing queries."
}
func (plugin *PluginECS) Init(proxy *Proxy) error {
plugin.nets = proxy.ednsClientSubnets
dlog.Notice("ECS plugin enabled")
return nil
}
func (plugin *PluginECS) Drop() error {
return nil
}
func (plugin *PluginECS) Reload() error {
return nil
}
func (plugin *PluginECS) Eval(pluginsState *PluginsState, msg *dns.Msg) error {
var options *[]dns.EDNS0
for _, extra := range msg.Extra {
if extra.Header().Rrtype == dns.TypeOPT {
options = &extra.(*dns.OPT).Option
for _, option := range *options {
if option.Option() == dns.EDNS0SUBNET {
return nil
}
}
break
}
}
if options == nil {
msg.SetEdns0(uint16(pluginsState.maxPayloadSize), false)
for _, extra := range msg.Extra {
if extra.Header().Rrtype == dns.TypeOPT {
options = &extra.(*dns.OPT).Option
break
}
}
}
if options == nil {
return nil
}
prr := dns.EDNS0_SUBNET{}
prr.Code = dns.EDNS0SUBNET
net := plugin.nets[rand.Intn(len(plugin.nets))]
bits, totalSize := net.Mask.Size()
if totalSize == 32 {
prr.Family = 1
} else if totalSize == 128 {
prr.Family = 2
} else {
return nil
}
prr.SourceNetmask = uint8(bits)
prr.SourceScope = 0
prr.Address = net.IP
*options = append(*options, &prr)
return nil
}
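
The plugin injects an EDNS Client Subnet option into the query's OPT record, choosing Family 1 or 2 from the size of the configured prefix. A sketch of building the same option on a fresh query, assuming the github.com/miekg/dns module; the subnet value is made up for illustration.

package main

import (
	"fmt"
	"net"

	"github.com/miekg/dns"
)

func main() {
	msg := new(dns.Msg)
	msg.SetQuestion("example.com.", dns.TypeA)
	msg.SetEdns0(4096, false) // adds the OPT pseudo-record

	_, subnet, _ := net.ParseCIDR("192.0.2.0/24")
	bits, _ := subnet.Mask.Size()

	ecs := new(dns.EDNS0_SUBNET)
	ecs.Code = dns.EDNS0SUBNET
	ecs.Family = 1 // 1 = IPv4, 2 = IPv6
	ecs.SourceNetmask = uint8(bits)
	ecs.SourceScope = 0
	ecs.Address = subnet.IP

	opt := msg.IsEdns0()
	opt.Option = append(opt.Option, ecs)

	fmt.Println(msg.String())
}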

View file

@ -9,7 +9,8 @@ import (
"github.com/miekg/dns"
)
type PluginFirefox struct{}
type PluginFirefox struct {
}
func (plugin *PluginFirefox) Name() string {
return "firefox"
@ -33,18 +34,22 @@ func (plugin *PluginFirefox) Reload() error {
}
func (plugin *PluginFirefox) Eval(pluginsState *PluginsState, msg *dns.Msg) error {
if pluginsState.clientProto == "local_doh" {
questions := msg.Question
if len(questions) != 1 {
return nil
}
question := msg.Question[0]
question := questions[0]
if question.Qclass != dns.ClassINET || (question.Qtype != dns.TypeA && question.Qtype != dns.TypeAAAA) {
return nil
}
qName := pluginsState.qName
if qName != "use-application-dns.net" && !strings.HasSuffix(qName, ".use-application-dns.net") {
qName := strings.ToLower(question.Name)
if qName != "use-application-dns.net." && !strings.HasSuffix(qName, ".use-application-dns.net.") {
return nil
}
synth := EmptyResponseFromMessage(msg)
synth, err := EmptyResponseFromMessage(msg)
if err != nil {
return err
}
synth.Rcode = dns.RcodeNameError
pluginsState.synthResponse = synth
pluginsState.action = PluginsActionSynth

View file

@ -5,34 +5,19 @@ import (
"math/rand"
"net"
"strings"
"unicode"
"github.com/jedisct1/dlog"
"github.com/lifenjoiner/dhcpdns"
"github.com/miekg/dns"
)
type SearchSequenceItemType int
const (
Explicit SearchSequenceItemType = iota
Bootstrap
DHCP
)
type SearchSequenceItem struct {
typ SearchSequenceItemType
type PluginForwardEntry struct {
domain string
servers []string
}
type PluginForwardEntry struct {
domain string
sequence []SearchSequenceItem
}
type PluginForward struct {
forwardMap []PluginForwardEntry
bootstrapResolvers []string
dhcpdns []*dhcpdns.Detector
forwardMap []PluginForwardEntry
}
func (plugin *PluginForward) Name() string {
@ -45,103 +30,35 @@ func (plugin *PluginForward) Description() string {
func (plugin *PluginForward) Init(proxy *Proxy) error {
dlog.Noticef("Loading the set of forwarding rules from [%s]", proxy.forwardFile)
if proxy.xTransport != nil {
plugin.bootstrapResolvers = proxy.xTransport.bootstrapResolvers
}
lines, err := ReadTextFile(proxy.forwardFile)
bin, err := ReadTextFile(proxy.forwardFile)
if err != nil {
return err
}
requiresDHCP := false
for lineNo, line := range strings.Split(lines, "\n") {
line = TrimAndStripInlineComments(line)
if len(line) == 0 {
for lineNo, line := range strings.Split(string(bin), "\n") {
line = strings.TrimFunc(line, unicode.IsSpace)
if len(line) == 0 || strings.HasPrefix(line, "#") {
continue
}
domain, serversStr, ok := StringTwoFields(line)
domain = strings.TrimPrefix(domain, "*.")
if strings.Contains(domain, "*") {
ok = false
}
if !ok {
return fmt.Errorf(
"Syntax error for a forwarding rule at line %d. Expected syntax: example.com 9.9.9.9,8.8.8.8",
1+lineNo,
)
return fmt.Errorf("Syntax error for a forwarding rule at line %d. Expected syntax: example.com: 9.9.9.9,8.8.8.8", 1+lineNo)
}
domain = strings.ToLower(domain)
var sequence []SearchSequenceItem
var servers []string
for _, server := range strings.Split(serversStr, ",") {
server = strings.TrimSpace(server)
switch server {
case "$BOOTSTRAP":
if len(plugin.bootstrapResolvers) == 0 {
return fmt.Errorf(
"Syntax error for a forwarding rule at line %d. No bootstrap resolvers available",
1+lineNo,
)
}
if len(sequence) > 0 && sequence[len(sequence)-1].typ == Bootstrap {
// Ignore repetitions
} else {
sequence = append(sequence, SearchSequenceItem{typ: Bootstrap})
dlog.Infof("Forwarding [%s] to the bootstrap servers", domain)
}
case "$DHCP":
if len(sequence) > 0 && sequence[len(sequence)-1].typ == DHCP {
// Ignore repetitions
} else {
sequence = append(sequence, SearchSequenceItem{typ: DHCP})
dlog.Infof("Forwarding [%s] to the DHCP servers", domain)
}
requiresDHCP = true
default:
if strings.HasPrefix(server, "$") {
dlog.Criticalf("Unknown keyword [%s] at line %d", server, 1+lineNo)
continue
}
if server, err = normalizeIPAndOptionalPort(server, "53"); err != nil {
dlog.Criticalf("Syntax error for a forwarding rule at line %d: %s", 1+lineNo, err)
continue
}
idxServers := -1
for i, item := range sequence {
if item.typ == Explicit {
idxServers = i
}
}
if idxServers == -1 {
sequence = append(sequence, SearchSequenceItem{typ: Explicit, servers: []string{server}})
} else {
sequence[idxServers].servers = append(sequence[idxServers].servers, server)
}
dlog.Infof("Forwarding [%s] to [%s]", domain, server)
server = strings.TrimFunc(server, unicode.IsSpace)
if net.ParseIP(server) != nil {
server = fmt.Sprintf("%s:%d", server, 53)
}
servers = append(servers, server)
}
if len(servers) == 0 {
continue
}
plugin.forwardMap = append(plugin.forwardMap, PluginForwardEntry{
domain: domain,
sequence: sequence,
domain: domain, servers: servers,
})
}
if requiresDHCP {
if len(proxy.userName) > 0 {
dlog.Warn("DHCP/DNS detection may not work when 'user_name' is set or when starting as a non-root user")
}
if proxy.SourceIPv6 {
dlog.Notice("Starting a DHCP/DNS detector for IPv6")
d6 := &dhcpdns.Detector{RemoteIPPort: "[2001:DB8::53]:80"}
go d6.Serve(9, 10)
plugin.dhcpdns = append(plugin.dhcpdns, d6)
}
if proxy.SourceIPv4 {
dlog.Notice("Starting a DHCP/DNS detector for IPv4")
d4 := &dhcpdns.Detector{RemoteIPPort: "192.0.2.53:80"}
go d4.Serve(9, 10)
plugin.dhcpdns = append(plugin.dhcpdns, d4)
}
}
return nil
}
@ -154,122 +71,33 @@ func (plugin *PluginForward) Reload() error {
}
func (plugin *PluginForward) Eval(pluginsState *PluginsState, msg *dns.Msg) error {
qName := pluginsState.qName
qNameLen := len(qName)
var sequence []SearchSequenceItem
questions := msg.Question
if len(questions) != 1 {
return nil
}
question := strings.ToLower(StripTrailingDot(questions[0].Name))
questionLen := len(question)
var servers []string
for _, candidate := range plugin.forwardMap {
candidateLen := len(candidate.domain)
if candidateLen > qNameLen {
if candidateLen > questionLen {
continue
}
if (qName[qNameLen-candidateLen:] == candidate.domain &&
(candidateLen == qNameLen || (qName[qNameLen-candidateLen-1] == '.'))) ||
(candidate.domain == ".") {
sequence = candidate.sequence
if question[questionLen-candidateLen:] == candidate.domain && (candidateLen == questionLen || (question[questionLen-candidateLen-1] == '.')) {
servers = candidate.servers
break
}
}
if len(sequence) == 0 {
if len(servers) == 0 {
return nil
}
var err error
var respMsg *dns.Msg
tries := 4
for _, item := range sequence {
var server string
switch item.typ {
case Explicit:
server = item.servers[rand.Intn(len(item.servers))]
case Bootstrap:
server = plugin.bootstrapResolvers[rand.Intn(len(plugin.bootstrapResolvers))]
case DHCP:
const maxInconsistency = 9
for _, dhcpdns := range plugin.dhcpdns {
inconsistency, ip, dhcpDNS, err := dhcpdns.Status()
if err != nil && ip != "" && inconsistency > maxInconsistency {
dlog.Infof("No response from the DHCP server while resolving [%s]", qName)
continue
}
if len(dhcpDNS) > 0 {
server = net.JoinHostPort(dhcpDNS[rand.Intn(len(dhcpDNS))].String(), "53")
break
}
}
if len(server) == 0 {
dlog.Infof("DHCP didn't provide any DNS server to forward [%s]", qName)
continue
}
}
pluginsState.serverName = server
if len(server) == 0 {
continue
}
if tries == 0 {
break
}
tries--
dlog.Debugf("Forwarding [%s] to [%s]", qName, server)
client := dns.Client{Net: pluginsState.serverProto, Timeout: pluginsState.timeout}
respMsg, _, err = client.Exchange(msg, server)
if err != nil {
continue
}
if respMsg.Truncated {
client.Net = "tcp"
respMsg, _, err = client.Exchange(msg, server)
if err != nil {
continue
}
}
if len(sequence) > 0 {
switch respMsg.Rcode {
case dns.RcodeNameError, dns.RcodeRefused, dns.RcodeNotAuth:
continue
}
}
if edns0 := respMsg.IsEdns0(); edns0 == nil || !edns0.Do() {
respMsg.AuthenticatedData = false
}
respMsg.Id = msg.Id
pluginsState.synthResponse = respMsg
pluginsState.action = PluginsActionSynth
pluginsState.returnCode = PluginsReturnCodeForward
return nil
server := servers[rand.Intn(len(servers))]
pluginsState.serverName = server
respMsg, err := dns.Exchange(msg, server)
if err != nil {
return err
}
return err
}
func normalizeIPAndOptionalPort(addr string, defaultPort string) (string, error) {
var host, port string
var err error
if strings.HasPrefix(addr, "[") {
if !strings.Contains(addr, "]:") {
if addr[len(addr)-1] != ']' {
return "", fmt.Errorf("invalid IPv6 format: missing closing ']'")
}
host = addr[1 : len(addr)-1]
port = defaultPort
} else {
host, port, err = net.SplitHostPort(addr)
if err != nil {
return "", err
}
}
} else {
host, port, err = net.SplitHostPort(addr)
if err != nil {
host = addr
port = defaultPort
}
}
ip := net.ParseIP(host)
if ip == nil {
return "", fmt.Errorf("invalid IP address: [%s]", host)
}
if ip.To4() != nil {
return fmt.Sprintf("%s:%s", ip.String(), port), nil
}
return fmt.Sprintf("[%s]:%s", ip.String(), port), nil
pluginsState.synthResponse = respMsg
pluginsState.action = PluginsActionSynth
return nil
}
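
Both versions accept forwarding servers with or without an explicit port: the master side normalizes entries with normalizeIPAndOptionalPort, while 2.0.28 simply appends :53 to bare IPs. A small stdlib-only sketch of the same normalization, assuming 53 as the default port; withDefaultPort is a hypothetical helper and skips the bracketed-IPv6-without-port form handled by the real code.

package main

import (
	"fmt"
	"net"
)

// withDefaultPort returns "host:port", adding the default port when the
// input is a bare IPv4 or IPv6 address (IPv6 output is bracketed).
func withDefaultPort(addr, defaultPort string) (string, error) {
	if host, port, err := net.SplitHostPort(addr); err == nil {
		if net.ParseIP(host) == nil {
			return "", fmt.Errorf("invalid IP address: [%s]", host)
		}
		return net.JoinHostPort(host, port), nil
	}
	ip := net.ParseIP(addr)
	if ip == nil {
		return "", fmt.Errorf("invalid IP address: [%s]", addr)
	}
	return net.JoinHostPort(ip.String(), defaultPort), nil
}

func main() {
	for _, s := range []string{"9.9.9.9", "9.9.9.9:5353", "2001:db8::1", "[2001:db8::1]:53"} {
		out, err := withDefaultPort(s, "53")
		fmt.Println(out, err)
	}
}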

View file

@ -30,18 +30,12 @@ func (plugin *PluginGetSetPayloadSize) Eval(pluginsState *PluginsState, msg *dns
dnssec := false
if edns0 != nil {
pluginsState.maxUnencryptedUDPSafePayloadSize = int(edns0.UDPSize())
pluginsState.originalMaxPayloadSize = Max(
pluginsState.maxUnencryptedUDPSafePayloadSize-ResponseOverhead,
pluginsState.originalMaxPayloadSize,
)
pluginsState.originalMaxPayloadSize = Max(pluginsState.maxUnencryptedUDPSafePayloadSize-ResponseOverhead, pluginsState.originalMaxPayloadSize)
dnssec = edns0.Do()
}
var options *[]dns.EDNS0
pluginsState.dnssec = dnssec
pluginsState.maxPayloadSize = Min(
MaxDNSUDPPacketSize-ResponseOverhead,
Max(pluginsState.originalMaxPayloadSize, pluginsState.maxPayloadSize),
)
pluginsState.maxPayloadSize = Min(MaxDNSUDPPacketSize-ResponseOverhead, Max(pluginsState.originalMaxPayloadSize, pluginsState.maxPayloadSize))
if pluginsState.maxPayloadSize > 512 {
extra2 := []dns.RR{}
for _, extra := range msg.Extra {

View file

@ -3,16 +3,16 @@ package main
import (
"errors"
"fmt"
"io"
"net"
"time"
"github.com/jedisct1/dlog"
"github.com/miekg/dns"
lumberjack "gopkg.in/natefinch/lumberjack.v2"
)
type PluginNxLog struct {
logger io.Writer
logger *lumberjack.Logger
format string
}
@ -25,7 +25,7 @@ func (plugin *PluginNxLog) Description() string {
}
func (plugin *PluginNxLog) Init(proxy *Proxy) error {
plugin.logger = Logger(proxy.logMaxSize, proxy.logMaxAge, proxy.logMaxBackups, proxy.nxLogFile)
plugin.logger = &lumberjack.Logger{LocalTime: true, MaxSize: proxy.logMaxSize, MaxAge: proxy.logMaxAge, MaxBackups: proxy.logMaxBackups, Filename: proxy.nxLogFile, Compress: true}
plugin.format = proxy.nxLogFormat
return nil
@ -43,22 +43,22 @@ func (plugin *PluginNxLog) Eval(pluginsState *PluginsState, msg *dns.Msg) error
if msg.Rcode != dns.RcodeNameError {
return nil
}
var clientIPStr string
switch pluginsState.clientProto {
case "udp":
clientIPStr = (*pluginsState.clientAddr).(*net.UDPAddr).IP.String()
case "tcp", "local_doh":
clientIPStr = (*pluginsState.clientAddr).(*net.TCPAddr).IP.String()
default:
// Ignore internal flow.
questions := msg.Question
if len(questions) == 0 {
return nil
}
question := msg.Question[0]
question := questions[0]
qType, ok := dns.TypeToString[question.Qtype]
if !ok {
qType = string(qType)
}
qName := pluginsState.qName
var clientIPStr string
if pluginsState.clientProto == "udp" {
clientIPStr = (*pluginsState.clientAddr).(*net.UDPAddr).IP.String()
} else {
clientIPStr = (*pluginsState.clientAddr).(*net.TCPAddr).IP.String()
}
qName := StripTrailingDot(question.Name)
var line string
if plugin.format == "tsv" {
@ -76,7 +76,7 @@ func (plugin *PluginNxLog) Eval(pluginsState *PluginsState, msg *dns.Msg) error
if plugin.logger == nil {
return errors.New("Log file not initialized")
}
_, _ = plugin.logger.Write([]byte(line))
plugin.logger.Write([]byte(line))
return nil
}

View file

@ -3,17 +3,17 @@ package main
import (
"errors"
"fmt"
"io"
"net"
"strings"
"time"
"github.com/jedisct1/dlog"
"github.com/miekg/dns"
lumberjack "gopkg.in/natefinch/lumberjack.v2"
)
type PluginQueryLog struct {
logger io.Writer
logger *lumberjack.Logger
format string
ignoredQtypes []string
}
@ -27,7 +27,7 @@ func (plugin *PluginQueryLog) Description() string {
}
func (plugin *PluginQueryLog) Init(proxy *Proxy) error {
plugin.logger = Logger(proxy.logMaxSize, proxy.logMaxAge, proxy.logMaxBackups, proxy.queryLogFile)
plugin.logger = &lumberjack.Logger{LocalTime: true, MaxSize: proxy.logMaxSize, MaxAge: proxy.logMaxAge, MaxBackups: proxy.logMaxBackups, Filename: proxy.queryLogFile, Compress: true}
plugin.format = proxy.queryLogFormat
plugin.ignoredQtypes = proxy.queryLogIgnoredQtypes
@ -43,17 +43,11 @@ func (plugin *PluginQueryLog) Reload() error {
}
func (plugin *PluginQueryLog) Eval(pluginsState *PluginsState, msg *dns.Msg) error {
var clientIPStr string
switch pluginsState.clientProto {
case "udp":
clientIPStr = (*pluginsState.clientAddr).(*net.UDPAddr).IP.String()
case "tcp", "local_doh":
clientIPStr = (*pluginsState.clientAddr).(*net.TCPAddr).IP.String()
default:
// Ignore internal flow.
questions := msg.Question
if len(questions) == 0 {
return nil
}
question := msg.Question[0]
question := questions[0]
qType, ok := dns.TypeToString[question.Qtype]
if !ok {
qType = string(qType)
@ -65,7 +59,13 @@ func (plugin *PluginQueryLog) Eval(pluginsState *PluginsState, msg *dns.Msg) err
}
}
}
qName := pluginsState.qName
var clientIPStr string
if pluginsState.clientProto == "udp" {
clientIPStr = (*pluginsState.clientAddr).(*net.UDPAddr).IP.String()
} else {
clientIPStr = (*pluginsState.clientAddr).(*net.TCPAddr).IP.String()
}
qName := StripTrailingDot(question.Name)
if pluginsState.cacheHit {
pluginsState.serverName = "-"
@ -90,16 +90,8 @@ func (plugin *PluginQueryLog) Eval(pluginsState *PluginsState, msg *dns.Msg) err
year, month, day := now.Date()
hour, minute, second := now.Clock()
tsStr := fmt.Sprintf("[%d-%02d-%02d %02d:%02d:%02d]", year, int(month), day, hour, minute, second)
line = fmt.Sprintf(
"%s\t%s\t%s\t%s\t%s\t%dms\t%s\n",
tsStr,
clientIPStr,
StringQuote(qName),
qType,
returnCode,
requestDuration/time.Millisecond,
StringQuote(pluginsState.serverName),
)
line = fmt.Sprintf("%s\t%s\t%s\t%s\t%s\t%dms\t%s\n", tsStr, clientIPStr, StringQuote(qName), qType, returnCode, requestDuration/time.Millisecond,
StringQuote(pluginsState.serverName))
} else if plugin.format == "ltsv" {
cached := 0
if pluginsState.cacheHit {
@ -113,7 +105,6 @@ func (plugin *PluginQueryLog) Eval(pluginsState *PluginsState, msg *dns.Msg) err
if plugin.logger == nil {
return errors.New("Log file not initialized")
}
_, _ = plugin.logger.Write([]byte(line))
plugin.logger.Write([]byte(line))
return nil
}

View file

@ -18,10 +18,8 @@ func (plugin *PluginQueryMeta) Description() string {
func (plugin *PluginQueryMeta) Init(proxy *Proxy) error {
queryMetaRR := new(dns.TXT)
queryMetaRR.Hdr = dns.RR_Header{
Name: ".", Rrtype: dns.TypeTXT,
Class: dns.ClassINET, Ttl: 86400,
}
queryMetaRR.Hdr = dns.RR_Header{Name: ".", Rrtype: dns.TypeTXT,
Class: dns.ClassINET, Ttl: 86400}
queryMetaRR.Txt = proxy.queryMeta
plugin.queryMetaRR = queryMetaRR
return nil
@ -36,6 +34,10 @@ func (plugin *PluginQueryMeta) Reload() error {
}
func (plugin *PluginQueryMeta) Eval(pluginsState *PluginsState, msg *dns.Msg) error {
questions := msg.Question
if len(questions) == 0 {
return nil
}
msg.Extra = []dns.RR{plugin.queryMetaRR}
return nil
}
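
The query-meta plugin above attaches a TXT record to outgoing queries. A small sketch of building such a record with miekg/dns; the TTL matches the hunk, while the question name and Txt values are placeholders standing in for proxy.queryMeta:

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	// Build a root TXT record like the queryMetaRR in the hunk above.
	rr := new(dns.TXT)
	rr.Hdr = dns.RR_Header{Name: ".", Rrtype: dns.TypeTXT, Class: dns.ClassINET, Ttl: 86400}
	rr.Txt = []string{"key=value"}

	msg := new(dns.Msg)
	msg.SetQuestion("example.com.", dns.TypeA)
	msg.Extra = []dns.RR{rr}
	fmt.Println(msg.String())
}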

View file

@ -3,50 +3,51 @@ package main
import (
"errors"
"fmt"
"io"
"net"
"strings"
"time"
"unicode"
"github.com/jedisct1/dlog"
"github.com/miekg/dns"
lumberjack "gopkg.in/natefinch/lumberjack.v2"
)
type PluginAllowName struct {
type PluginWhitelistName struct {
allWeeklyRanges *map[string]WeeklyRanges
patternMatcher *PatternMatcher
logger io.Writer
logger *lumberjack.Logger
format string
}
func (plugin *PluginAllowName) Name() string {
return "allow_name"
func (plugin *PluginWhitelistName) Name() string {
return "whitelist_name"
}
func (plugin *PluginAllowName) Description() string {
return "Allow names matching patterns"
func (plugin *PluginWhitelistName) Description() string {
return "Whitelists DNS queries matching name patterns"
}
func (plugin *PluginAllowName) Init(proxy *Proxy) error {
dlog.Noticef("Loading the set of allowed names from [%s]", proxy.allowNameFile)
lines, err := ReadTextFile(proxy.allowNameFile)
func (plugin *PluginWhitelistName) Init(proxy *Proxy) error {
dlog.Noticef("Loading the set of whitelisting rules from [%s]", proxy.whitelistNameFile)
bin, err := ReadTextFile(proxy.whitelistNameFile)
if err != nil {
return err
}
plugin.allWeeklyRanges = proxy.allWeeklyRanges
plugin.patternMatcher = NewPatternMatcher()
for lineNo, line := range strings.Split(lines, "\n") {
line = TrimAndStripInlineComments(line)
if len(line) == 0 {
plugin.patternMatcher = NewPatternPatcher()
for lineNo, line := range strings.Split(string(bin), "\n") {
line = strings.TrimFunc(line, unicode.IsSpace)
if len(line) == 0 || strings.HasPrefix(line, "#") {
continue
}
parts := strings.Split(line, "@")
timeRangeName := ""
if len(parts) == 2 {
line = strings.TrimSpace(parts[0])
timeRangeName = strings.TrimSpace(parts[1])
line = strings.TrimFunc(parts[0], unicode.IsSpace)
timeRangeName = strings.TrimFunc(parts[1], unicode.IsSpace)
} else if len(parts) > 2 {
dlog.Errorf("Syntax error in allowed names at line %d -- Unexpected @ character", 1+lineNo)
dlog.Errorf("Syntax error in whitelist rules at line %d -- Unexpected @ character", 1+lineNo)
continue
}
var weeklyRanges *WeeklyRanges
@ -58,52 +59,55 @@ func (plugin *PluginAllowName) Init(proxy *Proxy) error {
weeklyRanges = &weeklyRangesX
}
}
if err := plugin.patternMatcher.Add(line, weeklyRanges, lineNo+1); err != nil {
if _, err := plugin.patternMatcher.Add(line, weeklyRanges, lineNo+1); err != nil {
dlog.Error(err)
continue
}
}
if len(proxy.allowNameLogFile) == 0 {
if len(proxy.whitelistNameLogFile) == 0 {
return nil
}
plugin.logger = Logger(proxy.logMaxSize, proxy.logMaxAge, proxy.logMaxBackups, proxy.allowNameLogFile)
plugin.format = proxy.allowNameFormat
plugin.logger = &lumberjack.Logger{LocalTime: true, MaxSize: proxy.logMaxSize, MaxAge: proxy.logMaxAge, MaxBackups: proxy.logMaxBackups, Filename: proxy.whitelistNameLogFile, Compress: true}
plugin.format = proxy.whitelistNameFormat
return nil
}
func (plugin *PluginAllowName) Drop() error {
func (plugin *PluginWhitelistName) Drop() error {
return nil
}
func (plugin *PluginAllowName) Reload() error {
func (plugin *PluginWhitelistName) Reload() error {
return nil
}
func (plugin *PluginAllowName) Eval(pluginsState *PluginsState, msg *dns.Msg) error {
qName := pluginsState.qName
allowList, reason, xweeklyRanges := plugin.patternMatcher.Eval(qName)
func (plugin *PluginWhitelistName) Eval(pluginsState *PluginsState, msg *dns.Msg) error {
questions := msg.Question
if len(questions) != 1 {
return nil
}
qName := strings.ToLower(StripTrailingDot(questions[0].Name))
whitelist, reason, xweeklyRanges := plugin.patternMatcher.Eval(qName)
var weeklyRanges *WeeklyRanges
if xweeklyRanges != nil {
weeklyRanges = xweeklyRanges.(*WeeklyRanges)
}
if allowList {
if whitelist {
if weeklyRanges != nil && !weeklyRanges.Match() {
allowList = false
whitelist = false
}
}
if allowList {
if whitelist {
if pluginsState.sessionData == nil {
pluginsState.sessionData = make(map[string]interface{})
}
pluginsState.sessionData["whitelisted"] = true
if plugin.logger != nil {
var clientIPStr string
switch pluginsState.clientProto {
case "udp":
if pluginsState.clientProto == "udp" {
clientIPStr = (*pluginsState.clientAddr).(*net.UDPAddr).IP.String()
case "tcp", "local_doh":
} else {
clientIPStr = (*pluginsState.clientAddr).(*net.TCPAddr).IP.String()
default:
// Ignore internal flow.
return nil
}
var line string
if plugin.format == "tsv" {
@ -120,7 +124,7 @@ func (plugin *PluginAllowName) Eval(pluginsState *PluginsState, msg *dns.Msg) er
if plugin.logger == nil {
return errors.New("Log file not initialized")
}
_, _ = plugin.logger.Write([]byte(line))
plugin.logger.Write([]byte(line))
}
}
return nil
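
Both versions of the plugin above parse rule lines in which an optional "@name" suffix ties the pattern to a weekly time range. A hedged sketch of that line splitting, with inline-comment stripping reduced to a simple '#' cut (the real code uses TrimAndStripInlineComments or TrimFunc, as shown in the hunk):

package main

import (
	"fmt"
	"strings"
)

// parseRule splits one allow/whitelist rule into a pattern and an optional
// time-range name, following the '@' handling in the hunk above.
func parseRule(line string) (pattern, timeRange string, err error) {
	if i := strings.IndexByte(line, '#'); i >= 0 {
		line = line[:i]
	}
	line = strings.TrimSpace(line)
	parts := strings.Split(line, "@")
	switch len(parts) {
	case 1:
		return line, "", nil
	case 2:
		return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil
	default:
		return "", "", fmt.Errorf("unexpected @ character")
	}
}

func main() {
	fmt.Println(parseRule("*.example.com @work # weekdays only"))
}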

View file

@ -14,11 +14,11 @@ import (
type PluginsAction int
const (
PluginsActionNone = 0
PluginsActionContinue = 1
PluginsActionDrop = 2
PluginsActionReject = 3
PluginsActionSynth = 4
PluginsActionNone = 0
PluginsActionForward = 1
PluginsActionDrop = 2
PluginsActionReject = 3
PluginsActionSynth = 4
)
type PluginsGlobals struct {
@ -42,11 +42,8 @@ const (
PluginsReturnCodeParseError
PluginsReturnCodeNXDomain
PluginsReturnCodeResponseError
PluginsReturnCodeServFail
PluginsReturnCodeNetworkError
PluginsReturnCodeServerError
PluginsReturnCodeCloak
PluginsReturnCodeServerTimeout
PluginsReturnCodeNotReady
)
var PluginsReturnCodeToString = map[PluginsReturnCode]string{
@ -58,58 +55,45 @@ var PluginsReturnCodeToString = map[PluginsReturnCode]string{
PluginsReturnCodeParseError: "PARSE_ERROR",
PluginsReturnCodeNXDomain: "NXDOMAIN",
PluginsReturnCodeResponseError: "RESPONSE_ERROR",
PluginsReturnCodeServFail: "SERVFAIL",
PluginsReturnCodeNetworkError: "NETWORK_ERROR",
PluginsReturnCodeServerError: "SERVER_ERROR",
PluginsReturnCodeCloak: "CLOAK",
PluginsReturnCodeServerTimeout: "SERVER_TIMEOUT",
PluginsReturnCodeNotReady: "NOT_READY",
}
type PluginsState struct {
requestStart time.Time
requestEnd time.Time
clientProto string
serverName string
serverProto string
qName string
clientAddr *net.Addr
synthResponse *dns.Msg
questionMsg *dns.Msg
sessionData map[string]interface{}
action PluginsAction
timeout time.Duration
returnCode PluginsReturnCode
maxPayloadSize int
cacheSize int
originalMaxPayloadSize int
maxUnencryptedUDPSafePayloadSize int
rejectTTL uint32
cacheMaxTTL uint32
cacheNegMaxTTL uint32
cacheNegMinTTL uint32
cacheMinTTL uint32
cacheHit bool
originalMaxPayloadSize int
maxPayloadSize int
clientProto string
clientAddr *net.Addr
synthResponse *dns.Msg
dnssec bool
cacheSize int
cacheNegMinTTL uint32
cacheNegMaxTTL uint32
cacheMinTTL uint32
cacheMaxTTL uint32
questionMsg *dns.Msg
requestStart time.Time
requestEnd time.Time
cacheHit bool
returnCode PluginsReturnCode
serverName string
}
func (proxy *Proxy) InitPluginsGlobals() error {
func InitPluginsGlobals(pluginsGlobals *PluginsGlobals, proxy *Proxy) error {
queryPlugins := &[]Plugin{}
if proxy.captivePortalMap != nil {
*queryPlugins = append(*queryPlugins, Plugin(new(PluginCaptivePortal)))
}
if len(proxy.queryMeta) != 0 {
*queryPlugins = append(*queryPlugins, Plugin(new(PluginQueryMeta)))
}
if len(proxy.allowNameFile) != 0 {
*queryPlugins = append(*queryPlugins, Plugin(new(PluginAllowName)))
if len(proxy.whitelistNameFile) != 0 {
*queryPlugins = append(*queryPlugins, Plugin(new(PluginWhitelistName)))
}
*queryPlugins = append(*queryPlugins, Plugin(new(PluginFirefox)))
if len(proxy.ednsClientSubnets) != 0 {
*queryPlugins = append(*queryPlugins, Plugin(new(PluginECS)))
}
if len(proxy.blockNameFile) != 0 {
*queryPlugins = append(*queryPlugins, Plugin(new(PluginBlockName)))
}
@ -126,29 +110,14 @@ func (proxy *Proxy) InitPluginsGlobals() error {
if len(proxy.forwardFile) != 0 {
*queryPlugins = append(*queryPlugins, Plugin(new(PluginForward)))
}
if proxy.pluginBlockUnqualified {
*queryPlugins = append(*queryPlugins, Plugin(new(PluginBlockUnqualified)))
}
if proxy.pluginBlockUndelegated {
*queryPlugins = append(*queryPlugins, Plugin(new(PluginBlockUndelegated)))
}
responsePlugins := &[]Plugin{}
if len(proxy.nxLogFile) != 0 {
*responsePlugins = append(*responsePlugins, Plugin(new(PluginNxLog)))
}
if len(proxy.allowedIPFile) != 0 {
*responsePlugins = append(*responsePlugins, Plugin(new(PluginAllowedIP)))
}
if len(proxy.blockNameFile) != 0 {
*responsePlugins = append(*responsePlugins, Plugin(new(PluginBlockNameResponse)))
}
if len(proxy.blockIPFile) != 0 {
*responsePlugins = append(*responsePlugins, Plugin(new(PluginBlockIP)))
}
if len(proxy.dns64Resolvers) != 0 || len(proxy.dns64Prefixes) != 0 {
*responsePlugins = append(*responsePlugins, Plugin(new(PluginDNS64)))
}
if proxy.cache {
*responsePlugins = append(*responsePlugins, Plugin(new(PluginCacheResponse)))
}
@ -174,11 +143,11 @@ func (proxy *Proxy) InitPluginsGlobals() error {
}
}
proxy.pluginsGlobals.queryPlugins = queryPlugins
proxy.pluginsGlobals.responsePlugins = responsePlugins
proxy.pluginsGlobals.loggingPlugins = loggingPlugins
(*pluginsGlobals).queryPlugins = queryPlugins
(*pluginsGlobals).responsePlugins = responsePlugins
(*pluginsGlobals).loggingPlugins = loggingPlugins
parseBlockedQueryResponse(proxy.blockedQueryResponse, &proxy.pluginsGlobals)
parseBlockedQueryResponse(proxy.blockedQueryResponse, pluginsGlobals)
return nil
}
@ -189,11 +158,11 @@ func parseBlockedQueryResponse(blockedResponse string, pluginsGlobals *PluginsGl
if strings.HasPrefix(blockedResponse, "a:") {
blockedIPStrings := strings.Split(blockedResponse, ",")
pluginsGlobals.respondWithIPv4 = net.ParseIP(strings.TrimPrefix(blockedIPStrings[0], "a:"))
(*pluginsGlobals).respondWithIPv4 = net.ParseIP(strings.TrimPrefix(blockedIPStrings[0], "a:"))
if pluginsGlobals.respondWithIPv4 == nil {
if (*pluginsGlobals).respondWithIPv4 == nil {
dlog.Notice("Error parsing IPv4 response given in blocked_query_response option, defaulting to `hinfo`")
pluginsGlobals.refusedCodeInResponses = false
(*pluginsGlobals).refusedCodeInResponses = false
return
}
@ -203,30 +172,29 @@ func parseBlockedQueryResponse(blockedResponse string, pluginsGlobals *PluginsGl
if strings.HasPrefix(ipv6Response, "[") {
ipv6Response = strings.Trim(ipv6Response, "[]")
}
pluginsGlobals.respondWithIPv6 = net.ParseIP(ipv6Response)
(*pluginsGlobals).respondWithIPv6 = net.ParseIP(ipv6Response)
if pluginsGlobals.respondWithIPv6 == nil {
dlog.Notice(
"Error parsing IPv6 response given in blocked_query_response option, defaulting to IPv4",
)
if (*pluginsGlobals).respondWithIPv6 == nil {
dlog.Notice("Error parsing IPv6 response given in blocked_query_response option, defaulting to IPv4")
}
} else {
dlog.Noticef("Invalid IPv6 response given in blocked_query_response option [%s], the option should take the form 'a:<IPv4>,aaaa:<IPv6>'", blockedIPStrings[1])
}
}
if pluginsGlobals.respondWithIPv6 == nil {
pluginsGlobals.respondWithIPv6 = pluginsGlobals.respondWithIPv4
if (*pluginsGlobals).respondWithIPv6 == nil {
(*pluginsGlobals).respondWithIPv6 = (*pluginsGlobals).respondWithIPv4
}
} else {
switch blockedResponse {
case "refused":
pluginsGlobals.refusedCodeInResponses = true
(*pluginsGlobals).refusedCodeInResponses = true
case "hinfo":
pluginsGlobals.refusedCodeInResponses = false
(*pluginsGlobals).refusedCodeInResponses = false
default:
dlog.Noticef("Invalid blocked_query_response option [%s], defaulting to `hinfo`", blockedResponse)
pluginsGlobals.refusedCodeInResponses = false
(*pluginsGlobals).refusedCodeInResponses = false
}
}
}
@ -240,16 +208,9 @@ type Plugin interface {
Eval(pluginsState *PluginsState, msg *dns.Msg) error
}
func NewPluginsState(
proxy *Proxy,
clientProto string,
clientAddr *net.Addr,
serverProto string,
start time.Time,
) PluginsState {
func NewPluginsState(proxy *Proxy, clientProto string, clientAddr *net.Addr, start time.Time) PluginsState {
return PluginsState{
action: PluginsActionContinue,
returnCode: PluginsReturnCodePass,
action: PluginsActionForward,
maxPayloadSize: MaxDNSUDPPacketSize - ResponseOverhead,
clientProto: clientProto,
clientAddr: clientAddr,
@ -258,81 +219,58 @@ func NewPluginsState(
cacheNegMaxTTL: proxy.cacheNegMaxTTL,
cacheMinTTL: proxy.cacheMinTTL,
cacheMaxTTL: proxy.cacheMaxTTL,
rejectTTL: proxy.rejectTTL,
questionMsg: nil,
qName: "",
serverName: "-",
serverProto: serverProto,
timeout: proxy.timeout,
requestStart: start,
maxUnencryptedUDPSafePayloadSize: MaxDNSUDPSafePacketSize,
sessionData: make(map[string]interface{}),
}
}
func (pluginsState *PluginsState) ApplyQueryPlugins(
pluginsGlobals *PluginsGlobals,
packet []byte,
needsEDNS0Padding bool,
) ([]byte, error) {
func (pluginsState *PluginsState) ApplyQueryPlugins(pluginsGlobals *PluginsGlobals, packet []byte, serverName string) ([]byte, error) {
if len(*pluginsGlobals.queryPlugins) == 0 && len(*pluginsGlobals.loggingPlugins) == 0 {
return packet, nil
}
pluginsState.serverName = serverName
pluginsState.action = PluginsActionForward
msg := dns.Msg{}
if err := msg.Unpack(packet); err != nil {
return packet, err
}
if len(msg.Question) != 1 {
if len(msg.Question) > 1 {
return packet, errors.New("Unexpected number of questions")
}
qName, err := NormalizeQName(msg.Question[0].Name)
if err != nil {
return packet, err
}
dlog.Debugf("Handling query for [%v]", qName)
pluginsState.qName = qName
pluginsState.questionMsg = &msg
if len(*pluginsGlobals.queryPlugins) == 0 && len(*pluginsGlobals.loggingPlugins) == 0 {
return packet, nil
}
pluginsGlobals.RLock()
defer pluginsGlobals.RUnlock()
for _, plugin := range *pluginsGlobals.queryPlugins {
if err := plugin.Eval(pluginsState, &msg); err != nil {
if ret := plugin.Eval(pluginsState, &msg); ret != nil {
pluginsGlobals.RUnlock()
pluginsState.action = PluginsActionDrop
return packet, err
return packet, ret
}
if pluginsState.action == PluginsActionReject {
synth := RefusedResponseFromMessage(
&msg,
pluginsGlobals.refusedCodeInResponses,
pluginsGlobals.respondWithIPv4,
pluginsGlobals.respondWithIPv6,
pluginsState.rejectTTL,
)
synth, err := RefusedResponseFromMessage(&msg, pluginsGlobals.refusedCodeInResponses, pluginsGlobals.respondWithIPv4, pluginsGlobals.respondWithIPv6, pluginsState.cacheMinTTL)
if err != nil {
return nil, err
}
pluginsState.synthResponse = synth
}
if pluginsState.action != PluginsActionContinue {
if pluginsState.action != PluginsActionForward {
break
}
}
pluginsGlobals.RUnlock()
packet2, err := msg.PackBuffer(packet)
if err != nil {
return packet, err
}
if needsEDNS0Padding && pluginsState.action == PluginsActionContinue {
padLen := 63 - ((len(packet2) + 63) & 63)
if paddedPacket2, _ := addEDNS0PaddingIfNoneFound(&msg, packet2, padLen); paddedPacket2 != nil {
return paddedPacket2, nil
}
}
return packet2, nil
}
func (pluginsState *PluginsState) ApplyResponsePlugins(
pluginsGlobals *PluginsGlobals,
packet []byte,
ttl *uint32,
) ([]byte, error) {
msg := dns.Msg{Compress: true}
func (pluginsState *PluginsState) ApplyResponsePlugins(pluginsGlobals *PluginsGlobals, packet []byte, ttl *uint32) ([]byte, error) {
if len(*pluginsGlobals.responsePlugins) == 0 && len(*pluginsGlobals.loggingPlugins) == 0 {
return packet, nil
}
pluginsState.action = PluginsActionForward
msg := dns.Msg{}
if err := msg.Unpack(packet); err != nil {
if len(packet) >= MinDNSPacketSize && HasTCFlag(packet) {
err = nil
@ -345,32 +283,30 @@ func (pluginsState *PluginsState) ApplyResponsePlugins(
case dns.RcodeNameError:
pluginsState.returnCode = PluginsReturnCodeNXDomain
case dns.RcodeServerFailure:
pluginsState.returnCode = PluginsReturnCodeServFail
pluginsState.returnCode = PluginsReturnCodeServerError
default:
pluginsState.returnCode = PluginsReturnCodeResponseError
}
removeEDNS0Options(&msg)
pluginsGlobals.RLock()
defer pluginsGlobals.RUnlock()
for _, plugin := range *pluginsGlobals.responsePlugins {
if err := plugin.Eval(pluginsState, &msg); err != nil {
if ret := plugin.Eval(pluginsState, &msg); ret != nil {
pluginsGlobals.RUnlock()
pluginsState.action = PluginsActionDrop
return packet, err
return packet, ret
}
if pluginsState.action == PluginsActionReject {
synth := RefusedResponseFromMessage(
&msg,
pluginsGlobals.refusedCodeInResponses,
pluginsGlobals.respondWithIPv4,
pluginsGlobals.respondWithIPv6,
pluginsState.rejectTTL,
)
synth, err := RefusedResponseFromMessage(&msg, pluginsGlobals.refusedCodeInResponses, pluginsGlobals.respondWithIPv4, pluginsGlobals.respondWithIPv6, pluginsState.cacheMinTTL)
if err != nil {
return nil, err
}
dlog.Infof("Blocking [%s]", synth.Question[0].Name)
pluginsState.synthResponse = synth
}
if pluginsState.action != PluginsActionContinue {
if pluginsState.action != PluginsActionForward {
break
}
}
pluginsGlobals.RUnlock()
if ttl != nil {
setMaxTTL(&msg, *ttl)
}
@ -387,15 +323,16 @@ func (pluginsState *PluginsState) ApplyLoggingPlugins(pluginsGlobals *PluginsGlo
}
pluginsState.requestEnd = time.Now()
questionMsg := pluginsState.questionMsg
if questionMsg == nil {
return errors.New("Question not found")
if questionMsg == nil || len(questionMsg.Question) > 1 {
return errors.New("Unexpected number of questions")
}
pluginsGlobals.RLock()
defer pluginsGlobals.RUnlock()
for _, plugin := range *pluginsGlobals.loggingPlugins {
if err := plugin.Eval(pluginsState, questionMsg); err != nil {
return err
if ret := plugin.Eval(pluginsState, questionMsg); ret != nil {
pluginsGlobals.RUnlock()
return ret
}
}
pluginsGlobals.RUnlock()
return nil
}
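
One detail worth pulling out of ApplyResponsePlugins above is the mapping from DNS response codes to plugin return codes, which the two sides of the diff name slightly differently (SERVFAIL vs SERVER_ERROR). A self-contained sketch of that switch, using plain strings in place of the PluginsReturnCode constants:

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

// returnCodeForRcode maps a response Rcode to a return-code label, mirroring
// the switch in ApplyResponsePlugins above (names simplified to strings).
func returnCodeForRcode(rcode int) string {
	switch rcode {
	case dns.RcodeSuccess:
		return "PASS"
	case dns.RcodeNameError:
		return "NXDOMAIN"
	case dns.RcodeServerFailure:
		return "SERVFAIL"
	default:
		return "RESPONSE_ERROR"
	}
}

func main() {
	fmt.Println(returnCodeForRcode(dns.RcodeNameError)) // NXDOMAIN
}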

View file

@ -15,7 +15,8 @@ import (
)
func (proxy *Proxy) dropPrivilege(userStr string, fds []*os.File) {
if os.Geteuid() != 0 {
currentUser, err := user.Current()
if err != nil && currentUser.Uid != "0" {
dlog.Fatal("Root privileges are required in order to switch to a different user. Maybe try again with 'sudo'")
}
userInfo, err := user.Lookup(userStr)
@ -24,19 +25,9 @@ func (proxy *Proxy) dropPrivilege(userStr string, fds []*os.File) {
if err != nil {
uid, err2 := strconv.Atoi(userStr)
if err2 != nil || uid <= 0 {
dlog.Fatalf(
"Unable to retrieve any information about user [%s]: [%s] - Remove the user_name directive from the configuration file in order to avoid identity switch",
userStr,
err,
)
dlog.Fatalf("Unable to retrieve any information about user [%s]: [%s] - Remove the user_name directive from the configuration file in order to avoid identity switch", userStr, err)
}
dlog.Warnf(
"Unable to retrieve any information about user [%s]: [%s] - Switching to user id [%v] with the same group id, as [%v] looks like a user id. But you should remove or fix the user_name directive in the configuration file if possible",
userStr,
err,
uid,
uid,
)
dlog.Warnf("Unable to retrieve any information about user [%s]: [%s] - Switching to user id [%v] with the same group id, as [%v] looks like a user id. But you should remove or fix the user_name directive in the configuration file if possible", userStr, err, uid, uid)
userInfo = &user.User{Uid: userStr, Gid: userStr}
}
uid, err := strconv.Atoi(userInfo.Uid)
@ -56,9 +47,7 @@ func (proxy *Proxy) dropPrivilege(userStr string, fds []*os.File) {
dlog.Fatal(err)
}
if err := ServiceManagerReadyNotify(); err != nil {
dlog.Fatal(err)
}
ServiceManagerReadyNotify()
args = append(args, "-child")
@ -74,17 +63,26 @@ func (proxy *Proxy) dropPrivilege(userStr string, fds []*os.File) {
if _, _, rcode := syscall.RawSyscall(syscall.SYS_SETUID, uintptr(uid), 0, 0); rcode != 0 {
dlog.Fatalf("Unable to drop user privileges: [%s]", rcode.Error())
}
for i, fd := range fds {
if fd.Fd() >= InheritedDescriptorsBase {
dlog.Fatal("Duplicated file descriptors are above base")
maxfd := uintptr(0)
for _, fd := range fds {
if fd.Fd() > maxfd {
maxfd = fd.Fd()
}
if err := unix.Dup2(int(fd.Fd()), int(InheritedDescriptorsBase+uintptr(i))); err != nil {
}
fdbase := maxfd + 1
for i, fd := range fds {
if err := unix.Dup2(int(fd.Fd()), int(fdbase+uintptr(i))); err != nil {
dlog.Fatalf("Unable to clone file descriptor: [%s]", err)
}
if _, err := unix.FcntlInt(fd.Fd(), unix.F_SETFD, unix.FD_CLOEXEC); err != nil {
dlog.Fatalf("Unable to set the close on exec flag: [%s]", err)
}
}
for i := range fds {
if err := unix.Dup2(int(fdbase+uintptr(i)), int(i)+3); err != nil {
dlog.Fatalf("Unable to reassign descriptor: [%s]", err)
}
}
err = unix.Exec(path, args, os.Environ())
dlog.Fatalf("Unable to reexecute [%s]: [%s]", path, err)
os.Exit(1)
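
The right-hand side of this hunk replaces the fixed InheritedDescriptorsBase scheme with a two-pass remap: duplicate every inherited descriptor above the current maximum, then move them down to 3, 4, … before re-exec. A sketch of just that remapping, Linux-flavoured (unix.Dup2 is not available on every architecture; some need Dup3):

package main

import (
	"os"

	"golang.org/x/sys/unix"
)

// remapDescriptors duplicates each inherited fd to a range above the current
// maximum, then moves the copies down to 3, 4, ... so the re-executed child
// finds them at fixed positions, as in the hunk above.
func remapDescriptors(fds []*os.File) error {
	maxfd := uintptr(0)
	for _, fd := range fds {
		if fd.Fd() > maxfd {
			maxfd = fd.Fd()
		}
	}
	base := maxfd + 1
	for i, fd := range fds {
		if err := unix.Dup2(int(fd.Fd()), int(base+uintptr(i))); err != nil {
			return err
		}
	}
	for i := range fds {
		if err := unix.Dup2(int(base+uintptr(i)), i+3); err != nil {
			return err
		}
	}
	return nil
}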

View file

@ -1,4 +1,4 @@
//go:build !windows && !linux
// +build !windows,!linux
package main
@ -16,7 +16,8 @@ import (
)
func (proxy *Proxy) dropPrivilege(userStr string, fds []*os.File) {
if os.Geteuid() != 0 {
currentUser, err := user.Current()
if err != nil && currentUser.Uid != "0" {
dlog.Fatal("Root privileges are required in order to switch to a different user. Maybe try again with 'sudo'")
}
userInfo, err := user.Lookup(userStr)
@ -25,19 +26,9 @@ func (proxy *Proxy) dropPrivilege(userStr string, fds []*os.File) {
if err != nil {
uid, err2 := strconv.Atoi(userStr)
if err2 != nil || uid <= 0 {
dlog.Fatalf(
"Unable to retrieve any information about user [%s]: [%s] - Remove the user_name directive from the configuration file in order to avoid identity switch",
userStr,
err,
)
dlog.Fatalf("Unable to retrieve any information about user [%s]: [%s] - Remove the user_name directive from the configuration file in order to avoid identity switch", userStr, err)
}
dlog.Warnf(
"Unable to retrieve any information about user [%s]: [%s] - Switching to user id [%v] with the same group id, as [%v] looks like a user id. But you should remove or fix the user_name directive in the configuration file if possible",
userStr,
err,
uid,
uid,
)
dlog.Warnf("Unable to retrieve any information about user [%s]: [%s] - Switching to user id [%v] with the same group id, as [%v] looks like a user id. But you should remove or fix the user_name directive in the configuration file if possible", userStr, err, uid, uid)
userInfo = &user.User{Uid: userStr, Gid: userStr}
}
uid, err := strconv.Atoi(userInfo.Uid)
@ -57,9 +48,7 @@ func (proxy *Proxy) dropPrivilege(userStr string, fds []*os.File) {
dlog.Fatal(err)
}
if err := ServiceManagerReadyNotify(); err != nil {
dlog.Fatal(err)
}
ServiceManagerReadyNotify()
args = append(args, "-child")
@ -75,17 +64,26 @@ func (proxy *Proxy) dropPrivilege(userStr string, fds []*os.File) {
if err := unix.Setuid(uid); err != nil {
dlog.Fatalf("Unable to drop user privileges: %s", err)
}
for i, fd := range fds {
if fd.Fd() >= InheritedDescriptorsBase {
dlog.Fatal("Duplicated file descriptors are above base")
maxfd := uintptr(0)
for _, fd := range fds {
if fd.Fd() > maxfd {
maxfd = fd.Fd()
}
if err := unix.Dup2(int(fd.Fd()), int(InheritedDescriptorsBase+uintptr(i))); err != nil {
}
fdbase := maxfd + 1
for i, fd := range fds {
if err := unix.Dup2(int(fd.Fd()), int(fdbase+uintptr(i))); err != nil {
dlog.Fatalf("Unable to clone file descriptor: [%s]", err)
}
if _, err := unix.FcntlInt(fd.Fd(), unix.F_SETFD, unix.FD_CLOEXEC); err != nil {
dlog.Fatalf("Unable to set the close on exec flag: [%s]", err)
}
}
for i := range fds {
if err := unix.Dup2(int(fdbase+uintptr(i)), int(i)+3); err != nil {
dlog.Fatalf("Unable to reassign descriptor: [%s]", err)
}
}
err = unix.Exec(path, args, os.Environ())
dlog.Fatalf("Unable to reexecute [%s]: [%s]", path, err)
os.Exit(1)

File diff suppressed because it is too large

View file

@ -1,384 +1,60 @@
package main
import (
"errors"
"fmt"
"net"
"os"
"strings"
"time"
"github.com/miekg/dns"
)
const (
myResolverHost string = "resolver.dnscrypt.info."
nonexistentName string = "nonexistent-zone.dnscrypt-test."
)
const myResolverHost string = "resolver.dnscrypt.info"
func resolveQuery(server string, qName string, qType uint16, sendClientSubnet bool) (*dns.Msg, error) {
client := new(dns.Client)
client.ReadTimeout = 2 * time.Second
msg := &dns.Msg{
MsgHdr: dns.MsgHdr{
RecursionDesired: true,
Opcode: dns.OpcodeQuery,
},
Question: make([]dns.Question, 1),
}
options := &dns.OPT{
Hdr: dns.RR_Header{
Name: ".",
Rrtype: dns.TypeOPT,
},
}
func Resolve(name string) {
fmt.Printf("Resolving [%s]\n\n", name)
if sendClientSubnet {
subnet := net.IPNet{IP: net.IPv4(93, 184, 216, 0), Mask: net.CIDRMask(24, 32)}
prr := dns.EDNS0_SUBNET{}
prr.Code = dns.EDNS0SUBNET
bits, totalSize := subnet.Mask.Size()
if totalSize == 32 {
prr.Family = 1
} else if totalSize == 128 { // if we want to test with IPv6
prr.Family = 2
}
prr.SourceNetmask = uint8(bits)
prr.SourceScope = 0
prr.Address = subnet.IP
options.Option = append(options.Option, &prr)
}
msg.Extra = append(msg.Extra, options)
options.SetDo()
options.SetUDPSize(uint16(MaxDNSPacketSize))
msg.Question[0] = dns.Question{Name: qName, Qtype: qType, Qclass: dns.ClassINET}
msg.Id = dns.Id()
for i := 0; i < 3; i++ {
response, rtt, err := client.Exchange(msg, server)
if neterr, ok := err.(net.Error); ok && neterr.Timeout() {
client.ReadTimeout *= 2
continue
}
_ = rtt
if err != nil {
return nil, err
}
return response, nil
}
return nil, errors.New("Timeout")
}
func Resolve(server string, name string, singleResolver bool) {
parts := strings.SplitN(name, ",", 2)
if len(parts) == 2 {
name, server = parts[0], parts[1]
singleResolver = true
}
host, port := ExtractHostAndPort(server, 53)
if host == "0.0.0.0" {
host = "127.0.0.1"
} else if host == "[::]" {
host = "[::1]"
}
server = fmt.Sprintf("%s:%d", host, port)
fmt.Printf("Resolving [%s] using %s port %d\n\n", name, host, port)
name = dns.Fqdn(name)
cname := name
var clientSubnet string
for once := true; once; once = false {
response, err := resolveQuery(server, myResolverHost, dns.TypeTXT, true)
if err != nil {
fmt.Printf("Unable to resolve: [%s]\n", err)
os.Exit(1)
}
fmt.Printf("Resolver : ")
res := make([]string, 0)
for _, answer := range response.Answer {
if answer.Header().Class != dns.ClassINET || answer.Header().Rrtype != dns.TypeTXT {
continue
}
var ip string
for _, txt := range answer.(*dns.TXT).Txt {
if strings.HasPrefix(txt, "Resolver IP: ") {
ip = strings.TrimPrefix(txt, "Resolver IP: ")
} else if strings.HasPrefix(txt, "EDNS0 client subnet: ") {
clientSubnet = strings.TrimPrefix(txt, "EDNS0 client subnet: ")
}
}
if ip == "" {
continue
}
if rev, err := dns.ReverseAddr(ip); err == nil {
response, err = resolveQuery(server, rev, dns.TypePTR, false)
if err != nil {
break
}
for _, answer := range response.Answer {
if answer.Header().Rrtype != dns.TypePTR || answer.Header().Class != dns.ClassINET {
continue
}
ip = ip + " (" + answer.(*dns.PTR).Ptr + ")"
break
}
}
res = append(res, ip)
}
if len(res) == 0 {
fmt.Println("-")
fmt.Printf("Domain exists: ")
ns, err := net.LookupNS(name)
if err != nil || len(ns) == 0 {
if name == "." {
fmt.Println("'No' would mean that the Internet doesn't exist any more, and that would be very sad. On the bright side, you just found an easter egg.")
} else {
fmt.Println(strings.Join(res, ", "))
fmt.Println("probably not, or blocked by the proxy")
}
} else {
fmt.Printf("yes, %d name servers found\n", len(ns))
}
if singleResolver {
for once := true; once; once = false {
fmt.Printf("Lying : ")
response, err := resolveQuery(server, nonexistentName, dns.TypeA, false)
if err != nil {
fmt.Printf("[%v]", err)
break
}
if response.Rcode == dns.RcodeSuccess {
fmt.Println("yes. That resolver returns wrong responses")
} else if response.Rcode == dns.RcodeNameError {
fmt.Println("no")
} else {
fmt.Printf("unknown - query returned %s\n", dns.RcodeToString[response.Rcode])
}
if response.Rcode == dns.RcodeNameError {
fmt.Printf("DNSSEC : ")
if response.AuthenticatedData {
fmt.Println("yes, the resolver supports DNSSEC")
} else {
fmt.Println("no, the resolver doesn't support DNSSEC")
}
}
fmt.Printf("ECS : ")
if clientSubnet != "" {
fmt.Println("client network address is sent to authoritative servers")
} else {
fmt.Println("ignored or selective")
}
}
}
fmt.Println("")
cname:
for once := true; once; once = false {
fmt.Printf("Canonical name: ")
for i := 0; i < 100; i++ {
response, err := resolveQuery(server, cname, dns.TypeCNAME, false)
if err != nil {
break cname
}
found := false
for _, answer := range response.Answer {
if answer.Header().Rrtype != dns.TypeCNAME || answer.Header().Class != dns.ClassINET {
continue
}
cname = answer.(*dns.CNAME).Target
found = true
break
}
if !found {
break
}
}
fmt.Printf("Canonical name: ")
cname, err := net.LookupCNAME(name)
if err != nil {
fmt.Println("-")
} else {
fmt.Println(cname)
}
fmt.Println("")
for once := true; once; once = false {
fmt.Printf("IPv4 addresses: ")
response, err := resolveQuery(server, cname, dns.TypeA, false)
if err != nil {
break
}
ipv4 := make([]string, 0)
for _, answer := range response.Answer {
if answer.Header().Rrtype != dns.TypeA || answer.Header().Class != dns.ClassINET {
continue
}
ipv4 = append(ipv4, answer.(*dns.A).A.String())
}
if len(ipv4) == 0 {
fmt.Println("-")
} else {
fmt.Println(strings.Join(ipv4, ", "))
}
fmt.Printf("IP addresses: ")
addrs, err := net.LookupHost(name)
if err != nil {
fmt.Println("-")
} else {
fmt.Println(strings.Join(addrs, ", "))
}
for once := true; once; once = false {
fmt.Printf("IPv6 addresses: ")
response, err := resolveQuery(server, cname, dns.TypeAAAA, false)
if err != nil {
break
}
ipv6 := make([]string, 0)
for _, answer := range response.Answer {
if answer.Header().Rrtype != dns.TypeAAAA || answer.Header().Class != dns.ClassINET {
continue
}
ipv6 = append(ipv6, answer.(*dns.AAAA).AAAA.String())
}
if len(ipv6) == 0 {
fmt.Println("-")
} else {
fmt.Println(strings.Join(ipv6, ", "))
}
fmt.Printf("TXT records: ")
txt, err := net.LookupTXT(name)
if err != nil {
fmt.Println("-")
} else {
fmt.Println(strings.Join(txt, " "))
}
fmt.Println("")
for once := true; once; once = false {
fmt.Printf("Name servers : ")
response, err := resolveQuery(server, cname, dns.TypeNS, false)
if err != nil {
break
}
nss := make([]string, 0)
for _, answer := range response.Answer {
if answer.Header().Rrtype != dns.TypeNS || answer.Header().Class != dns.ClassINET {
continue
}
nss = append(nss, answer.(*dns.NS).Ns)
}
if response.Rcode == dns.RcodeNameError {
fmt.Println("name does not exist")
} else if response.Rcode != dns.RcodeSuccess {
fmt.Printf("server returned %s", dns.RcodeToString[response.Rcode])
} else if len(nss) == 0 {
fmt.Println("no name servers found")
} else {
fmt.Println(strings.Join(nss, ", "))
}
fmt.Printf("DNSSEC signed : ")
if response.AuthenticatedData {
fmt.Println("yes")
} else {
fmt.Println("no")
resIP, err := net.LookupHost(myResolverHost)
if err == nil && len(resIP) > 0 {
fmt.Printf("Resolver IP: %s", resIP[0])
rev, err := net.LookupAddr(resIP[0])
if err == nil && len(rev) > 0 {
fmt.Printf(" (%s)", rev[0])
}
fmt.Println("")
}
for once := true; once; once = false {
fmt.Printf("Mail servers : ")
response, err := resolveQuery(server, cname, dns.TypeMX, false)
if err != nil {
break
}
mxs := make([]string, 0)
for _, answer := range response.Answer {
if answer.Header().Rrtype != dns.TypeMX || answer.Header().Class != dns.ClassINET {
continue
}
mxs = append(mxs, answer.(*dns.MX).Mx)
}
if len(mxs) == 0 {
fmt.Println("no mail servers found")
} else if len(mxs) > 1 {
fmt.Printf("%d mail servers found\n", len(mxs))
} else {
fmt.Println("1 mail servers found")
}
}
fmt.Println("")
for once := true; once; once = false {
fmt.Printf("HTTPS alias : ")
response, err := resolveQuery(server, cname, dns.TypeHTTPS, false)
if err != nil {
break
}
aliases := make([]string, 0)
for _, answer := range response.Answer {
if answer.Header().Rrtype != dns.TypeHTTPS || answer.Header().Class != dns.ClassINET {
continue
}
https := answer.(*dns.HTTPS)
if https.Priority != 0 || len(https.Target) < 2 {
continue
}
aliases = append(aliases, https.Target)
}
if len(aliases) == 0 {
fmt.Println("-")
} else {
fmt.Println(strings.Join(aliases, ", "))
}
fmt.Printf("HTTPS info : ")
info := make([]string, 0)
for _, answer := range response.Answer {
if answer.Header().Rrtype != dns.TypeHTTPS || answer.Header().Class != dns.ClassINET {
continue
}
https := answer.(*dns.HTTPS)
if https.Priority == 0 || len(https.Target) > 1 {
continue
}
for _, value := range https.Value {
info = append(info, fmt.Sprintf("[%s]=[%s]", value.Key(), value.String()))
}
}
if len(info) == 0 {
fmt.Println("-")
} else {
fmt.Println(strings.Join(info, ", "))
}
}
fmt.Println("")
for once := true; once; once = false {
fmt.Printf("Host info : ")
response, err := resolveQuery(server, cname, dns.TypeHINFO, false)
if err != nil {
break
}
hinfo := make([]string, 0)
for _, answer := range response.Answer {
if answer.Header().Rrtype != dns.TypeHINFO || answer.Header().Class != dns.ClassINET {
continue
}
hinfo = append(hinfo, fmt.Sprintf("%s %s", answer.(*dns.HINFO).Cpu, answer.(*dns.HINFO).Os))
}
if len(hinfo) == 0 {
fmt.Println("-")
} else {
fmt.Println(strings.Join(hinfo, ", "))
}
}
for once := true; once; once = false {
fmt.Printf("TXT records : ")
response, err := resolveQuery(server, cname, dns.TypeTXT, false)
if err != nil {
break
}
txt := make([]string, 0)
for _, answer := range response.Answer {
if answer.Header().Rrtype != dns.TypeTXT || answer.Header().Class != dns.ClassINET {
continue
}
txt = append(txt, strings.Join(answer.(*dns.TXT).Txt, " "))
}
if len(txt) == 0 {
fmt.Println("-")
} else {
fmt.Println(strings.Join(txt, ", "))
}
}
fmt.Println("")
}
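
The resolveQuery helper on the left-hand side of this hunk retries on network timeouts while doubling the client's read timeout. A trimmed-down sketch of that pattern with miekg/dns; the server address and retry count here are placeholders:

package main

import (
	"errors"
	"fmt"
	"net"
	"time"

	"github.com/miekg/dns"
)

// exchangeWithBackoff retries a DNS exchange up to three times, doubling the
// read timeout whenever the failure was a timeout, as resolveQuery does above.
func exchangeWithBackoff(msg *dns.Msg, server string) (*dns.Msg, error) {
	client := new(dns.Client)
	client.ReadTimeout = 2 * time.Second
	for i := 0; i < 3; i++ {
		response, _, err := client.Exchange(msg, server)
		if neterr, ok := err.(net.Error); ok && neterr.Timeout() {
			client.ReadTimeout *= 2
			continue
		}
		if err != nil {
			return nil, err
		}
		return response, nil
	}
	return nil, errors.New("timeout")
}

func main() {
	msg := new(dns.Msg)
	msg.SetQuestion("example.com.", dns.TypeA)
	response, err := exchangeWithBackoff(msg, "9.9.9.9:53")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(dns.RcodeToString[response.Rcode])
}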

File diff suppressed because it is too large

View file

@ -1,11 +0,0 @@
//go:build android
package main
func ServiceManagerStartNotify() error {
return nil
}
func ServiceManagerReadyNotify() error {
return nil
}

View file

@ -1,35 +1,12 @@
//go:build !android
package main
import (
"github.com/coreos/go-systemd/daemon"
clocksmith "github.com/jedisct1/go-clocksmith"
)
const SdNotifyStatus = "STATUS="
import "github.com/coreos/go-systemd/daemon"
func ServiceManagerStartNotify() error {
daemon.SdNotify(false, SdNotifyStatus+"Starting...")
daemon.SdNotify(false, "STATUS=Starting")
return nil
}
func ServiceManagerReadyNotify() error {
daemon.SdNotify(false, daemon.SdNotifyReady+"\n"+SdNotifyStatus+"Ready")
return systemDWatchdog()
}
func systemDWatchdog() error {
watchdogFailureDelay, err := daemon.SdWatchdogEnabled(false)
if err != nil || watchdogFailureDelay == 0 {
return err
}
refreshInterval := watchdogFailureDelay / 3
go func() {
for {
daemon.SdNotify(false, daemon.SdNotifyWatchdog)
clocksmith.Sleep(refreshInterval)
}
}()
return nil
func ServiceManagerReadyNotify() {
daemon.SdNotify(false, "READY=1")
}
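
The removed lines above carry the systemd watchdog logic: notify readiness, then ping the watchdog at a third of the configured failure delay. A sketch of that flow with the coreos go-systemd daemon package, using time.Sleep in place of the clocksmith helper:

package main

import (
	"time"

	"github.com/coreos/go-systemd/daemon"
)

// notifyReadyAndWatchdog tells systemd the service is ready and, if a
// watchdog is configured, keeps pinging it at a third of the failure delay,
// mirroring the logic removed in the hunk above.
func notifyReadyAndWatchdog() error {
	daemon.SdNotify(false, daemon.SdNotifyReady)
	delay, err := daemon.SdWatchdogEnabled(false)
	if err != nil || delay == 0 {
		return err
	}
	go func() {
		for {
			daemon.SdNotify(false, daemon.SdNotifyWatchdog)
			time.Sleep(delay / 3)
		}
	}()
	return nil
}

func main() {
	_ = notifyReadyAndWatchdog()
	select {} // keep the process alive so the watchdog goroutine can run
}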

View file

@ -1,4 +1,4 @@
//go:build !linux && !windows
// +build !linux,!windows
package main
@ -6,6 +6,5 @@ func ServiceManagerStartNotify() error {
return nil
}
func ServiceManagerReadyNotify() error {
return nil
func ServiceManagerReadyNotify() {
}

View file

@ -7,11 +7,8 @@ func ServiceManagerStartNotify() error {
if err != nil {
return err
}
_ = mgr.Disconnect()
mgr.Disconnect()
return nil
}
func ServiceManagerReadyNotify() error {
return nil
}
func ServiceManagerReadyNotify() {}

View file

@ -1,31 +0,0 @@
package main
import (
"net"
"syscall"
)
func (proxy *Proxy) udpListenerConfig() (*net.ListenConfig, error) {
return &net.ListenConfig{
Control: func(network, address string, c syscall.RawConn) error {
_ = c.Control(func(fd uintptr) {
_ = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_IP, syscall.IP_DF, 0)
_ = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_IP, syscall.IP_TOS, 0x70)
_ = syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVBUF, 4096)
_ = syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDBUF, 4096)
})
return nil
},
}, nil
}
func (proxy *Proxy) tcpListenerConfig() (*net.ListenConfig, error) {
return &net.ListenConfig{
Control: func(network, address string, c syscall.RawConn) error {
_ = c.Control(func(fd uintptr) {
_ = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_IP, syscall.IP_TOS, 0x70)
})
return nil
},
}, nil
}

View file

@ -1,35 +0,0 @@
package main
import (
"net"
"syscall"
)
func (proxy *Proxy) udpListenerConfig() (*net.ListenConfig, error) {
return &net.ListenConfig{
Control: func(network, address string, c syscall.RawConn) error {
_ = c.Control(func(fd uintptr) {
_ = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_IP, syscall.IP_BINDANY, 1)
_ = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_IPV6, syscall.IPV6_BINDANY, 1)
_ = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_IP, syscall.IP_DF, 0)
_ = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_IP, syscall.IP_TOS, 0x70)
_ = syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVBUF, 4096)
_ = syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDBUF, 4096)
})
return nil
},
}, nil
}
func (proxy *Proxy) tcpListenerConfig() (*net.ListenConfig, error) {
return &net.ListenConfig{
Control: func(network, address string, c syscall.RawConn) error {
_ = c.Control(func(fd uintptr) {
_ = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_IP, syscall.IP_BINDANY, 1)
_ = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_IPV6, syscall.IPV6_BINDANY, 1)
_ = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_IP, syscall.IP_TOS, 0x70)
})
return nil
},
}, nil
}

View file

@ -1,40 +0,0 @@
package main
import (
"net"
"syscall"
)
func (proxy *Proxy) udpListenerConfig() (*net.ListenConfig, error) {
return &net.ListenConfig{
Control: func(network, address string, c syscall.RawConn) error {
_ = c.Control(func(fd uintptr) {
_ = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_IP, syscall.IP_FREEBIND, 1)
_ = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_IP, syscall.IP_DF, 0)
_ = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_IP, syscall.IP_TOS, 0x70)
_ = syscall.SetsockoptInt(
int(fd),
syscall.IPPROTO_IP,
syscall.IP_MTU_DISCOVER,
syscall.IP_PMTUDISC_DONT,
)
_ = syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVBUFFORCE, 4096)
_ = syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDBUFFORCE, 4096)
})
return nil
},
}, nil
}
func (proxy *Proxy) tcpListenerConfig() (*net.ListenConfig, error) {
return &net.ListenConfig{
Control: func(network, address string, c syscall.RawConn) error {
_ = c.Control(func(fd uintptr) {
_ = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_IP, syscall.IP_FREEBIND, 1)
_ = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_IP, syscall.IP_TOS, 0x70)
_ = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_TCP, syscall.TCP_QUICKACK, 1)
})
return nil
},
}, nil
}
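
All of these per-platform files build a net.ListenConfig whose Control hook runs on the raw socket descriptor before bind(), which is where the setsockopt calls above take effect. An illustrative usage sketch, assuming a Unix-like platform where syscall.IP_TOS is defined; the address is a placeholder:

package main

import (
	"context"
	"fmt"
	"net"
	"syscall"
)

func main() {
	lc := net.ListenConfig{
		// The Control callback receives the raw fd before bind(), so socket
		// options such as IP_TOS apply to the listener from the start.
		Control: func(network, address string, c syscall.RawConn) error {
			return c.Control(func(fd uintptr) {
				_ = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_IP, syscall.IP_TOS, 0x70)
			})
		},
	}
	pc, err := lc.ListenPacket(context.Background(), "udp", "127.0.0.1:0")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer pc.Close()
	fmt.Println("listening on", pc.LocalAddr())
}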

View file

@ -1,33 +0,0 @@
package main
import (
"net"
"syscall"
)
func (proxy *Proxy) udpListenerConfig() (*net.ListenConfig, error) {
return &net.ListenConfig{
Control: func(network, address string, c syscall.RawConn) error {
_ = c.Control(func(fd uintptr) {
_ = syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, syscall.SO_BINDANY, 1)
_ = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_IP, syscall.IP_DF, 0)
_ = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_IP, syscall.IP_TOS, 0x70)
_ = syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVBUF, 4096)
_ = syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDBUF, 4096)
})
return nil
},
}, nil
}
func (proxy *Proxy) tcpListenerConfig() (*net.ListenConfig, error) {
return &net.ListenConfig{
Control: func(network, address string, c syscall.RawConn) error {
_ = c.Control(func(fd uintptr) {
_ = syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, syscall.SO_BINDANY, 1)
_ = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_IP, syscall.IP_TOS, 0x70)
})
return nil
},
}, nil
}

View file

@ -1,15 +0,0 @@
//go:build !freebsd && !openbsd && !windows && !darwin && !linux
package main
import (
"net"
)
func (proxy *Proxy) udpListenerConfig() (*net.ListenConfig, error) {
return &net.ListenConfig{}, nil
}
func (proxy *Proxy) tcpListenerConfig() (*net.ListenConfig, error) {
return &net.ListenConfig{}, nil
}

View file

@ -1,30 +0,0 @@
package main
import (
"net"
"syscall"
)
func (proxy *Proxy) udpListenerConfig() (*net.ListenConfig, error) {
return &net.ListenConfig{
Control: func(network, address string, c syscall.RawConn) error {
_ = c.Control(func(fd uintptr) {
_ = syscall.SetsockoptInt(syscall.Handle(fd), syscall.IPPROTO_IP, syscall.IP_TOS, 0x70)
_ = syscall.SetsockoptInt(syscall.Handle(fd), syscall.SOL_SOCKET, syscall.SO_RCVBUF, 4096)
_ = syscall.SetsockoptInt(syscall.Handle(fd), syscall.SOL_SOCKET, syscall.SO_SNDBUF, 4096)
})
return nil
},
}, nil
}
func (proxy *Proxy) tcpListenerConfig() (*net.ListenConfig, error) {
return &net.ListenConfig{
Control: func(network, address string, c syscall.RawConn) error {
_ = c.Control(func(fd uintptr) {
_ = syscall.SetsockoptInt(syscall.Handle(fd), syscall.IPPROTO_IP, syscall.IP_TOS, 0x70)
})
return nil
},
}, nil
}

View file

@ -1,19 +1,22 @@
package main
import (
"bytes"
"errors"
"fmt"
"math/rand"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"path/filepath"
"strings"
"time"
"unicode"
"github.com/dchest/safefile"
"github.com/jedisct1/dlog"
"github.com/jedisct1/go-dnsstamps"
stamps "github.com/jedisct1/go-dnsstamps"
"github.com/jedisct1/go-minisign"
)
@ -24,237 +27,195 @@ const (
)
const (
DefaultPrefetchDelay time.Duration = 24 * time.Hour
MinimumPrefetchInterval time.Duration = 10 * time.Minute
MinSourcesUpdateDelay = time.Duration(24) * time.Hour
)
type Source struct {
name string
urls []*url.URL
format SourceFormat
bin []byte
minisignKey *minisign.PublicKey
cacheFile string
cacheTTL, prefetchDelay time.Duration
refresh time.Time
prefix string
urls []string
format SourceFormat
in string
}
// timeNow() is replaced by tests to provide a static value
var timeNow = time.Now
func (source *Source) checkSignature(bin, sig []byte) error {
signature, err := minisign.DecodeSignature(string(sig))
if err == nil {
_, err = source.minisignKey.Verify(bin, signature)
}
return err
}
func (source *Source) fetchFromCache(now time.Time) (time.Duration, error) {
var err error
var bin, sig []byte
if bin, err = os.ReadFile(source.cacheFile); err != nil {
return 0, err
}
if sig, err = os.ReadFile(source.cacheFile + ".minisig"); err != nil {
return 0, err
}
if err = source.checkSignature(bin, sig); err != nil {
return 0, err
}
source.bin = bin
var fi os.FileInfo
if fi, err = os.Stat(source.cacheFile); err != nil {
return 0, err
}
var ttl time.Duration = 0
if elapsed := now.Sub(fi.ModTime()); elapsed < source.cacheTTL {
ttl = source.prefetchDelay - elapsed
dlog.Debugf("Source [%s] cache file [%s] is still fresh, next update: %v", source.name, source.cacheFile, ttl)
} else {
dlog.Debugf("Source [%s] cache file [%s] needs to be refreshed", source.name, source.cacheFile)
}
return ttl, nil
}
func writeSource(f string, bin, sig []byte) error {
var err error
var fSrc, fSig *safefile.File
if fSrc, err = safefile.Create(f, 0o644); err != nil {
return err
}
defer fSrc.Close()
if fSig, err = safefile.Create(f+".minisig", 0o644); err != nil {
return err
}
defer fSig.Close()
if _, err = fSrc.Write(bin); err != nil {
return err
}
if _, err = fSig.Write(sig); err != nil {
return err
}
if err = fSrc.Commit(); err != nil {
return err
}
return fSig.Commit()
}
func (source *Source) updateCache(bin, sig []byte, now time.Time) {
file := source.cacheFile
absPath := file
if resolved, err := filepath.Abs(file); err != nil {
absPath = resolved
}
if !bytes.Equal(source.bin, bin) {
if err := writeSource(file, bin, sig); err != nil {
dlog.Warnf("Couldn't write cache file [%s]: %s", absPath, err) // an error writing to the cache isn't fatal
}
}
if err := os.Chtimes(file, now, now); err != nil {
dlog.Warnf("Couldn't update cache file [%s]: %s", absPath, err)
}
source.bin = bin
}
func (source *Source) parseURLs(urls []string) {
for _, urlStr := range urls {
if srcURL, err := url.Parse(urlStr); err != nil {
dlog.Warnf("Source [%s] failed to parse URL [%s]", source.name, urlStr)
} else {
source.urls = append(source.urls, srcURL)
}
}
}
func fetchFromURL(xTransport *XTransport, u *url.URL) ([]byte, error) {
bin, _, _, _, err := xTransport.GetWithCompression(u, "", DefaultTimeout)
return bin, err
}
func (source *Source) fetchWithCache(xTransport *XTransport, now time.Time) (time.Duration, error) {
var err error
var ttl time.Duration
if ttl, err = source.fetchFromCache(now); err != nil {
if len(source.urls) == 0 {
dlog.Errorf("Source [%s] cache file [%s] not present and no valid URL", source.name, source.cacheFile)
return 0, err
}
dlog.Debugf("Source [%s] cache file [%s] not present", source.name, source.cacheFile)
}
if len(source.urls) == 0 {
return 0, err
}
if ttl > 0 {
source.refresh = now.Add(ttl)
return 0, err
}
ttl = MinimumPrefetchInterval
source.refresh = now.Add(ttl)
var bin, sig []byte
for _, srcURL := range source.urls {
dlog.Infof("Source [%s] loading from URL [%s]", source.name, srcURL)
sigURL := &url.URL{}
*sigURL = *srcURL // deep copy to avoid parsing twice
sigURL.Path += ".minisig"
if bin, err = fetchFromURL(xTransport, srcURL); err != nil {
dlog.Debugf("Source [%s] failed to download from URL [%s]", source.name, srcURL)
continue
}
if sig, err = fetchFromURL(xTransport, sigURL); err != nil {
dlog.Debugf("Source [%s] failed to download signature from URL [%s]", source.name, sigURL)
continue
}
if err = source.checkSignature(bin, sig); err != nil {
dlog.Debugf("Source [%s] failed signature check using URL [%s]", source.name, srcURL)
continue
}
break // valid signature
func fetchFromCache(cacheFile string, refreshDelay time.Duration) (in string, expired bool, delayTillNextUpdate time.Duration, err error) {
expired = false
if refreshDelay < MinSourcesUpdateDelay {
refreshDelay = MinSourcesUpdateDelay
}
fi, err := os.Stat(cacheFile)
if err != nil {
return 0, err
dlog.Debugf("Cache file [%s] not present", cacheFile)
delayTillNextUpdate = time.Duration(0)
return
}
source.updateCache(bin, sig, now)
ttl = source.prefetchDelay
source.refresh = now.Add(ttl)
return ttl, nil
elapsed := time.Since(fi.ModTime())
if elapsed < refreshDelay {
dlog.Debugf("Cache file [%s] is still fresh", cacheFile)
delayTillNextUpdate = refreshDelay - elapsed
} else {
dlog.Debugf("Cache file [%s] needs to be refreshed", cacheFile)
delayTillNextUpdate = time.Duration(0)
}
var bin []byte
bin, err = ioutil.ReadFile(cacheFile)
if err != nil {
delayTillNextUpdate = time.Duration(0)
return
}
in = string(bin)
if delayTillNextUpdate <= time.Duration(0) {
expired = true
}
return
}
// NewSource loads a new source using the given cacheFile and urls, ensuring it has a valid signature
func NewSource(
name string,
xTransport *XTransport,
urls []string,
minisignKeyStr string,
cacheFile string,
formatStr string,
refreshDelay time.Duration,
prefix string,
) (*Source, error) {
if refreshDelay < DefaultPrefetchDelay {
refreshDelay = DefaultPrefetchDelay
func fetchWithCache(xTransport *XTransport, urlStr string, cacheFile string, refreshDelay time.Duration) (in string, cached bool, delayTillNextUpdate time.Duration, err error) {
cached = false
expired := false
in, expired, delayTillNextUpdate, err = fetchFromCache(cacheFile, refreshDelay)
if err == nil && !expired {
dlog.Debugf("Delay till next update: %v", delayTillNextUpdate)
cached = true
return
}
source := &Source{
name: name,
urls: []*url.URL{},
cacheFile: cacheFile,
cacheTTL: refreshDelay,
prefetchDelay: DefaultPrefetchDelay,
prefix: prefix,
if expired {
cached = true
}
if len(urlStr) == 0 {
if !expired {
err = fmt.Errorf("Cache file [%s] not present and no URL given to retrieve it", cacheFile)
}
return
}
var resp *http.Response
dlog.Infof("Loading source information from URL [%s]", urlStr)
url, err := url.Parse(urlStr)
if err != nil {
return
}
resp, _, err = xTransport.Get(url, "", 30*time.Second)
if err == nil && resp != nil && (resp.StatusCode < 200 || resp.StatusCode > 299) {
err = fmt.Errorf("Webserver returned code %d", resp.StatusCode)
return
} else if err != nil {
return
} else if resp == nil {
err = errors.New("Webserver returned an error")
return
}
var bin []byte
bin, err = ioutil.ReadAll(io.LimitReader(resp.Body, MaxHTTPBodyLength))
resp.Body.Close()
if err != nil {
return
}
err = nil
cached = false
in = string(bin)
delayTillNextUpdate = refreshDelay
return
}
func AtomicFileWrite(file string, data []byte) error {
return safefile.WriteFile(file, data, 0644)
}
type URLToPrefetch struct {
url string
cacheFile string
when time.Time
}
func NewSource(xTransport *XTransport, urls []string, minisignKeyStr string, cacheFile string, formatStr string, refreshDelay time.Duration) (Source, []URLToPrefetch, error) {
source := Source{urls: urls}
if formatStr == "v2" {
source.format = SourceFormatV2
} else {
return source, fmt.Errorf("Unsupported source format: [%s]", formatStr)
return source, []URLToPrefetch{}, fmt.Errorf("Unsupported source format: [%s]", formatStr)
}
if minisignKey, err := minisign.NewPublicKey(minisignKeyStr); err == nil {
source.minisignKey = &minisignKey
} else {
return source, err
minisignKey, err := minisign.NewPublicKey(minisignKeyStr)
if err != nil {
return source, []URLToPrefetch{}, err
}
source.parseURLs(urls)
_, err := source.fetchWithCache(xTransport, timeNow())
if err == nil {
dlog.Noticef("Source [%s] loaded", name)
}
return source, err
}
now := time.Now()
urlsToPrefetch := []URLToPrefetch{}
sigCacheFile := cacheFile + ".minisig"
// PrefetchSources downloads latest versions of given sources, ensuring they have a valid signature before caching
func PrefetchSources(xTransport *XTransport, sources []*Source) time.Duration {
now := timeNow()
interval := MinimumPrefetchInterval
for _, source := range sources {
if source.refresh.IsZero() || source.refresh.After(now) {
continue
var sigStr, in string
var cached, sigCached bool
var delayTillNextUpdate, sigDelayTillNextUpdate time.Duration
var sigErr error
var preloadURL string
if len(urls) <= 0 {
in, cached, delayTillNextUpdate, err = fetchWithCache(xTransport, "", cacheFile, refreshDelay)
sigStr, sigCached, sigDelayTillNextUpdate, sigErr = fetchWithCache(xTransport, "", sigCacheFile, refreshDelay)
} else {
preloadURL = urls[0]
for _, url := range urls {
sigURL := url + ".minisig"
in, cached, delayTillNextUpdate, err = fetchWithCache(xTransport, url, cacheFile, refreshDelay)
sigStr, sigCached, sigDelayTillNextUpdate, sigErr = fetchWithCache(xTransport, sigURL, sigCacheFile, refreshDelay)
if err == nil && sigErr == nil {
preloadURL = url
break
}
dlog.Infof("Loading from [%s] failed", url)
}
dlog.Debugf("Prefetching [%s]", source.name)
if delay, err := source.fetchWithCache(xTransport, now); err != nil {
dlog.Infof("Prefetching [%s] failed: %v, will retry in %v", source.name, err, interval)
} else {
dlog.Debugf("Prefetching [%s] succeeded, next update in %v min", source.name, delay)
if delay >= MinimumPrefetchInterval && (interval == MinimumPrefetchInterval || interval > delay) {
interval = delay
}
if len(preloadURL) > 0 {
url := preloadURL
sigURL := url + ".minisig"
urlsToPrefetch = append(urlsToPrefetch, URLToPrefetch{url: url, cacheFile: cacheFile, when: now.Add(delayTillNextUpdate)})
urlsToPrefetch = append(urlsToPrefetch, URLToPrefetch{url: sigURL, cacheFile: sigCacheFile, when: now.Add(sigDelayTillNextUpdate)})
}
if sigErr != nil && err == nil {
err = sigErr
}
if err != nil {
return source, urlsToPrefetch, err
}
signature, err := minisign.DecodeSignature(sigStr)
if err != nil {
os.Remove(cacheFile)
os.Remove(sigCacheFile)
return source, urlsToPrefetch, err
}
res, err := minisignKey.Verify([]byte(in), signature)
if err != nil || !res {
os.Remove(cacheFile)
os.Remove(sigCacheFile)
return source, urlsToPrefetch, err
}
if !cached {
if err = AtomicFileWrite(cacheFile, []byte(in)); err != nil {
if absPath, err2 := filepath.Abs(cacheFile); err2 == nil {
dlog.Warnf("%s: %s", absPath, err)
}
}
}
return interval
if !sigCached {
if err = AtomicFileWrite(sigCacheFile, []byte(sigStr)); err != nil {
if absPath, err2 := filepath.Abs(sigCacheFile); err2 == nil {
dlog.Warnf("%s: %s", absPath, err)
}
}
}
dlog.Noticef("Source [%s] loaded", cacheFile)
source.in = in
return source, urlsToPrefetch, nil
}
func (source *Source) Parse() ([]RegisteredServer, error) {
func (source *Source) Parse(prefix string) ([]RegisteredServer, error) {
if source.format == SourceFormatV2 {
return source.parseV2()
return source.parseV2(prefix)
}
dlog.Fatal("Unexpected source format")
return []RegisteredServer{}, nil
}
func (source *Source) parseV2() ([]RegisteredServer, error) {
func (source *Source) parseV2(prefix string) ([]RegisteredServer, error) {
var registeredServers []RegisteredServer
var stampErrs []string
appendStampErr := func(format string, a ...interface{}) {
@ -262,30 +223,34 @@ func (source *Source) parseV2() ([]RegisteredServer, error) {
stampErrs = append(stampErrs, stampErr)
dlog.Warn(stampErr)
}
in := string(source.bin)
in := string(source.in)
parts := strings.Split(in, "## ")
if len(parts) < 2 {
return registeredServers, fmt.Errorf("Invalid format for source at [%v]", source.urls)
}
parts = parts[1:]
PartsLoop:
for _, part := range parts {
part = strings.TrimSpace(part)
part = strings.TrimFunc(part, unicode.IsSpace)
subparts := strings.Split(part, "\n")
if len(subparts) < 2 {
return registeredServers, fmt.Errorf("Invalid format for source at [%v]", source.urls)
}
name := strings.TrimSpace(subparts[0])
name := strings.TrimFunc(subparts[0], unicode.IsSpace)
if len(name) == 0 {
return registeredServers, fmt.Errorf("Invalid format for source at [%v]", source.urls)
}
subparts = subparts[1:]
name = source.prefix + name
name = prefix + name
var stampStr, description string
stampStrs := make([]string, 0)
for _, subpart := range subparts {
subpart = strings.TrimSpace(subpart)
if strings.HasPrefix(subpart, "sdns:") && len(subpart) >= 6 {
stampStrs = append(stampStrs, subpart)
subpart = strings.TrimFunc(subpart, unicode.IsSpace)
if strings.HasPrefix(subpart, "sdns:") {
if len(stampStr) > 0 {
appendStampErr("Multiple stamps for server [%s]", name)
continue PartsLoop
}
stampStr = subpart
continue
} else if len(subpart) == 0 || strings.HasPrefix(subpart, "//") {
continue
@ -295,23 +260,13 @@ func (source *Source) parseV2() ([]RegisteredServer, error) {
}
description += subpart
}
stampStrsLen := len(stampStrs)
if stampStrsLen <= 0 {
if len(stampStr) < 6 {
appendStampErr("Missing stamp for server [%s]", name)
continue
} else if stampStrsLen > 1 {
rand.Shuffle(stampStrsLen, func(i, j int) { stampStrs[i], stampStrs[j] = stampStrs[j], stampStrs[i] })
}
var stamp dnsstamps.ServerStamp
var err error
for _, stampStr = range stampStrs {
stamp, err = dnsstamps.NewServerStampFromString(stampStr)
if err == nil {
break
}
appendStampErr("Invalid or unsupported stamp [%v]: %s", stampStr, err.Error())
}
stamp, err := stamps.NewServerStampFromString(stampStr)
if err != nil {
appendStampErr("Invalid or unsupported stamp [%v]: %s", stampStr, err.Error())
continue
}
registeredServer := RegisteredServer{
@ -325,3 +280,12 @@ func (source *Source) parseV2() ([]RegisteredServer, error) {
}
return registeredServers, nil
}
func PrefetchSourceURL(xTransport *XTransport, urlToPrefetch *URLToPrefetch) error {
in, cached, delayTillNextUpdate, err := fetchWithCache(xTransport, urlToPrefetch.url, urlToPrefetch.cacheFile, MinSourcesUpdateDelay)
if err == nil && !cached {
AtomicFileWrite(urlToPrefetch.cacheFile, []byte(in))
}
urlToPrefetch.when = time.Now().Add(delayTillNextUpdate)
return err
}
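
Both versions of the source loader above decide whether to hit the network by comparing the cache file's mtime against a refresh delay. A minimal sketch of that freshness check; the file name and delay below are placeholders:

package main

import (
	"fmt"
	"os"
	"time"
)

// cacheFreshFor reports how long a cache file stays fresh given a refresh
// delay, mirroring the mtime comparison in fetchFromCache above. A zero
// duration means the file is missing or due for a refresh.
func cacheFreshFor(cacheFile string, refreshDelay time.Duration) time.Duration {
	fi, err := os.Stat(cacheFile)
	if err != nil {
		return 0
	}
	if elapsed := time.Since(fi.ModTime()); elapsed < refreshDelay {
		return refreshDelay - elapsed
	}
	return 0
}

func main() {
	fmt.Println(cacheFreshFor("public-resolvers.md", 24*time.Hour))
}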

View file

@ -1,491 +0,0 @@
package main
import (
"bytes"
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"os"
"path/filepath"
"strconv"
"strings"
"testing"
"time"
"github.com/hectane/go-acl"
"github.com/jedisct1/dlog"
"github.com/jedisct1/go-minisign"
"github.com/powerman/check"
)
type SourceFixture struct {
suffix string
content []byte
length string // HTTP Content-Length header
perms os.FileMode
mtime time.Time
}
type SourceTestState uint8
const (
TestStateCorrect SourceTestState = iota // valid files
TestStateExpired // modification time of files set in distant past (cache only)
TestStatePartial // incomplete files
TestStatePartialSig // incomplete .minisig
TestStateMissing // non-existent files
TestStateMissingSig // non-existent .minisig
TestStateReadErr // I/O error on reading files (download only)
TestStateReadSigErr // I/O error on reading .minisig (download only)
TestStateOpenErr // I/O error on opening files
TestStateOpenSigErr // I/O error on opening .minisig
TestStatePathErr // unparsable path to files (download only)
)
type SourceTestData struct {
n int // subtest counter
xTransport *XTransport
key *minisign.PublicKey
keyStr, tempDir string
sources []string
fixtures map[SourceTestState]map[string]SourceFixture
timeNow, timeOld, timeUpd time.Time
server *httptest.Server
reqActual, reqExpect map[string]uint
cacheTests map[string]SourceTestState
downloadTests map[string][]SourceTestState
}
type SourceTestExpect struct {
success bool
err, cachePath string
cache []SourceFixture
mtime time.Time
urls []string
Source *Source
delay time.Duration
prefix string
}
func readFixture(t *testing.T, name string) []byte {
bin, err := os.ReadFile(filepath.Join("testdata", name))
if err != nil {
t.Fatalf("Unable to read test fixture %s: %v", name, err)
}
return bin
}
func writeSourceCache(t *testing.T, e *SourceTestExpect) {
for _, f := range e.cache {
if f.content == nil {
continue
}
path := e.cachePath + f.suffix
perms := f.perms
if perms == 0 {
perms = 0o644
}
if err := os.WriteFile(path, f.content, perms); err != nil {
t.Fatalf("Unable to write cache file %s: %v", path, err)
}
if err := acl.Chmod(path, perms); err != nil {
t.Fatalf("Unable to set permissions on cache file %s: %v", path, err)
}
if f.suffix != "" {
continue
}
mtime := f.mtime
if f.mtime.IsZero() {
mtime = e.mtime
}
if err := os.Chtimes(path, mtime, mtime); err != nil {
t.Fatalf("Unable to touch cache file %s to %v: %v", path, f.mtime, err)
}
}
}
func checkSourceCache(c *check.C, e *SourceTestExpect) {
for _, f := range e.cache {
path := e.cachePath + f.suffix
_ = acl.Chmod(path, 0o644) // don't worry if this fails, reading it will catch the same problem
got, err := os.ReadFile(path)
c.DeepEqual(got, f.content, "Unexpected content for cache file '%s', err %v", path, err)
if f.suffix != "" {
continue
}
if fi, err := os.Stat(path); err == nil { // again, if this failed it was already caught above
mtime := f.mtime
if f.mtime.IsZero() {
mtime = e.mtime
}
c.EQ(fi.ModTime(), mtime, "Unexpected timestamp for cache file '%s'", path)
}
}
}
func loadSnakeoil(t *testing.T, d *SourceTestData) {
key, err := minisign.NewPublicKeyFromFile(filepath.Join("testdata", "snakeoil.pub"))
if err != nil {
t.Fatalf("Unable to load snakeoil key: %v", err)
}
d.keyStr = string(bytes.SplitN(readFixture(t, "snakeoil.pub"), []byte("\n"), 2)[1])
d.key = &key
}
func loadTestSourceNames(t *testing.T, d *SourceTestData) {
files, err := os.ReadDir(filepath.Join("testdata", "sources"))
if err != nil {
t.Fatalf("Unable to load list of test sources: %v", err)
}
for _, file := range files {
if !file.IsDir() && strings.HasSuffix(file.Name(), ".minisig") {
d.sources = append(d.sources, strings.TrimSuffix(file.Name(), ".minisig"))
}
}
}
func generateFixtureState(_ *testing.T, d *SourceTestData, suffix, file string, state SourceTestState) {
if _, ok := d.fixtures[state]; !ok {
d.fixtures[state] = map[string]SourceFixture{}
}
if suffix != ".minisig" {
switch state {
case TestStatePartialSig, TestStateMissingSig, TestStateReadSigErr, TestStateOpenSigErr:
d.fixtures[state][file] = d.fixtures[TestStateCorrect][file]
return
}
}
f := SourceFixture{suffix: suffix}
switch state {
case TestStateExpired:
f.content, f.mtime = d.fixtures[TestStateCorrect][file].content, d.timeOld
case TestStatePartial, TestStatePartialSig:
f.content = d.fixtures[TestStateCorrect][file].content[:1]
case TestStateReadErr, TestStateReadSigErr:
f.content, f.length = []byte{}, "1"
case TestStateOpenErr, TestStateOpenSigErr:
f.content, f.perms = d.fixtures[TestStateCorrect][file].content[:1], 0o200
}
d.fixtures[state][file] = f
}
func loadFixtures(t *testing.T, d *SourceTestData) {
d.fixtures = map[SourceTestState]map[string]SourceFixture{TestStateCorrect: {}}
for _, source := range d.sources {
for _, suffix := range [...]string{"", ".minisig"} {
file := source + suffix
d.fixtures[TestStateCorrect][file] = SourceFixture{
suffix: suffix,
content: readFixture(t, filepath.Join("sources", file)),
}
for _, state := range [...]SourceTestState{
TestStateExpired,
TestStatePartial,
TestStateReadErr,
TestStateOpenErr,
TestStatePartialSig,
TestStateMissingSig,
TestStateReadSigErr,
TestStateOpenSigErr,
} {
generateFixtureState(t, d, suffix, file, state)
}
}
}
}
func makeTempDir(t *testing.T, d *SourceTestData) {
name, err := os.MkdirTemp("", "sources_test.go."+t.Name())
if err != nil {
t.Fatalf("Unable to create temporary directory: %v", err)
}
d.tempDir = name
}
func makeTestServer(t *testing.T, d *SourceTestData) {
d.reqActual, d.reqExpect = map[string]uint{}, map[string]uint{}
d.server = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var data []byte = nil
d.reqActual[r.URL.Path]++
pathParts := strings.SplitN(strings.TrimPrefix(r.URL.Path, "/"), "/", 2)
state, _ := strconv.ParseUint(pathParts[0], 10, 8)
if fixture, ok := d.fixtures[SourceTestState(state)][pathParts[1]]; ok {
if len(fixture.length) > 0 {
w.Header().Set("Content-Length", fixture.length) // client will return unexpected EOF
}
data = fixture.content
}
if data != nil {
if _, err := w.Write(data); err != nil {
t.Logf("Error writing HTTP response for request [%s]: %v", r.URL.Path, err)
}
} else {
w.WriteHeader(http.StatusNotFound)
}
}))
}
func checkTestServer(c *check.C, d *SourceTestData) {
c.DeepEqual(d.reqActual, d.reqExpect, "Unexpected HTTP request log")
d.reqActual, d.reqExpect = map[string]uint{}, map[string]uint{}
}
func setupSourceTest(t *testing.T) (func(), *SourceTestData) {
d := &SourceTestData{n: -1, xTransport: NewXTransport()}
d.cacheTests = map[string]SourceTestState{ // determines cache files written to disk before each call
"correct": TestStateCorrect,
"expired": TestStateExpired,
"partial": TestStatePartial,
"partial-sig": TestStatePartialSig,
"missing": TestStateMissing,
"missing-sig": TestStateMissingSig,
"open-err": TestStateOpenErr,
"open-sig-err": TestStateOpenSigErr,
}
d.downloadTests = map[string][]SourceTestState{ // determines the list of URLs passed in each call and how they will respond
"correct": {TestStateCorrect},
"partial": {TestStatePartial},
"partial-sig": {TestStatePartialSig},
"missing": {TestStateMissing},
"missing-sig": {TestStateMissingSig},
"read-err": {TestStateReadErr},
"read-sig-err": {TestStateReadSigErr},
"open-err": {TestStateOpenErr},
"open-sig-err": {TestStateOpenSigErr},
"path-err": {TestStatePathErr},
"partial,correct": {TestStatePartial, TestStateCorrect},
"partial-sig,correct": {TestStatePartialSig, TestStateCorrect},
"missing,correct": {TestStateMissing, TestStateCorrect},
"missing-sig,correct": {TestStateMissingSig, TestStateCorrect},
"read-err,correct": {TestStateReadErr, TestStateCorrect},
"read-sig-err,correct": {TestStateReadSigErr, TestStateCorrect},
"open-err,correct": {TestStateOpenErr, TestStateCorrect},
"open-sig-err,correct": {TestStateOpenSigErr, TestStateCorrect},
"path-err,correct": {TestStatePathErr, TestStateCorrect},
"no-urls": {},
}
d.xTransport.rebuildTransport()
d.timeNow = time.Now().AddDate(0, 0, 0).Truncate(time.Second)
d.timeOld = d.timeNow.Add(DefaultPrefetchDelay * -4)
d.timeUpd = d.timeNow.Add(DefaultPrefetchDelay)
timeNow = func() time.Time { return d.timeNow } // originally defined in sources.go, replaced during testing to ensure consistent results
makeTempDir(t, d)
makeTestServer(t, d)
loadSnakeoil(t, d)
loadTestSourceNames(t, d)
loadFixtures(t, d)
return func() {
os.RemoveAll(d.tempDir)
d.server.Close()
}, d
}
func prepSourceTestCache(t *testing.T, d *SourceTestData, e *SourceTestExpect, source string, state SourceTestState) {
e.cache = []SourceFixture{d.fixtures[state][source], d.fixtures[state][source+".minisig"]}
switch state {
case TestStateCorrect:
e.Source.bin, e.success = e.cache[0].content, true
case TestStateExpired:
e.Source.bin = e.cache[0].content
case TestStatePartial, TestStatePartialSig:
e.err = "signature"
case TestStateMissing, TestStateMissingSig, TestStateOpenErr, TestStateOpenSigErr:
e.err = "open"
}
writeSourceCache(t, e)
}
func prepSourceTestDownload(
_ *testing.T,
d *SourceTestData,
e *SourceTestExpect,
source string,
downloadTest []SourceTestState,
) {
if len(downloadTest) == 0 {
return
}
for _, state := range downloadTest {
path := "/" + strconv.FormatUint(uint64(state), 10) + "/" + source
serverURL := d.server.URL
switch state {
case TestStateMissing, TestStateMissingSig:
e.err = "404 Not Found"
case TestStatePartial, TestStatePartialSig:
e.err = "signature"
case TestStateReadErr, TestStateReadSigErr:
e.err = "unexpected EOF"
case TestStateOpenErr, TestStateOpenSigErr:
if u, err := url.Parse(serverURL + path); err == nil {
host, port := ExtractHostAndPort(u.Host, -1)
u.Host = fmt.Sprintf(
"%s:%d",
host,
port|0x10000,
) // high numeric port is parsed but then fails to connect
serverURL = u.String()
}
e.err = "invalid port"
case TestStatePathErr:
path = "..." + path // non-numeric port fails URL parsing
}
if u, err := url.Parse(serverURL + path); err == nil {
e.Source.urls = append(e.Source.urls, u)
}
e.urls = append(e.urls, serverURL+path)
if e.success {
continue
}
switch state {
case TestStateCorrect:
e.cache = []SourceFixture{d.fixtures[state][source], d.fixtures[state][source+".minisig"]}
e.Source.bin, e.success = e.cache[0].content, true
fallthrough
case TestStateMissingSig, TestStatePartial, TestStatePartialSig, TestStateReadSigErr:
d.reqExpect[path+".minisig"]++
fallthrough
case TestStateMissing, TestStateReadErr:
d.reqExpect[path]++
}
}
if e.success {
e.err = ""
e.delay = DefaultPrefetchDelay
} else {
e.delay = MinimumPrefetchInterval
}
if len(e.Source.urls) > 0 {
e.Source.refresh = d.timeNow.Add(e.delay)
} else {
e.success = false
}
}
func setupSourceTestCase(t *testing.T, d *SourceTestData, i int,
cacheTest *SourceTestState, downloadTest []SourceTestState,
) (id string, e *SourceTestExpect) {
id = strconv.Itoa(d.n) + "-" + strconv.Itoa(i)
e = &SourceTestExpect{
cachePath: filepath.Join(d.tempDir, id),
mtime: d.timeNow,
}
e.Source = &Source{
name: id, urls: []*url.URL{}, format: SourceFormatV2, minisignKey: d.key,
cacheFile: e.cachePath, cacheTTL: DefaultPrefetchDelay * 3, prefetchDelay: DefaultPrefetchDelay,
}
if cacheTest != nil {
prepSourceTestCache(t, d, e, d.sources[i], *cacheTest)
i = (i + 1) % len(d.sources) // make the cached and downloaded fixtures different
}
prepSourceTestDownload(t, d, e, d.sources[i], downloadTest)
return
}
func TestNewSource(t *testing.T) {
if testing.Verbose() {
dlog.SetLogLevel(dlog.SeverityDebug)
dlog.UseSyslog(false)
}
teardown, d := setupSourceTest(t)
defer teardown()
checkResult := func(t *testing.T, e *SourceTestExpect, got *Source, err error) {
c := check.T(t)
if len(e.err) > 0 {
c.Match(err, e.err, "Unexpected error")
} else {
c.Nil(err, "Unexpected error")
}
c.DeepEqual(got, e.Source, "Unexpected return")
checkTestServer(c, d)
checkSourceCache(c, e)
}
d.n++
for _, tt := range []struct {
v, key string
refreshDelay time.Duration
e *SourceTestExpect
}{
{"", "", 0, &SourceTestExpect{err: " ", Source: &Source{name: "short refresh delay", urls: []*url.URL{}, cacheTTL: DefaultPrefetchDelay, prefetchDelay: DefaultPrefetchDelay, prefix: ""}}},
{"v1", d.keyStr, DefaultPrefetchDelay * 2, &SourceTestExpect{err: "Unsupported source format", Source: &Source{name: "old format", urls: []*url.URL{}, cacheTTL: DefaultPrefetchDelay * 2, prefetchDelay: DefaultPrefetchDelay}}},
{"v2", "", DefaultPrefetchDelay * 3, &SourceTestExpect{err: "Invalid encoded public key", Source: &Source{name: "invalid public key", urls: []*url.URL{}, cacheTTL: DefaultPrefetchDelay * 3, prefetchDelay: DefaultPrefetchDelay}}},
} {
t.Run(tt.e.Source.name, func(t *testing.T) {
got, err := NewSource(
tt.e.Source.name,
d.xTransport,
tt.e.urls,
tt.key,
tt.e.cachePath,
tt.v,
tt.refreshDelay,
tt.e.prefix,
)
checkResult(t, tt.e, got, err)
})
}
for cacheTestName, cacheTest := range d.cacheTests {
for downloadTestName, downloadTest := range d.downloadTests {
d.n++
for i := range d.sources {
id, e := setupSourceTestCase(t, d, i, &cacheTest, downloadTest)
t.Run("cache "+cacheTestName+", download "+downloadTestName+"/"+id, func(t *testing.T) {
got, err := NewSource(
id,
d.xTransport,
e.urls,
d.keyStr,
e.cachePath,
"v2",
DefaultPrefetchDelay*3,
"",
)
checkResult(t, e, got, err)
})
}
}
}
}
func TestPrefetchSources(t *testing.T) {
if testing.Verbose() {
dlog.SetLogLevel(dlog.SeverityDebug)
dlog.UseSyslog(false)
}
teardown, d := setupSourceTest(t)
defer teardown()
checkResult := func(t *testing.T, expects []*SourceTestExpect, got time.Duration) {
c := check.T(t)
expectDelay := MinimumPrefetchInterval
for _, e := range expects {
if e.delay >= MinimumPrefetchInterval && (expectDelay == MinimumPrefetchInterval || expectDelay > e.delay) {
expectDelay = e.delay
}
}
c.InDelta(got, expectDelay, time.Second, "Unexpected return")
checkTestServer(c, d)
for _, e := range expects {
checkSourceCache(c, e)
}
}
timeNow = func() time.Time { return d.timeUpd } // since the fixtures are prepared using real now, make the tested code think it's the future
for downloadTestName, downloadTest := range d.downloadTests {
d.n++
sources := []*Source{}
expects := []*SourceTestExpect{}
for i := range d.sources {
_, e := setupSourceTestCase(t, d, i, nil, downloadTest)
e.mtime = d.timeUpd
s := &Source{}
*s = *e.Source
s.bin = nil
sources = append(sources, s)
expects = append(expects, e)
}
t.Run("download "+downloadTestName, func(t *testing.T) {
got := PrefetchSources(d.xTransport, sources)
checkResult(t, expects, got)
})
}
}
func TestMain(m *testing.M) { check.TestMain(m) }

View file

@@ -1 +0,0 @@
checks = ["all", "-ST1005"]

View file

@@ -1,5 +0,0 @@
package main
func (proxy *Proxy) addSystemDListeners() error {
return nil
}

View file

@@ -1,7 +1,7 @@
//go:build !linux
// +build !linux
package main
func (proxy *Proxy) addSystemDListeners() error {
func (proxy *Proxy) SystemDListeners() error {
return nil
}

View file

@@ -1,5 +1,3 @@
//go:build !android
package main
import (
@@ -9,26 +7,25 @@ import (
"github.com/jedisct1/dlog"
)
func (proxy *Proxy) addSystemDListeners() error {
func (proxy *Proxy) SystemDListeners() error {
files := activation.Files(true)
if len(files) > 0 {
if len(proxy.userName) > 0 || proxy.child {
dlog.Fatal(
"Systemd activated sockets are incompatible with privilege dropping. Remove activated sockets and fill `listen_addresses` in the dnscrypt-proxy configuration file instead.",
)
if (len(proxy.userName) > 0 || proxy.child) {
dlog.Fatal("Systemd activated sockets are incompatible with privilege dropping. Remove activated sockets and fill `listen_addresses` in the dnscrypt-proxy configuration file instead.")
}
dlog.Warn("Systemd sockets are untested and unsupported - use at your own risk")
}
for i, file := range files {
defer file.Close()
if listener, err := net.FileListener(file); err == nil {
proxy.registerTCPListener(listener.(*net.TCPListener))
dlog.Noticef("Wiring systemd TCP socket #%d, %s, %s", i, file.Name(), listener.Addr())
go proxy.tcpListener(listener.(*net.TCPListener))
} else if pc, err := net.FilePacketConn(file); err == nil {
proxy.registerUDPListener(pc.(*net.UDPConn))
dlog.Noticef("Wiring systemd UDP socket #%d, %s, %s", i, file.Name(), pc.LocalAddr())
go proxy.udpListener(pc.(*net.UDPConn))
}
file.Close()
}
return nil
}
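For context, the activation.Files call above picks up sockets handed over by systemd; a socket unit along these lines (an illustrative unit, not shipped in this diff, to be paired with a matching .service) would make systemd pass one TCP and one UDP file descriptor to the service:

[Unit]
Description=dnscrypt-proxy listening socket

[Socket]
ListenStream=127.0.0.1:53
ListenDatagram=127.0.0.1:53

[Install]
WantedBy=sockets.target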

View file

@@ -1,2 +0,0 @@
untrusted comment: minisign encrypted secret key
RWRTY0IyhB6jk0BvB9YnhvXfyLkbKj5CSoL4jZtHw7qidhhNW3sAAAACAAAAAAAAAEAAAAAAus/LOCZVIOcL/da0ldQytjgjKmd5D/C84VitiDP0Fe9zWfTxaGs6SXn4tk0ZYmh2CmHydeQstzpA8cTinbFZgb+gxMHu205cqwHr1wUMtBpKhgdwqPh1EWfokCzrGSCj2Vjxq/Fr0bQ=

View file

@@ -1,2 +0,0 @@
untrusted comment: minisign public key 956181C0EA8BF961
RWRh+YvqwIFhlRUdNGI/u+EDEmFip5BjgHY/z1yQkmRUcLfeIDWBCxnP

View file

@@ -1,2 +0,0 @@
# Minimal example of an empty source list

View file

@@ -1,4 +0,0 @@
untrusted comment: signature from minisign secret key
RWRh+YvqwIFhlQu0PHH9BqqxLYYwmhA4TFMNmfj11kkEYZvu8atPqYVEEyEnLIZLUhx+MYHYoiYrRI88LSpoKxMVSr9jIcaGaAI=
trusted comment: timestamp:1571786369 file:empty.md
LTu6UMxo0VXIESDN3/vpM/A04L2RfJkorJNPhXMcYUFl4lRH2x7DSyqCK0k6L3fS7u5iz0+SPLxVx17oqw4aBg==

Some files were not shown because too many files have changed in this diff.