Remove letsdebug module

This module has a dependency on a module that requires cgo; that's a no-go.
This commit is contained in:
Ingo Oppermann
2022-12-31 17:46:46 +01:00
parent 65a617c2af
commit 0cd8be130c
73 changed files with 12 additions and 21469 deletions

View File

@@ -39,7 +39,6 @@ import (
"github.com/datarhei/core/v16/update"
"github.com/caddyserver/certmagic"
"github.com/letsdebug/letsdebug"
"go.uber.org/zap"
)
@@ -719,19 +718,19 @@ func (a *api) start() error {
if err != nil {
logger.Error().WithField("error", err).Log("Failed to acquire certificate")
certerror = true
/*
problems, err := letsdebug.Check(host, letsdebug.HTTP01)
if err != nil {
logger.Error().WithField("error", err).Log("Failed to debug certificate acquisition")
}
problems, err := letsdebug.Check(host, letsdebug.HTTP01)
if err != nil {
logger.Error().WithField("error", err).Log("Failed to debug certificate acquisition")
}
for _, p := range problems {
logger.Error().WithFields(log.Fields{
"name": p.Name,
"detail": p.Detail,
}).Log(p.Explanation)
}
for _, p := range problems {
logger.Error().WithFields(log.Fields{
"name": p.Name,
"detail": p.Detail,
}).Log(p.Explanation)
}
*/
break
}

5
go.mod
View File

@@ -16,7 +16,6 @@ require (
github.com/invopop/jsonschema v0.4.0
github.com/joho/godotenv v1.4.0
github.com/labstack/echo/v4 v4.9.1
github.com/letsdebug/letsdebug v1.6.1
github.com/lithammer/shortuuid/v4 v4.0.0
github.com/mattn/go-isatty v0.0.16
github.com/prep/average v0.0.0-20200506183628-d26c465f48c3
@@ -39,7 +38,6 @@ require (
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/eggsampler/acme/v3 v3.1.1 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.20.0 // indirect
@@ -56,7 +54,6 @@ require (
github.com/klauspost/cpuid/v2 v2.1.2 // indirect
github.com/labstack/gommon v0.4.0 // indirect
github.com/leodido/go-urn v1.2.1 // indirect
github.com/lib/pq v1.8.0 // indirect
github.com/libdns/libdns v0.2.1 // indirect
github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect
github.com/mailru/easyjson v0.7.7 // indirect
@@ -64,7 +61,6 @@ require (
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mholt/acmez v1.0.4 // indirect
github.com/miekg/dns v1.1.50 // indirect
github.com/miekg/unbound v0.0.0-20180419064740-e2b53b2dbcba // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect
@@ -78,7 +74,6 @@ require (
github.com/urfave/cli/v2 v2.8.1 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/valyala/fasttemplate v1.2.2 // indirect
github.com/weppos/publicsuffix-go v0.13.0 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect

32
go.sum
View File

@@ -40,7 +40,6 @@ github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc
github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc=
github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/agiledragon/gomonkey/v2 v2.3.1/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY=
@@ -90,19 +89,11 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48 h1:fRzb/w+pyskVMQ+UbP35JkH8yB7MYb4q/qhBarqZE6g=
github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v1.13.1/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/eggsampler/acme/v3 v3.1.1 h1:hSze1Cw4bHtCUdiQE2R0GKfXjAuLirSFPUX1IBz9wKw=
github.com/eggsampler/acme/v3 v3.1.1/go.mod h1:/qh0rKC/Dh7Jj+p4So7DbWmFNzC4dpcpK53r226Fhuo=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-bindata/go-bindata v3.1.2+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo=
github.com/go-chi/chi v4.1.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@@ -137,7 +128,6 @@ github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/j
github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=
github.com/go-playground/validator/v10 v10.11.1 h1:prmOlTVv+YjZjmRmNSF3VmspqJIxJWXmqUsHwfTRRkQ=
github.com/go-playground/validator/v10 v10.11.1/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
@@ -146,7 +136,6 @@ github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keL
github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs=
github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang-migrate/migrate v3.5.4+incompatible/go.mod h1:IsVUlFN5puWOmXrqjgGUfIRIbU7mr8oNBE2tyERd9Wk=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -217,7 +206,6 @@ github.com/iancoleman/orderedmap v0.2.0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/invopop/jsonschema v0.4.0 h1:Yuy/unfgCnfV5Wl7H0HgFufp/rlurqPOOuacqyByrws=
github.com/invopop/jsonschema v0.4.0/go.mod h1:O9uiLokuu0+MGFlyiaqtWxwqJm41/+8Nj0lD7A36YH0=
github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
github.com/joho/godotenv v1.4.0 h1:3l4+N6zfMWnkbPEXKng2o2/MR5mSwTrBih4ZEkkz1lg=
github.com/joho/godotenv v1.4.0/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
@@ -230,7 +218,6 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kevinmbeaulieu/eq-go v1.0.0/go.mod h1:G3S8ajA56gKBZm4UB9AOyoOS37JO3roToPzKNM8dtdM=
@@ -256,11 +243,6 @@ github.com/labstack/gommon v0.4.0 h1:y7cvthEAEbU0yHOf4axH8ZG2NH8knB9iNSoTO8dyIk8
github.com/labstack/gommon v0.4.0/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM=
github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w=
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
github.com/letsdebug/letsdebug v1.6.1 h1:ef4qwhKAXbyoLB2jGWsIWeI245UjyDYvOgenwr/pblA=
github.com/letsdebug/letsdebug v1.6.1/go.mod h1:Bl1mFMHJqyTb3kzsznBpfTpcQLKaChV7xCsWEIdA2Ew=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.8.0 h1:9xohqzkUwzR4Ga4ivdTcawVS89YSDVxXMa3xJX3cGzg=
github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/libdns/libdns v0.2.1 h1:Wu59T7wSHRgtA0cfxC+n1c/e+O3upJGWytknkmFEDis=
github.com/libdns/libdns v0.2.1/go.mod h1:yQCXzk1lEZmmCPa857bnk4TsOiqYasqpyOEeSObbb40=
github.com/lithammer/shortuuid/v4 v4.0.0 h1:QRbbVkfgNippHOS8PXDkti4NaWeyYfcBTHtw7k08o4c=
@@ -281,17 +263,13 @@ github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovk
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/mholt/acmez v1.0.4 h1:N3cE4Pek+dSolbsofIkAYz6H1d3pE+2G0os7QHslf80=
github.com/mholt/acmez v1.0.4/go.mod h1:qFGLZ4u+ehWINeJZjzPlsnjJBCPAADWTcIqE/7DAYQY=
github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA=
github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
github.com/miekg/unbound v0.0.0-20180419064740-e2b53b2dbcba h1:RHTbLjrNIt6k3R4Aq2Q9KNBwFw8rZcZuoJVASoeB6Es=
github.com/miekg/unbound v0.0.0-20180419064740-e2b53b2dbcba/go.mod h1:lGLaihw972wB1AFBO88/Q69nOTzLqG/qR/uSp2YBLgM=
github.com/mitchellh/mapstructure v1.3.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
@@ -303,7 +281,6 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/otiai10/copy v1.7.0/go.mod h1:rmRl6QPdJj6EiUqXQ/4Nn2lLXoNQjFCQbbNrxgc/t3U=
github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE=
github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs=
@@ -361,7 +338,6 @@ github.com/shirou/gopsutil/v3 v3.22.10 h1:4KMHdfBRYXGF9skjDWiL4RA2N+E8dRdodU/bOZ
github.com/shirou/gopsutil/v3 v3.22.10/go.mod h1:QNza6r4YQoydyCfo6rH0blGfKahgibh4dQmV5xdFkQk=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
@@ -402,8 +378,6 @@ github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQ
github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/vektah/gqlparser/v2 v2.5.1 h1:ZGu+bquAY23jsxDRcYpWjttRZrUz07LbiY77gUOHcr4=
github.com/vektah/gqlparser/v2 v2.5.1/go.mod h1:mPgqFBu/woKTVYWyNk8cO3kh4S/f4aRFZrvOnp3hmCs=
github.com/weppos/publicsuffix-go v0.13.0 h1:0Tu1uzLBd1jPn4k6OnMmOPZH/l/9bj9kUOMMkoRs6Gg=
github.com/weppos/publicsuffix-go v0.13.0/go.mod h1:z3LCPQ38eedDQSwmsSRW4Y7t2L8Ln16JPQ02lHAdn5k=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
@@ -499,7 +473,6 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -514,7 +487,6 @@ golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
@@ -562,7 +534,6 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -582,7 +553,6 @@ golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -648,7 +618,6 @@ golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
@@ -763,7 +732,6 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=

View File

@@ -1,3 +0,0 @@
.idea/
*.out
coverage*

View File

@@ -1,26 +0,0 @@
language: go
go:
- "1.11"
- "1.x"
env:
- GO111MODULE=on
sudo: required
services:
- docker
before_install:
- GO111MODULE=off go get github.com/mattn/goveralls
script:
- unset TRAVIS_GO_VERSION
# test the examples first
- make clean examples
# test pebble integration
- make clean pebble
# test boulder integration
- make clean boulder
- goveralls -coverprofile=coverage.out -service=travis-ci

View File

@@ -1,21 +0,0 @@
MIT License
Copyright (c) 2018 Isaac
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -1,66 +0,0 @@
.PHONY: test examples clean test_full pebble pebble_setup pebble_start pebble_wait pebble_stop boulder boulder_setup boulder_start boulder_stop
GOPATH ?= $(HOME)/go
BOULDER_PATH ?= $(GOPATH)/src/github.com/letsencrypt/boulder
PEBBLE_PATH ?= $(GOPATH)/src/github.com/letsencrypt/pebble
TEST_PATH ?= github.com/eggsampler/acme/v3
# tests the code against a running ca instance
test:
-go clean -testcache
go test -v -race -coverprofile=coverage.out -covermode=atomic $(TEST_PATH)
examples:
go build -o /dev/null examples/certbot/certbot.go
go build -o /dev/null examples/autocert/autocert.go
clean:
rm -f coverage.out
test_full: clean examples pebble pebble_stop boulder boulder_stop
pebble: pebble_setup pebble_start pebble_wait test pebble_stop
pebble_setup:
mkdir -p $(PEBBLE_PATH)
git clone --depth 1 https://github.com/letsencrypt/pebble.git $(PEBBLE_PATH) \
|| (cd $(PEBBLE_PATH); git checkout -f master && git reset --hard HEAD && git pull -q)
docker-compose -f $(PEBBLE_PATH)/docker-compose.yml down
# runs an instance of pebble using docker
pebble_start:
docker-compose -f $(PEBBLE_PATH)/docker-compose.yml up -d
# waits until pebble responds
pebble_wait:
while ! wget --delete-after -q --no-check-certificate "https://localhost:14000/dir" ; do sleep 1 ; done
# stops the running pebble instance
pebble_stop:
docker-compose -f $(PEBBLE_PATH)/docker-compose.yml down
boulder: boulder_setup boulder_start boulder_wait test boulder_stop
# NB: this edits docker-compose.yml
boulder_setup:
mkdir -p $(BOULDER_PATH)
git clone --depth 1 https://github.com/letsencrypt/boulder.git $(BOULDER_PATH) \
|| (cd $(BOULDER_PATH); git checkout -f master && git reset --hard HEAD && git pull -q)
docker-compose -f $(BOULDER_PATH)/docker-compose.yml down
# runs an instance of boulder
boulder_start:
docker-compose -f $(BOULDER_PATH)/docker-compose.yml up -d
# waits until boulder responds
boulder_wait:
while ! wget --delete-after -q --no-check-certificate "http://localhost:4001/directory" ; do sleep 1 ; done
# stops the running docker instance
boulder_stop:
docker-compose -f $(BOULDER_PATH)/docker-compose.yml down

View File

@@ -1,43 +0,0 @@
# eggsampler/acme
[![GoDoc](https://godoc.org/github.com/eggsampler/acme?status.svg)](https://godoc.org/github.com/eggsampler/acme)
[![Build Status](https://travis-ci.com/eggsampler/acme.svg?branch=master)](https://travis-ci.com/eggsampler/acme)
[![Coverage Status](https://coveralls.io/repos/github/eggsampler/acme/badge.svg?branch=master)](https://coveralls.io/github/eggsampler/acme?branch=master)
## About
`eggsampler/acme` is a Go client library implementation for [RFC8555](https://tools.ietf.org/html/rfc8555) (previously ACME v2), specifically for use with the [Let's Encrypt](https://letsencrypt.org/)™ service.
The library is designed to provide a zero external dependency wrapper over exposed directory endpoints and provide objects in easy to use structures.
## Requirements
A Go version of at least 1.11 is required as this repository is designed to be imported as a Go module.
## Usage
Simply import the module into a project,
```go
import "github.com/eggsampler/acme/v3"
```
Note the `/v3` major version at the end. Due to the way modules function, this is the major version as represented in the `go.mod` file and latest git repo [semver](https://semver.org/) tag.
All functions are still exported and called using the `acme` package name.
## Examples
A simple [certbot](https://certbot.eff.org/)-like example is provided in the examples/certbot directory.
This code demonstrates account registration, new order submission, fulfilling challenges, finalising an order and fetching the issued certificate chain.
An example of how to use the autocert package is also provided in examples/autocert.
## Tests
The tests can be run against an instance of [boulder](https://github.com/letsencrypt/boulder) or [pebble](https://github.com/letsencrypt/pebble).
Challenge fulfilment is designed to use the new `challtestsrv` server present inside boulder and pebble which responds to dns queries and challenges as required.
To run tests against an already running instance of boulder or pebble, use the `test` target in the Makefile.
Some convenience targets for launching pebble/boulder using their respective docker compose files have also been included in the Makefile.

View File

@@ -1,35 +0,0 @@
This document contains Third Party Software Notices and/or Additional Terms and Conditions for licensed third party software components included within this product.
==
https://github.com/golang/crypto/blob/master/acme/jws.go
https://github.com/golang/crypto/blob/master/acme/jws_test.go
(with modifications)
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -1,128 +0,0 @@
package acme
import (
"crypto"
"encoding/json"
"errors"
"fmt"
"net/http"
"reflect"
)
// NewAccount registers a new account with the acme service.
// The account URL is taken from the Location response header, and the key
// thumbprint is computed locally when the service does not supply one.
func (c Client) NewAccount(privateKey crypto.Signer, onlyReturnExisting, termsOfServiceAgreed bool, contact ...string) (Account, error) {
	// Request payload; contact is omitted from the JSON when empty.
	req := struct {
		OnlyReturnExisting   bool     `json:"onlyReturnExisting"`
		TermsOfServiceAgreed bool     `json:"termsOfServiceAgreed"`
		Contact              []string `json:"contact,omitempty"`
	}{
		OnlyReturnExisting:   onlyReturnExisting,
		TermsOfServiceAgreed: termsOfServiceAgreed,
		Contact:              contact,
	}

	var account Account
	resp, err := c.post(c.dir.NewAccount, "", privateKey, req, &account, http.StatusOK, http.StatusCreated)
	if err != nil {
		return account, err
	}

	// The service reports the account's URL via the Location header.
	account.URL = resp.Header.Get("Location")
	account.PrivateKey = privateKey

	if account.Thumbprint == "" {
		if account.Thumbprint, err = JWKThumbprint(account.PrivateKey.Public()); err != nil {
			return account, fmt.Errorf("acme: error computing account thumbprint: %v", err)
		}
	}

	return account, nil
}
// UpdateAccount updates an existing account with the acme service.
// When the supplied contact list differs from the account's current one, the
// new list is submitted; otherwise an empty-string body triggers a
// POST-as-GET that simply refreshes the account information.
func (c Client) UpdateAccount(account Account, contact ...string) (Account, error) {
	var body interface{} = ""
	if !reflect.DeepEqual(account.Contact, contact) {
		// Only provide a non-nil body when there is an update to be made.
		body = struct {
			Contact []string `json:"contact,omitempty"`
		}{Contact: contact}
	}

	if _, err := c.post(account.URL, account.URL, account.PrivateKey, body, &account, http.StatusOK); err != nil {
		return account, err
	}

	if account.Thumbprint == "" {
		var err error
		if account.Thumbprint, err = JWKThumbprint(account.PrivateKey.Public()); err != nil {
			return account, fmt.Errorf("acme: error computing account thumbprint: %v", err)
		}
	}

	return account, nil
}
// AccountKeyChange rolls over an account to a new key.
// The inner JWS (identifying the account and its old public key) is signed
// with the new key; the outer request is signed with the current account key.
func (c Client) AccountKeyChange(account Account, newPrivateKey crypto.Signer) (Account, error) {
	oldKeyPub, err := jwkEncode(account.PrivateKey.Public())
	if err != nil {
		return account, fmt.Errorf("acme: error encoding new private key: %v", err)
	}

	payload := struct {
		Account string          `json:"account"`
		OldKey  json.RawMessage `json:"oldKey"`
	}{
		Account: account.URL,
		OldKey:  []byte(oldKeyPub),
	}

	innerJws, err := jwsEncodeJSON(payload, newPrivateKey, "", "", c.dir.KeyChange)
	if err != nil {
		return account, fmt.Errorf("acme: error encoding inner jws: %v", err)
	}

	if _, err := c.post(c.dir.KeyChange, account.URL, account.PrivateKey, json.RawMessage(innerJws), nil, http.StatusOK); err != nil {
		return account, err
	}

	// Only adopt the new key once the service has accepted the rollover.
	account.PrivateKey = newPrivateKey
	return account, nil
}
// DeactivateAccount deactivates a given account by posting a status update
// of "deactivated" to the account's URL.
func (c Client) DeactivateAccount(account Account) (Account, error) {
	body := struct {
		Status string `json:"status"`
	}{Status: "deactivated"}

	_, err := c.post(account.URL, account.URL, account.PrivateKey, body, &account, http.StatusOK)
	return account, err
}
// FetchOrderList fetches a list of orders from the account url provided in
// the account Orders field. Returns an error if the account carries no
// orders URL.
func (c Client) FetchOrderList(account Account) (OrderList, error) {
	var list OrderList
	if account.Orders == "" {
		return list, errors.New("no order list for account")
	}
	// Empty-string body => POST-as-GET against the orders URL.
	_, err := c.post(account.Orders, account.URL, account.PrivateKey, "", &list, http.StatusOK)
	return list, err
}

View File

@@ -1,294 +0,0 @@
package acme
import (
"bytes"
"crypto"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"os"
"regexp"
"strings"
"time"
)
const (
// LetsEncryptProduction holds the production directory url
LetsEncryptProduction = "https://acme-v02.api.letsencrypt.org/directory"
// LetsEncryptStaging holds the staging directory url
LetsEncryptStaging = "https://acme-staging-v02.api.letsencrypt.org/directory"
userAgentString = "eggsampler-acme/1.0 Go-http-client/1.1"
)
// NewClient creates a new acme client given a valid directory url.
// Options are applied before the directory is fetched, so they can adjust
// the http client (timeouts etc.) used for that first request.
func NewClient(directoryURL string, options ...OptionFunc) (Client, error) {
	// Default http timeout of 60 seconds; can be overridden via an
	// OptionFunc, eg: acme.NewClient(url, WithHTTPTimeout(10*time.Second))
	client := Client{
		httpClient: &http.Client{Timeout: 60 * time.Second},
		nonces:     &nonceStack{},
		retryCount: 5,
	}
	client.dir.URL = directoryURL

	for _, opt := range options {
		if err := opt(&client); err != nil {
			return client, fmt.Errorf("acme: error setting option: %v", err)
		}
	}

	// Populate the directory endpoints from the service.
	if _, err := client.get(directoryURL, &client.dir, http.StatusOK); err != nil {
		return client, err
	}

	return client, nil
}
// Directory returns the directory object the client fetched when it
// connected to its directory url.
func (c Client) Directory() Directory {
	return c.dir
}
// getPollingDurations returns the configured poll interval and poll timeout,
// substituting defaults (500ms interval, 30s timeout) for unset zero values.
func (c Client) getPollingDurations() (time.Duration, time.Duration) {
	interval, timeout := c.PollInterval, c.PollTimeout
	if interval == 0 {
		interval = 500 * time.Millisecond
	}
	if timeout == 0 {
		timeout = 30 * time.Second
	}
	return interval, timeout
}
// Helper function to have a central point for performing http requests.
// Stores any returned nonces in the stack.
func (c Client) do(req *http.Request, addNonce bool) (*http.Response, error) {
	// identifier for this client, as well as the default go user agent
	if c.userAgentSuffix != "" {
		req.Header.Set("User-Agent", userAgentString+" "+c.userAgentSuffix)
	} else {
		req.Header.Set("User-Agent", userAgentString)
	}
	if c.acceptLanguage != "" {
		req.Header.Set("Accept-Language", c.acceptLanguage)
	}
	resp, err := c.httpClient.Do(req)
	if err != nil {
		return resp, err
	}
	if addNonce {
		// Replay-Nonce headers are pushed onto the shared stack so later
		// signed requests can reuse them without an extra HEAD round trip.
		c.nonces.push(resp.Header.Get("Replay-Nonce"))
	}
	return resp, nil
}
// Helper function to perform an http get request and read the body.
// The body is fully read and closed here, so callers must use the returned
// byte slice rather than resp.Body.
func (c Client) getRaw(url string, expectedStatus ...int) (*http.Response, []byte, error) {
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return nil, nil, fmt.Errorf("acme: error creating request: %v", err)
	}
	// addNonce=true: capture any Replay-Nonce header for later signed requests.
	resp, err := c.do(req, true)
	if err != nil {
		return resp, nil, fmt.Errorf("acme: error fetching response: %v", err)
	}
	defer resp.Body.Close()
	// Reject responses with unexpected status codes before reading the body.
	if err := checkError(resp, expectedStatus...); err != nil {
		return resp, nil, err
	}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return resp, body, fmt.Errorf("acme: error reading response body: %v", err)
	}
	return resp, body, nil
}
// Helper function for performing a http get on an acme resource,
// unmarshalling any non-empty response body into out (when out is non-nil).
func (c Client) get(url string, out interface{}, expectedStatus ...int) (*http.Response, error) {
	resp, body, err := c.getRaw(url, expectedStatus...)
	if err != nil {
		return resp, err
	}
	if out == nil || len(body) == 0 {
		return resp, nil
	}
	if err := json.Unmarshal(body, out); err != nil {
		return resp, fmt.Errorf("acme: error parsing response body: %v", err)
	}
	return resp, nil
}
// nonce returns a nonce for a signed request, popping one off the cached
// stack if available, otherwise fetching a fresh one via a HEAD request to
// the directory's newNonce url.
func (c Client) nonce() (string, error) {
	nonce := c.nonces.pop()
	if nonce != "" {
		return nonce, nil
	}
	if c.dir.NewNonce == "" {
		return "", errors.New("acme: no new nonce url")
	}
	req, err := http.NewRequest("HEAD", c.dir.NewNonce, nil)
	if err != nil {
		return "", fmt.Errorf("acme: error creating new nonce request: %v", err)
	}
	resp, err := c.do(req, false)
	if err != nil {
		return "", fmt.Errorf("acme: error fetching new nonce: %v", err)
	}
	// The response body must be closed (even for HEAD requests) so the
	// transport can reuse the underlying connection; the original leaked it.
	defer resp.Body.Close()
	nonce = resp.Header.Get("Replay-Nonce")
	return nonce, nil
}
// Helper function to perform an http post request and read the body.
// Will attempt to retry if error is badNonce
func (c Client) postRaw(retryCount int, requestURL, kid string, privateKey crypto.Signer, payload interface{}, expectedStatus []int) (*http.Response, []byte, error) {
	// Every signed request needs a nonce: reuse a cached one, or fetch anew.
	nonce, err := c.nonce()
	if err != nil {
		return nil, nil, err
	}
	// Sign the payload into a JWS object with the nonce, url, and either the
	// account kid or (when kid is "") an embedded JWK.
	data, err := jwsEncodeJSON(payload, privateKey, keyID(kid), nonce, requestURL)
	if err != nil {
		return nil, nil, fmt.Errorf("acme: error encoding json payload: %v", err)
	}
	req, err := http.NewRequest(http.MethodPost, requestURL, bytes.NewReader(data))
	if err != nil {
		return nil, nil, fmt.Errorf("acme: error creating request: %v", err)
	}
	req.Header.Set("Content-Type", "application/jose+json")
	resp, err := c.do(req, true)
	if err != nil {
		return resp, nil, fmt.Errorf("acme: error sending request: %v", err)
	}
	defer resp.Body.Close()
	if err := checkError(resp, expectedStatus...); err != nil {
		prob, ok := err.(Problem)
		if !ok {
			// don't retry for an error we don't know about
			return resp, nil, err
		}
		if retryCount >= c.retryCount {
			// don't attempt to retry if too many retries
			return resp, nil, err
		}
		if strings.HasSuffix(prob.Type, ":badNonce") {
			// only retry if error is badNonce; the recursive call fetches a
			// fresh nonce and re-signs the payload.
			return c.postRaw(retryCount+1, requestURL, kid, privateKey, payload, expectedStatus)
		}
		return resp, nil, err
	}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return resp, body, fmt.Errorf("acme: error reading response body: %v", err)
	}
	return resp, body, nil
}
// Helper function for performing a http post to an acme resource,
// unmarshalling any non-empty response body into out (when out is non-nil).
// Setting the ACME_DEBUG_POST environment variable dumps response bodies
// to stdout for debugging.
func (c Client) post(requestURL, keyID string, privateKey crypto.Signer, payload interface{}, out interface{}, expectedStatus ...int) (*http.Response, error) {
	resp, body, err := c.postRaw(0, requestURL, keyID, privateKey, payload, expectedStatus)
	if err != nil {
		return resp, err
	}
	if _, debug := os.LookupEnv("ACME_DEBUG_POST"); debug {
		fmt.Println()
		fmt.Println(string(body))
		fmt.Println()
	}
	if out == nil || len(body) == 0 {
		return resp, nil
	}
	if err := json.Unmarshal(body, out); err != nil {
		return resp, fmt.Errorf("acme: error parsing response: %v - %s", err, string(body))
	}
	return resp, nil
}
// regLink extracts the url and relation type from a single Link header entry.
var regLink = regexp.MustCompile(`<(.+?)>;\s*rel="(.+?)"`)

// fetchLink returns the url of the first Link header entry on resp whose
// rel attribute matches wantedLink, or "" if there is none.
func fetchLink(resp *http.Response, wantedLink string) string {
	if resp == nil {
		return ""
	}
	for _, header := range resp.Header["Link"] {
		for _, match := range regLink.FindAllStringSubmatch(header, -1) {
			if len(match) == 3 && match[2] == wantedLink {
				return match[1]
			}
		}
	}
	return ""
}
// Fetch is a helper to perform a POST-AS-GET request against requestURL,
// decoding the response into result. When no expected statuses are given,
// http.StatusOK is assumed.
func (c Client) Fetch(account Account, requestURL string, result interface{}, expectedStatus ...int) error {
	if len(expectedStatus) == 0 {
		expectedStatus = []int{http.StatusOK}
	}
	_, err := c.post(requestURL, account.URL, account.PrivateKey, "", result, expectedStatus...)
	return err
}
// fetchLinks returns the urls of every Link header entry on resp whose
// rel attribute matches wantedLink, or nil when there are none.
func fetchLinks(resp *http.Response, wantedLink string) []string {
	if resp == nil {
		return nil
	}
	var links []string
	for _, header := range resp.Header["Link"] {
		for _, match := range regLink.FindAllStringSubmatch(header, -1) {
			if len(match) == 3 && match[2] == wantedLink {
				links = append(links, match[1])
			}
		}
	}
	return links
}

View File

@@ -1,43 +0,0 @@
package acme
import "net/http"
// FetchAuthorization fetches an authorization from an authorization url provided in an order.
// The returned authorization has ChallengeMap/ChallengeTypes populated, its
// URL field set, and a key authorization (token.thumbprint) computed for any
// challenge the server did not supply one for.
func (c Client) FetchAuthorization(account Account, authURL string) (Authorization, error) {
	auth := Authorization{}
	if _, err := c.post(authURL, account.URL, account.PrivateKey, "", &auth, http.StatusOK); err != nil {
		return auth, err
	}
	for i := range auth.Challenges {
		if auth.Challenges[i].KeyAuthorization == "" {
			auth.Challenges[i].KeyAuthorization = auth.Challenges[i].Token + "." + account.Thumbprint
		}
	}
	auth.ChallengeMap = map[string]Challenge{}
	auth.ChallengeTypes = []string{}
	for _, chal := range auth.Challenges {
		auth.ChallengeMap[chal.Type] = chal
		auth.ChallengeTypes = append(auth.ChallengeTypes, chal.Type)
	}
	auth.URL = authURL
	return auth, nil
}
// DeactivateAuthorization posts a status of "deactivated" to the given
// authorization url and returns the updated authorization.
func (c Client) DeactivateAuthorization(account Account, authURL string) (Authorization, error) {
	req := struct {
		Status string `json:"status"`
	}{Status: "deactivated"}
	var auth Authorization
	_, err := c.post(authURL, account.URL, account.PrivateKey, req, &auth, http.StatusOK)
	return auth, err
}

View File

@@ -1,430 +0,0 @@
package acme
// Similar to golang.org/x/crypto/acme/autocert
import (
"context"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"errors"
"fmt"
"io/ioutil"
"net/http"
"path"
"strings"
"sync"
)
// HostCheck function prototype to implement for checking hosts against before issuing certificates
type HostCheck func(host string) error

// WhitelistHosts returns a HostCheck that permits exactly the given hosts
// and rejects everything else.
func WhitelistHosts(hosts ...string) HostCheck {
	allowed := make(map[string]bool, len(hosts))
	for _, h := range hosts {
		allowed[h] = true
	}
	return func(host string) error {
		if allowed[host] {
			return nil
		}
		return errors.New("autocert: host not whitelisted")
	}
}
// AutoCert is a stateful certificate manager for issuing certificates on connecting hosts
type AutoCert struct {
	// Acme directory Url
	// If nil, uses `LetsEncryptStaging`
	DirectoryURL string
	// Options contains the options used for creating the acme client
	Options []OptionFunc
	// A function to check whether a host is allowed or not
	// If nil, all hosts allowed
	// Use `WhitelistHosts(hosts ...string)` for a simple white list of hostnames
	HostCheck HostCheck
	// Cache dir to store account data and certificates
	// If nil, does not write cache data to file
	CacheDir string
	// When using a staging environment, include a root certificate for verification purposes
	RootCert string
	// Called before updating challenges
	PreUpdateChallengeHook func(Account, Challenge)
	// Mapping of token -> keyauth.
	// Guarded by tokensLock: the http challenge handler takes the read lock,
	// issuance (which inserts and deletes tokens) takes the write lock.
	tokensLock sync.RWMutex
	tokens     map[string][]byte
	// Mapping of cache key -> value, guarded by cacheLock
	cacheLock sync.Mutex
	cache     map[string][]byte
	// read lock around getting existing certs
	// write lock around issuing new certificate
	certLock sync.RWMutex
	client Client
}
// HTTPHandler Wraps a handler and provides serving of http-01 challenge tokens from /.well-known/acme-challenge/
// If handler is nil, will redirect all traffic otherwise to https
func (m *AutoCert) HTTPHandler(handler http.Handler) http.Handler {
	if handler == nil {
		handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			http.Redirect(w, r, "https://"+r.Host+r.URL.RequestURI(), http.StatusMovedPermanently)
		})
	}
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Anything outside the acme challenge path goes to the wrapped
		// handler (or the https redirect installed above).
		if !strings.HasPrefix(r.URL.Path, "/.well-known/acme-challenge/") {
			handler.ServeHTTP(w, r)
			return
		}
		// Apply the same host policy used for issuance.
		if err := m.checkHost(r.Host); err != nil {
			http.Error(w, err.Error(), http.StatusForbidden)
			return
		}
		// The token is the final path element; serve the key authorization
		// stored for it by issueCert, or 404 if unknown.
		token := path.Base(r.URL.Path)
		m.tokensLock.RLock()
		defer m.tokensLock.RUnlock()
		keyAuth := m.tokens[token]
		if len(keyAuth) == 0 {
			http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
			return
		}
		_, _ = w.Write(keyAuth)
	})
}
// GetCertificate implements a tls.Config.GetCertificate hook
func (m *AutoCert) GetCertificate(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
	name := strings.TrimSuffix(hello.ServerName, ".")
	// Basic SNI sanity checks: non-empty, at least one dot, and no path
	// separator characters (the name is later used in cache file names).
	if name == "" {
		return nil, errors.New("autocert: missing server name")
	}
	if !strings.Contains(strings.Trim(name, "."), ".") {
		return nil, errors.New("autocert: server name component count invalid")
	}
	if strings.ContainsAny(name, `/\`) {
		return nil, errors.New("autocert: server name contains invalid character")
	}
	// check the hostname is allowed
	if err := m.checkHost(name); err != nil {
		return nil, err
	}
	// check if there's an existing cert; concurrent handshakes may read
	// under the shared read lock
	m.certLock.RLock()
	existingCert := m.getExistingCert(name)
	m.certLock.RUnlock()
	if existingCert != nil {
		return existingCert, nil
	}
	// if not, attempt to issue a new cert under the exclusive write lock.
	// NOTE(review): there is no re-check of the cache after acquiring the
	// write lock, so two racing handshakes may both issue — confirm intended.
	m.certLock.Lock()
	defer m.certLock.Unlock()
	return m.issueCert(name)
}
// getDirectoryURL returns the configured acme directory url, falling back
// to LetsEncryptStaging when none is set.
func (m *AutoCert) getDirectoryURL() string {
	if m.DirectoryURL == "" {
		return LetsEncryptStaging
	}
	return m.DirectoryURL
}
// getCache returns the cached value for the joined key, consulting the
// in-memory map first and falling back to a file in CacheDir (if set).
// Values read from disk are memoised into the in-memory map.
func (m *AutoCert) getCache(keys ...string) []byte {
	key := strings.Join(keys, "-")
	m.cacheLock.Lock()
	defer m.cacheLock.Unlock()
	if b := m.cache[key]; len(b) > 0 {
		return b
	}
	if m.CacheDir == "" {
		return nil
	}
	b, _ := ioutil.ReadFile(path.Join(m.CacheDir, key))
	if len(b) == 0 {
		return nil
	}
	if m.cache == nil {
		m.cache = map[string][]byte{}
	}
	m.cache[key] = b
	return b
}
// putCache stores data under the joined key in the in-memory cache and, if
// CacheDir is set, writes it to disk on a background goroutine.
// The returned context is cancelled once the disk write has completed
// (immediately when no CacheDir is configured), so callers can wait on
// ctx.Done() if they need the write to have finished.
func (m *AutoCert) putCache(data []byte, keys ...string) context.Context {
	ctx, cancel := context.WithCancel(context.Background())
	key := strings.Join(keys, "-")
	m.cacheLock.Lock()
	defer m.cacheLock.Unlock()
	if m.cache == nil {
		m.cache = map[string][]byte{}
	}
	m.cache[key] = data
	if m.CacheDir == "" {
		cancel()
		return ctx
	}
	go func() {
		// best-effort write: errors are deliberately ignored
		_ = ioutil.WriteFile(path.Join(m.CacheDir, key), data, 0700)
		cancel()
	}()
	return ctx
}
// checkHost validates name against the configured HostCheck; a nil
// HostCheck allows every host.
func (m *AutoCert) checkHost(name string) error {
	if m.HostCheck != nil {
		return m.HostCheck(name)
	}
	return nil
}
// getExistingCert loads and validates a cached certificate + key for name,
// returning nil whenever anything is missing, malformed or fails chain
// verification (the caller then issues a fresh certificate).
func (m *AutoCert) getExistingCert(name string) *tls.Certificate {
	// check for a stored cert
	certData := m.getCache("cert", name)
	if len(certData) == 0 {
		// no cert
		return nil
	}
	// Cache layout (as written by issueCert): EC private key PEM block
	// first, followed by the certificate chain PEM blocks.
	privBlock, pubData := pem.Decode(certData)
	if len(pubData) == 0 {
		// no public key data (cert/issuer), ignore
		return nil
	}
	// decode pub chain
	var pubDER [][]byte
	var pub []byte
	for len(pubData) > 0 {
		var b *pem.Block
		b, pubData = pem.Decode(pubData)
		if b == nil {
			break
		}
		pubDER = append(pubDER, b.Bytes)
		pub = append(pub, b.Bytes...)
	}
	if len(pubData) > 0 {
		// leftover data in file - possibly corrupt, ignore
		return nil
	}
	certs, err := x509.ParseCertificates(pub)
	if err != nil {
		// bad certificates, ignore
		return nil
	}
	leaf := certs[0]
	// add any intermediate certs if present
	var intermediates *x509.CertPool
	if len(certs) > 1 {
		intermediates = x509.NewCertPool()
		for i := 1; i < len(certs); i++ {
			intermediates.AddCert(certs[i])
		}
	}
	// add a root certificate if present
	var roots *x509.CertPool
	if m.RootCert != "" {
		roots = x509.NewCertPool()
		// NOTE(review): a non-empty but malformed RootCert makes pem.Decode
		// return a nil block, and rootBlock.Bytes would panic — confirm
		// RootCert is always valid PEM or add a nil check.
		rootBlock, _ := pem.Decode([]byte(m.RootCert))
		rootCert, err := x509.ParseCertificate(rootBlock.Bytes)
		if err != nil {
			return nil
		}
		roots.AddCert(rootCert)
	}
	// Verify also checks the DNS name and validity window, so an expired or
	// mismatched cached certificate is rejected here and re-issued upstream.
	if _, err := leaf.Verify(x509.VerifyOptions{DNSName: name, Intermediates: intermediates, Roots: roots}); err != nil {
		// invalid certificates , ignore
		return nil
	}
	privKey, err := x509.ParseECPrivateKey(privBlock.Bytes)
	if err != nil {
		// invalid private key, ignore
		return nil
	}
	return &tls.Certificate{
		Certificate: pubDER,
		PrivateKey:  privKey,
		Leaf:        leaf,
	}
}
// issueCert runs the full ACME issuance flow for domainName: load or create
// the account key, create an order, satisfy each authorization via the
// http-01 challenge, finalize with a fresh EC certificate key, then cache
// the key + chain and return the parsed certificate.
// Caller must hold the certLock write lock (see GetCertificate).
func (m *AutoCert) issueCert(domainName string) (*tls.Certificate, error) {
	// attempt to load an existing account key
	var privKey *ecdsa.PrivateKey
	if keyData := m.getCache("account"); len(keyData) > 0 {
		block, _ := pem.Decode(keyData)
		x509Encoded := block.Bytes
		// parse failure leaves privKey nil and a new key is generated below
		privKey, _ = x509.ParseECPrivateKey(x509Encoded)
	}
	// otherwise generate a new one
	if privKey == nil {
		var err error
		privKey, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
		if err != nil {
			return nil, fmt.Errorf("autocert: error generating new account key: %v", err)
		}
		x509Encoded, _ := x509.MarshalECPrivateKey(privKey)
		pemEncoded := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: x509Encoded})
		m.putCache(pemEncoded, "account")
	}
	// create a new client if one doesn't exist
	if m.client.Directory().URL == "" {
		var err error
		m.client, err = NewClient(m.getDirectoryURL(), m.Options...)
		if err != nil {
			return nil, err
		}
	}
	// create/fetch acme account
	account, err := m.client.NewAccount(privKey, false, true)
	if err != nil {
		return nil, fmt.Errorf("autocert: error creating/fetching account: %v", err)
	}
	// start a new order process
	order, err := m.client.NewOrderDomains(account, domainName)
	if err != nil {
		return nil, fmt.Errorf("autocert: error creating new order for domain %s: %v", domainName, err)
	}
	// loop through each of the provided authorization Urls
	for _, authURL := range order.Authorizations {
		auth, err := m.client.FetchAuthorization(account, authURL)
		if err != nil {
			return nil, fmt.Errorf("autocert: error fetching authorization Url %q: %v", authURL, err)
		}
		// already-valid authorizations need no challenge
		if auth.Status == "valid" {
			continue
		}
		chal, ok := auth.ChallengeMap[ChallengeTypeHTTP01]
		if !ok {
			return nil, fmt.Errorf("autocert: unable to find http-01 challenge for auth %s, Url: %s", auth.Identifier.Value, authURL)
		}
		// publish the key authorization so HTTPHandler can serve it to the
		// validation server
		m.tokensLock.Lock()
		if m.tokens == nil {
			m.tokens = map[string][]byte{}
		}
		m.tokens[chal.Token] = []byte(chal.KeyAuthorization)
		m.tokensLock.Unlock()
		if m.PreUpdateChallengeHook != nil {
			m.PreUpdateChallengeHook(account, chal)
		}
		// tell the server to validate; UpdateChallenge polls until finished
		chal, err = m.client.UpdateChallenge(account, chal)
		if err != nil {
			return nil, fmt.Errorf("autocert: error updating authorization %s challenge (Url: %s) : %v", auth.Identifier.Value, authURL, err)
		}
		// token no longer needed once validated
		m.tokensLock.Lock()
		delete(m.tokens, chal.Token)
		m.tokensLock.Unlock()
	}
	// generate private key for cert
	certKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		return nil, fmt.Errorf("autocert: error generating certificate key for %s: %v", domainName, err)
	}
	certKeyEnc, err := x509.MarshalECPrivateKey(certKey)
	if err != nil {
		return nil, fmt.Errorf("autocert: error encoding certificate key for %s: %v", domainName, err)
	}
	certKeyPem := pem.EncodeToMemory(&pem.Block{
		Type:  "EC PRIVATE KEY",
		Bytes: certKeyEnc,
	})
	// create the new csr template
	tpl := &x509.CertificateRequest{
		SignatureAlgorithm: x509.ECDSAWithSHA256,
		PublicKeyAlgorithm: x509.ECDSA,
		PublicKey:          certKey.Public(),
		Subject:            pkix.Name{CommonName: domainName},
		DNSNames:           []string{domainName},
	}
	csrDer, err := x509.CreateCertificateRequest(rand.Reader, tpl, certKey)
	if err != nil {
		return nil, fmt.Errorf("autocert: error creating certificate request for %s: %v", domainName, err)
	}
	csr, err := x509.ParseCertificateRequest(csrDer)
	if err != nil {
		return nil, fmt.Errorf("autocert: error parsing certificate request for %s: %v", domainName, err)
	}
	// finalize the order with the acme server given a csr
	order, err = m.client.FinalizeOrder(account, order, csr)
	if err != nil {
		return nil, fmt.Errorf("autocert: error finalizing order for %s: %v", domainName, err)
	}
	// fetch the certificate chain from the finalized order provided by the acme server
	certs, err := m.client.FetchCertificates(account, order.Certificate)
	if err != nil {
		return nil, fmt.Errorf("autocert: error fetching order certificates for %s: %v", domainName, err)
	}
	// cache layout: key PEM first, then the chain (see getExistingCert)
	certPem := certKeyPem
	// var certDer [][]byte
	for _, c := range certs {
		b := pem.EncodeToMemory(&pem.Block{
			Type:  "CERTIFICATE",
			Bytes: c.Raw,
		})
		certPem = append(certPem, b...)
		// certDer = append(certDer, c.Raw)
	}
	m.putCache(certPem, "cert", domainName)
	// re-read through the validation path so the result matches what future
	// handshakes will get
	return m.getExistingCert(domainName), nil
}

View File

@@ -1,106 +0,0 @@
package acme
import (
"crypto"
"crypto/x509"
"encoding/base64"
"encoding/pem"
"fmt"
"net/http"
)
// decodeCertificateChain parses every PEM certificate block in body, then
// follows any "up" Link header on resp to append issuer certificates.
func (c Client) decodeCertificateChain(body []byte, resp *http.Response, account Account) ([]*x509.Certificate, error) {
	var certs []*x509.Certificate
	for {
		block, rest := pem.Decode(body)
		if block == nil {
			break
		}
		body = rest
		cert, err := x509.ParseCertificate(block.Bytes)
		if err != nil {
			return certs, fmt.Errorf("acme: error parsing certificate: %v", err)
		}
		certs = append(certs, cert)
	}
	if up := fetchLink(resp, "up"); up != "" {
		upCerts, err := c.FetchCertificates(account, up)
		if err != nil {
			return certs, fmt.Errorf("acme: error fetching up cert: %v", err)
		}
		certs = append(certs, upCerts...)
	}
	return certs, nil
}
// FetchCertificates downloads a certificate chain from a url given in an order certificate.
// The request is made as POST-AS-GET using the account's key.
func (c Client) FetchCertificates(account Account, certificateURL string) ([]*x509.Certificate, error) {
	resp, body, err := c.postRaw(0, certificateURL, account.URL, account.PrivateKey, "", []int{http.StatusOK})
	if err != nil {
		return nil, err
	}
	return c.decodeCertificateChain(body, resp, account)
}
// FetchAllCertificates downloads the certificate chain at certificateURL,
// plus any alternate chains advertised via "alternate" Link headers.
// Returns a mapping of certificate urls to their certificate chains.
func (c Client) FetchAllCertificates(account Account, certificateURL string) (map[string][]*x509.Certificate, error) {
	resp, body, err := c.postRaw(0, certificateURL, account.URL, account.PrivateKey, "", []int{http.StatusOK})
	if err != nil {
		return nil, err
	}
	chain, err := c.decodeCertificateChain(body, resp, account)
	if err != nil {
		return nil, err
	}
	certs := map[string][]*x509.Certificate{certificateURL: chain}
	for _, altURL := range fetchLinks(resp, "alternate") {
		altResp, altBody, err := c.postRaw(0, altURL, account.URL, account.PrivateKey, "", []int{http.StatusOK})
		if err != nil {
			return certs, fmt.Errorf("acme: error fetching alt cert chain at %q - %v", altURL, err)
		}
		altChain, err := c.decodeCertificateChain(altBody, altResp, account)
		if err != nil {
			return certs, fmt.Errorf("acme: error decoding alt cert chain at %q - %v", altURL, err)
		}
		certs[altURL] = altChain
	}
	return certs, nil
}
// RevokeCertificate revokes a given certificate given the certificate key or account key, and a reason.
// When the provided key is the account's private key the request is signed
// with the account kid, otherwise with the provided key's embedded JWK.
func (c Client) RevokeCertificate(account Account, cert *x509.Certificate, key crypto.Signer, reason int) error {
	req := struct {
		Certificate string `json:"certificate"`
		Reason      int    `json:"reason"`
	}{
		Certificate: base64.RawURLEncoding.EncodeToString(cert.Raw),
		Reason:      reason,
	}
	var kid string
	if key == account.PrivateKey {
		kid = account.URL
	}
	_, err := c.post(c.dir.RevokeCert, kid, key, req, nil, http.StatusOK)
	return err
}

View File

@@ -1,102 +0,0 @@
package acme
import (
"crypto/sha256"
"encoding/base64"
"errors"
"fmt"
"net/http"
"time"
)
// EncodeDNS01KeyAuthorization encodes a key authorization and provides a value to be put in the TXT record for the _acme-challenge DNS entry.
// The value is the unpadded base64url encoding of the SHA-256 digest of the
// key authorization string.
func EncodeDNS01KeyAuthorization(keyAuth string) string {
	digest := sha256.Sum256([]byte(keyAuth))
	return base64.RawURLEncoding.EncodeToString(digest[:])
}
// Helper function to determine whether a challenge is "finished" by its
// status. Returns (true, err) once the challenge reaches a terminal state.
func checkUpdatedChallengeStatus(challenge Challenge) (bool, error) {
	switch challenge.Status {
	case "pending", "processing":
		// "pending": freshly created challenge; "processing": the server is
		// still attempting validation. Keep polling in both cases.
		// TODO: https://github.com/letsencrypt/boulder/issues/3346
		return false, nil
	case "valid":
		// validation succeeded
		return true, nil
	case "invalid":
		// validation failed; surface the server-provided problem if present
		if challenge.Error.Type != "" {
			return true, challenge.Error
		}
		return true, errors.New("acme: challenge is invalid, no error provided")
	default:
		return true, fmt.Errorf("acme: unknown challenge status: %s", challenge.Status)
	}
}
// UpdateChallenge responds to a challenge to indicate to the server to complete the challenge.
// It then polls the challenge (using the client's PollInterval/PollTimeout,
// defaulted via getPollingDurations) until it reaches a terminal state.
func (c Client) UpdateChallenge(account Account, challenge Challenge) (Challenge, error) {
	// The initial POST carries an empty JSON object, signalling the server
	// to begin validation.
	resp, err := c.post(challenge.URL, account.URL, account.PrivateKey, struct{}{}, &challenge, http.StatusOK)
	if err != nil {
		return challenge, err
	}
	if loc := resp.Header.Get("Location"); loc != "" {
		challenge.URL = loc
	}
	challenge.AuthorizationURL = fetchLink(resp, "up")
	if finished, err := checkUpdatedChallengeStatus(challenge); finished {
		return challenge, err
	}
	pollInterval, pollTimeout := c.getPollingDurations()
	end := time.Now().Add(pollTimeout)
	for {
		if time.Now().After(end) {
			return challenge, errors.New("acme: challenge update timeout")
		}
		time.Sleep(pollInterval)
		// Subsequent polls are POST-AS-GET ("" payload) rather than the
		// empty-object POST used to trigger validation.
		resp, err := c.post(challenge.URL, account.URL, account.PrivateKey, "", &challenge, http.StatusOK)
		if err != nil {
			// i don't think it's worth exiting the loop on this error
			// it could just be connectivity issue that's resolved before the timeout duration
			continue
		}
		if loc := resp.Header.Get("Location"); loc != "" {
			challenge.URL = loc
		}
		challenge.AuthorizationURL = fetchLink(resp, "up")
		if finished, err := checkUpdatedChallengeStatus(challenge); finished {
			return challenge, err
		}
	}
}
// FetchChallenge fetches an existing challenge from the given url.
// The challenge URL and AuthorizationURL fields are populated from the
// response's Location and "up" Link headers respectively.
func (c Client) FetchChallenge(account Account, challengeURL string) (Challenge, error) {
	var challenge Challenge
	resp, err := c.post(challengeURL, account.URL, account.PrivateKey, "", &challenge, http.StatusOK)
	if err != nil {
		return challenge, err
	}
	challenge.URL = resp.Header.Get("Location")
	challenge.AuthorizationURL = fetchLink(resp, "up")
	return challenge, nil
}

View File

@@ -1,187 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the THIRD-PARTY file.
package acme
import (
"crypto"
"crypto/ecdsa"
"crypto/rand"
"crypto/rsa"
"crypto/sha256"
_ "crypto/sha512" // need for EC keys
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"math/big"
)
// errUnsupportedKey is returned when encoding or signing with a public key
// that is neither *rsa.PublicKey nor *ecdsa.PublicKey.
var errUnsupportedKey = errors.New("acme: unknown key type; only RSA and ECDSA are supported")
// keyID is the account identity provided by a CA during registration.
type keyID string
// noKeyID indicates that jwsEncodeJSON should compute and use JWK instead of a KID.
// See jwsEncodeJSON for details.
const noKeyID = keyID("")
// noPayload indicates jwsEncodeJSON will encode zero-length octet string
// in a JWS request. This is called POST-as-GET in RFC 8555 and is used to make
// authenticated GET requests via POSTing with an empty payload.
// See https://tools.ietf.org/html/rfc8555#section-6.3 for more details.
const noPayload = ""
// jwsEncodeJSON signs claimset using provided key and a nonce.
// The result is serialized in JSON format containing either kid or jwk
// fields based on the provided keyID value.
//
// If kid is non-empty, its quoted value is inserted in the protected head
// as "kid" field value. Otherwise, JWK is computed using jwkEncode and inserted
// as "jwk" field value. The "jwk" and "kid" fields are mutually exclusive.
//
// See https://tools.ietf.org/html/rfc7515#section-7.
func jwsEncodeJSON(claimset interface{}, key crypto.Signer, kid keyID, nonce, url string) ([]byte, error) {
	// Pick the JWS algorithm/hash from the key type; unsupported keys abort.
	alg, sha := jwsHasher(key.Public())
	if alg == "" || !sha.Available() {
		return nil, errUnsupportedKey
	}
	var phead string
	switch kid {
	case noKeyID:
		// No account kid yet (eg: new-account requests): embed the full JWK.
		jwk, err := jwkEncode(key.Public())
		if err != nil {
			return nil, err
		}
		phead = fmt.Sprintf(`{"alg":%q,"jwk":%s,"nonce":%q,"url":%q}`, alg, jwk, nonce, url)
	default:
		phead = fmt.Sprintf(`{"alg":%q,"kid":%q,"nonce":%q,"url":%q}`, alg, kid, nonce, url)
	}
	phead = base64.RawURLEncoding.EncodeToString([]byte(phead))
	var payload string
	// noPayload ("") leaves the payload empty for POST-as-GET requests.
	if claimset != noPayload {
		cs, err := json.Marshal(claimset)
		if err != nil {
			return nil, err
		}
		payload = base64.RawURLEncoding.EncodeToString(cs)
	}
	// The JWS signing input is base64url(head) "." base64url(payload).
	hash := sha.New()
	_, _ = hash.Write([]byte(phead + "." + payload))
	sig, err := jwsSign(key, sha, hash.Sum(nil))
	if err != nil {
		return nil, err
	}
	enc := struct {
		Protected string `json:"protected"`
		Payload   string `json:"payload"`
		Sig       string `json:"signature"`
	}{
		Protected: phead,
		Payload:   payload,
		Sig:       base64.RawURLEncoding.EncodeToString(sig),
	}
	return json.Marshal(&enc)
}
// jwkEncode encodes public part of an RSA or ECDSA key into a JWK.
// The result is also suitable for creating a JWK thumbprint.
// https://tools.ietf.org/html/rfc7517
func jwkEncode(pub crypto.PublicKey) (string, error) {
	switch pub := pub.(type) {
	case *rsa.PublicKey:
		// https://tools.ietf.org/html/rfc7518#section-6.3.1
		// Field order is important.
		// See https://tools.ietf.org/html/rfc7638#section-3.3 for details.
		e := base64.RawURLEncoding.EncodeToString(big.NewInt(int64(pub.E)).Bytes())
		n := base64.RawURLEncoding.EncodeToString(pub.N.Bytes())
		return fmt.Sprintf(`{"e":"%s","kty":"RSA","n":"%s"}`, e, n), nil
	case *ecdsa.PublicKey:
		// https://tools.ietf.org/html/rfc7518#section-6.2.1
		params := pub.Curve.Params()
		// Coordinates are left-padded with zeros to the curve's byte size
		// (bit size rounded up to a whole byte).
		size := (params.BitSize + 7) / 8
		x := pub.X.Bytes()
		if pad := size - len(x); pad > 0 {
			x = append(make([]byte, pad), x...)
		}
		y := pub.Y.Bytes()
		if pad := size - len(y); pad > 0 {
			y = append(make([]byte, pad), y...)
		}
		// Field order is important.
		// See https://tools.ietf.org/html/rfc7638#section-3.3 for details.
		return fmt.Sprintf(`{"crv":"%s","kty":"EC","x":"%s","y":"%s"}`,
			params.Name,
			base64.RawURLEncoding.EncodeToString(x),
			base64.RawURLEncoding.EncodeToString(y),
		), nil
	}
	return "", errUnsupportedKey
}
// jwsSign signs the digest using the given key.
// The hash is unused for ECDSA keys.
//
// Note: non-stdlib crypto.Signer implementations are expected to return
// the signature in the format as specified in RFC7518.
// See https://tools.ietf.org/html/rfc7518 for more details.
func jwsSign(key crypto.Signer, hash crypto.Hash, digest []byte) ([]byte, error) {
	if key, ok := key.(*ecdsa.PrivateKey); ok {
		// The key.Sign method of ecdsa returns ASN1-encoded signature.
		// So, we use the package Sign function instead
		// to get R and S values directly and format the result accordingly.
		r, s, err := ecdsa.Sign(rand.Reader, key, digest)
		if err != nil {
			return nil, err
		}
		rb, sb := r.Bytes(), s.Bytes()
		// R and S must each be zero-padded to the curve's full byte size:
		// the bit size rounded UP to a whole byte (eg P-521 -> 66 bytes).
		// Fix: the previous check (`size%8 > 0`) tested the byte count
		// instead of the bit count and only rounded correctly for the
		// standard curves by coincidence.
		size := key.Params().BitSize / 8
		if key.Params().BitSize%8 > 0 {
			size++
		}
		sig := make([]byte, size*2)
		copy(sig[size-len(rb):], rb)
		copy(sig[size*2-len(sb):], sb)
		return sig, nil
	}
	return key.Sign(rand.Reader, digest, hash)
}
// jwsHasher indicates suitable JWS algorithm name and a hash function
// to use for signing a digest with the provided key.
// It returns ("", 0) if the key is not supported.
func jwsHasher(pub crypto.PublicKey) (string, crypto.Hash) {
	switch key := pub.(type) {
	case *rsa.PublicKey:
		return "RS256", crypto.SHA256
	case *ecdsa.PublicKey:
		switch key.Params().Name {
		case "P-256":
			return "ES256", crypto.SHA256
		case "P-384":
			return "ES384", crypto.SHA384
		case "P-521":
			return "ES512", crypto.SHA512
		}
	}
	return "", 0
}
// JWKThumbprint creates a JWK thumbprint out of pub
// as specified in https://tools.ietf.org/html/rfc7638.
func JWKThumbprint(pub crypto.PublicKey) (string, error) {
	jwk, err := jwkEncode(pub)
	if err != nil {
		return "", err
	}
	digest := sha256.Sum256([]byte(jwk))
	return base64.RawURLEncoding.EncodeToString(digest[:]), nil
}

View File

@@ -1,45 +0,0 @@
package acme
import (
"sync"
)
// nonceStack is a simple thread-safe LIFO stack of replay nonces.
type nonceStack struct {
	lock  sync.Mutex
	stack []string
}

// push stores a nonce on the stack.
// Empty nonces are ignored, and no more are accepted once the stack holds
// over 100 entries.
func (ns *nonceStack) push(v string) {
	if v == "" {
		return
	}
	ns.lock.Lock()
	defer ns.lock.Unlock()
	if len(ns.stack) > 100 {
		return
	}
	ns.stack = append(ns.stack, v)
}

// pop removes and returns the most recently pushed nonce,
// or "" when the stack is empty.
func (ns *nonceStack) pop() string {
	ns.lock.Lock()
	defer ns.lock.Unlock()
	n := len(ns.stack)
	if n == 0 {
		return ""
	}
	v := ns.stack[n-1]
	ns.stack = ns.stack[:n-1]
	return v
}

View File

@@ -1,70 +0,0 @@
package acme
import (
"crypto/tls"
"errors"
"net/http"
"time"
)
// OptionFunc function prototype for passing options to NewClient
type OptionFunc func(client *Client) error

// WithHTTPTimeout sets a timeout on the http client used by the Client.
func WithHTTPTimeout(duration time.Duration) OptionFunc {
	return func(c *Client) error {
		c.httpClient.Timeout = duration
		return nil
	}
}
// WithInsecureSkipVerify sets InsecureSkipVerify on the http client
// transport tls client config used by the Client, disabling certificate
// verification (intended for testing against self-signed endpoints).
func WithInsecureSkipVerify() OptionFunc {
	return func(c *Client) error {
		tlsConfig := &tls.Config{InsecureSkipVerify: true}
		c.httpClient.Transport = &http.Transport{TLSClientConfig: tlsConfig}
		return nil
	}
}
// WithUserAgentSuffix appends a user agent suffix for http requests to
// acme resources.
func WithUserAgentSuffix(userAgentSuffix string) OptionFunc {
	return func(c *Client) error {
		c.userAgentSuffix = userAgentSuffix
		return nil
	}
}
// WithAcceptLanguage sets an Accept-Language header on http requests.
func WithAcceptLanguage(acceptLanguage string) OptionFunc {
	return func(c *Client) error {
		c.acceptLanguage = acceptLanguage
		return nil
	}
}
// WithRetryCount sets the number of times the acme client retries when receiving an api error (eg, nonce failures, etc).
// Default: 5
func WithRetryCount(retryCount int) OptionFunc {
	return func(c *Client) error {
		if retryCount < 1 {
			return errors.New("retryCount must be > 0")
		}
		c.retryCount = retryCount
		return nil
	}
}
// WithHTTPClient Allows setting a custom http client for acme connections.
// The provided client must be non-nil.
func WithHTTPClient(httpClient *http.Client) OptionFunc {
	return func(c *Client) error {
		if httpClient == nil {
			return errors.New("client must not be nil")
		}
		c.httpClient = httpClient
		return nil
	}
}

View File

@@ -1,136 +0,0 @@
package acme
import (
"crypto/x509"
"encoding/base64"
"errors"
"fmt"
"net/http"
"time"
)
// NewOrder initiates a new order for a new certificate. The returned
// Order has its URL populated from the Location response header.
func (c Client) NewOrder(account Account, identifiers []Identifier) (Order, error) {
	payload := struct {
		Identifiers []Identifier `json:"identifiers"`
	}{Identifiers: identifiers}

	var order Order
	resp, err := c.post(c.dir.NewOrder, account.URL, account.PrivateKey, payload, &order, http.StatusCreated)
	if err != nil {
		return order, err
	}
	order.URL = resp.Header.Get("Location")
	return order, nil
}
// NewOrderDomains is a convenience wrapper around NewOrder that builds a
// "dns" identifier for each provided domain.
func (c Client) NewOrderDomains(account Account, domains ...string) (Order, error) {
	if len(domains) == 0 {
		return Order{}, errors.New("acme: no domains provided")
	}
	ids := make([]Identifier, 0, len(domains))
	for _, domain := range domains {
		ids = append(ids, Identifier{Type: "dns", Value: domain})
	}
	return c.NewOrder(account, ids)
}
// FetchOrder retrieves an existing order from the given order url.
func (c Client) FetchOrder(account Account, orderURL string) (Order, error) {
	// Boulder's response doesn't seem to contain a Location header for this
	// request, so seed the URL from the argument.
	order := Order{URL: orderURL}
	_, err := c.post(orderURL, account.URL, account.PrivateKey, "", &order, http.StatusOK)
	return order, err
}
// checkFinalizedOrderStatus reports whether an order has reached a terminal
// state. The bool is true when polling should stop; the error (if non-nil)
// explains why the order cannot complete.
func checkFinalizedOrderStatus(order Order) (bool, error) {
	switch order.Status {
	case "processing":
		// The certificate is still being issued; callers should poll again
		// after the Retry-After interval.
		return false, nil

	case "valid":
		// The certificate has been issued and its URL provisioned to the
		// "certificate" field of the order. Ready to download.
		return true, nil

	case "invalid":
		// The certificate will not be issued; surface the server-provided
		// problem document when one exists.
		if order.Error.Type != "" {
			return true, order.Error
		}
		return true, errors.New("acme: finalized order is invalid, no error provided")

	case "pending":
		// The server does not believe the client has fulfilled the
		// requirements; the "authorizations" array holds pending entries.
		return true, errors.New("acme: authorizations not fulfilled")

	case "ready":
		// "ready" should not occur once a finalization request has been
		// submitted.
		return true, errors.New("acme: unexpected 'ready' state")

	default:
		return true, fmt.Errorf("acme: unknown order status: %s", order.Status)
	}
}
// FinalizeOrder indicates to the acme server that the client considers an
// order complete and "finalizes" it by submitting the CSR.
//
// If the server believes the authorizations have been fulfilled, a
// certificate should then become available. This function assumes the order
// status is "ready". It polls (using the client's PollInterval/PollTimeout)
// until the order reaches a terminal state or the timeout elapses.
func (c Client) FinalizeOrder(account Account, order Order, csr *x509.CertificateRequest) (Order, error) {
	finaliseReq := struct {
		Csr string `json:"csr"`
	}{
		Csr: base64.RawURLEncoding.EncodeToString(csr.Raw),
	}

	resp, err := c.post(order.Finalize, account.URL, account.PrivateKey, finaliseReq, &order, http.StatusOK)
	if err != nil {
		return order, err
	}
	order.URL = resp.Header.Get("Location")

	if finished, err := checkFinalizedOrderStatus(order); finished {
		return order, err
	}

	pollInterval, pollTimeout := c.getPollingDurations()
	end := time.Now().Add(pollTimeout)
	for {
		if time.Now().After(end) {
			return order, errors.New("acme: finalized order timeout")
		}
		time.Sleep(pollInterval)

		// Transient errors (e.g. connectivity hiccups) are not fatal here;
		// keep polling until the deadline above fires.
		if _, err := c.post(order.URL, account.URL, account.PrivateKey, "", &order, http.StatusOK); err != nil {
			continue
		}

		// BUG FIX: the original loop re-read the Location header from the
		// *stale* initial finalize response here (the poll response was
		// discarded), which could never change order.URL. The redundant
		// reassignment has been removed; order.URL keeps the value set above.
		if finished, err := checkFinalizedOrderStatus(order); finished {
			return order, err
		}
	}
}

View File

@@ -1,65 +0,0 @@
package acme
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
)
// Problem represents an error document returned by an acme server, as
// defined in RFC 7807 ("Problem Details for HTTP APIs"),
// https://tools.ietf.org/html/rfc7807
type Problem struct {
	Type        string       `json:"type"`
	Detail      string       `json:"detail,omitempty"`
	Status      int          `json:"status,omitempty"`
	Instance    string       `json:"instance,omitempty"`
	SubProblems []SubProblem `json:"subproblems,omitempty"`
}

// SubProblem carries a per-identifier error nested inside a Problem.
type SubProblem struct {
	Type       string     `json:"type"`
	Detail     string     `json:"detail"`
	Identifier Identifier `json:"identifier"`
}
// Error renders the problem as a human readable string, including any
// sub-problems and the instance url when present.
func (err Problem) Error() string {
	msg := fmt.Sprintf("acme: error code %d %q: %s", err.Status, err.Type, err.Detail)
	for _, sub := range err.SubProblems {
		msg += fmt.Sprintf(", problem %q: %s", sub.Type, sub.Detail)
	}
	if err.Instance != "" {
		msg += ", url: " + err.Instance
	}
	return msg
}
// checkError returns nil when resp carries one of the expected status codes.
// Otherwise it attempts to decode an RFC 7807 problem document from the
// response body and returns it, or a descriptive error when decoding is
// impossible.
func checkError(resp *http.Response, expectedStatuses ...int) error {
	for _, code := range expectedStatuses {
		if code == resp.StatusCode {
			return nil
		}
	}

	// Outside the 4xx/5xx range there is no problem document to parse.
	if resp.StatusCode < 400 || resp.StatusCode >= 600 {
		return fmt.Errorf("acme: expected status codes: %d, got: %d %s", expectedStatuses, resp.StatusCode, resp.Status)
	}

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return fmt.Errorf("acme: error reading error body: %v", err)
	}

	var problem Problem
	if err := json.Unmarshal(body, &problem); err != nil {
		return fmt.Errorf("acme: parsing error body: %v - %s", err, string(body))
	}

	return problem
}

View File

@@ -1,163 +0,0 @@
package acme
import (
"crypto"
"net/http"
"time"
)
// Different possible challenge types provided by an ACME server.
// See https://tools.ietf.org/html/rfc8555#section-9.7.8
const (
	ChallengeTypeDNS01     = "dns-01"
	ChallengeTypeHTTP01    = "http-01"
	ChallengeTypeTLSALPN01 = "tls-alpn-01"

	// ChallengeTypeTLSSNI01 is deprecated and should not be used.
	// See: https://community.letsencrypt.org/t/important-what-you-need-to-know-about-tls-sni-validation-issues/50811
	ChallengeTypeTLSSNI01 = "tls-sni-01"
)

// Reason codes for certificate revocation, used with RevokeCertificate.
// See https://tools.ietf.org/html/rfc5280#section-5.3.1
const (
	ReasonUnspecified          = iota // 0
	ReasonKeyCompromise               // 1
	ReasonCaCompromise                // 2
	ReasonAffiliationChanged          // 3
	ReasonSuperseded                  // 4
	ReasonCessationOfOperation        // 5
	ReasonCertificateHold             // 6
	_                                 // 7 - Unused
	ReasonRemoveFromCRL               // 8
	ReasonPrivilegeWithdrawn          // 9
	ReasonAaCompromise                // 10
)
// Directory is the object returned from the client's directory url upon
// creation of the client, holding the endpoint URLs for every ACME
// operation.
// See https://tools.ietf.org/html/rfc8555#section-7.1.1
type Directory struct {
	NewNonce   string `json:"newNonce"`   // url to new nonce endpoint
	NewAccount string `json:"newAccount"` // url to new account endpoint
	NewOrder   string `json:"newOrder"`   // url to new order endpoint
	NewAuthz   string `json:"newAuthz"`   // url to new authz endpoint
	RevokeCert string `json:"revokeCert"` // url to revoke cert endpoint
	KeyChange  string `json:"keyChange"`  // url to key change endpoint

	// Meta is the object containing directory metadata.
	Meta struct {
		TermsOfService          string   `json:"termsOfService"`
		Website                 string   `json:"website"`
		CaaIdentities           []string `json:"caaIdentities"`
		ExternalAccountRequired bool     `json:"externalAccountRequired"`
	} `json:"meta"`

	// URL is the directory url provided when creating a new acme client.
	URL string `json:"-"`
}

// Client is the structure used to interact with an ACME server.
// This is typically how most, if not all, of the communication between the
// client and server occurs.
type Client struct {
	httpClient      *http.Client
	nonces          *nonceStack
	dir             Directory
	userAgentSuffix string
	acceptLanguage  string
	retryCount      int

	// PollTimeout is the amount of total time the Client will wait at most
	// for a challenge to be updated or a certificate to be issued.
	// Default 30 seconds if duration is not set or if set to 0.
	PollTimeout time.Duration

	// PollInterval is the time between checking if a challenge has been
	// updated or a certificate has been issued.
	// Default 0.5 seconds if duration is not set or if set to 0.
	PollInterval time.Duration
}
// Account represents the fields in an ACME account object.
// See https://tools.ietf.org/html/rfc8555#section-7.1.2
// See also https://tools.ietf.org/html/rfc8555#section-9.7.1
type Account struct {
	Status  string   `json:"status"`
	Contact []string `json:"contact"`
	Orders  string   `json:"orders"`

	// URL is provided by the Location http header when creating a new
	// account or fetching an existing account.
	URL string `json:"-"`

	// PrivateKey is the private key used to create or fetch the account.
	// Not fetched from server.
	PrivateKey crypto.Signer `json:"-"`

	// Thumbprint is the SHA-256 digest JWK_Thumbprint of the account key.
	// See https://tools.ietf.org/html/rfc8555#section-8.1
	Thumbprint string `json:"-"`
}

// Identifier is the identifier object used in order and authorization
// objects.
// See https://tools.ietf.org/html/rfc8555#section-7.1.4
type Identifier struct {
	Type  string `json:"type"`
	Value string `json:"value"`
}
// Order is the object returned when fetching or creating a new order.
// See https://tools.ietf.org/html/rfc8555#section-7.1.3
type Order struct {
	Status         string       `json:"status"`
	Expires        time.Time    `json:"expires"`
	Identifiers    []Identifier `json:"identifiers"`
	NotBefore      time.Time    `json:"notBefore"`
	NotAfter       time.Time    `json:"notAfter"`
	Error          Problem      `json:"error"`
	Authorizations []string     `json:"authorizations"`
	Finalize       string       `json:"finalize"`
	Certificate    string       `json:"certificate"`

	// URL for the order object.
	// Provided by the rel="Location" Link http header
	URL string `json:"-"`
}
// Authorization is the object returned when fetching an authorization in an
// order.
// See https://tools.ietf.org/html/rfc8555#section-7.1.4
type Authorization struct {
	Identifier Identifier  `json:"identifier"`
	Status     string      `json:"status"`
	Expires    time.Time   `json:"expires"`
	Challenges []Challenge `json:"challenges"`
	Wildcard   bool        `json:"wildcard"`

	// ChallengeMap and ChallengeTypes are populated client-side (not by the
	// server) for convenience access to the provided challenges.
	ChallengeMap   map[string]Challenge `json:"-"`
	ChallengeTypes []string             `json:"-"`

	URL string `json:"-"`
}

// Challenge is the object fetched in an authorization or directly from the
// challenge url.
// See https://tools.ietf.org/html/rfc8555#section-7.1.5
type Challenge struct {
	Type      string  `json:"type"`
	URL       string  `json:"url"`
	Status    string  `json:"status"`
	Validated string  `json:"validated"`
	Error     Problem `json:"error"`

	// Token and KeyAuthorization vary based on the challenge type used.
	Token            string `json:"token"`
	KeyAuthorization string `json:"keyAuthorization"`

	// AuthorizationURL is provided by the rel="up" Link http header.
	AuthorizationURL string `json:"-"`
}

// OrderList is a paginated list of order URLs.
type OrderList struct {
	Orders []string `json:"orders"`

	// Next is the url to the next page of orders.
	// Provided by the rel="next" Link http header
	Next string `json:"-"`
}

View File

@@ -1,6 +0,0 @@
vendor/
.idea/
acme-account.json
web/*_gen.go
letsdebug-server
letsdebug-cli

View File

@@ -1,23 +0,0 @@
language: go
go:
- '1.15.x'
before_install:
- sudo apt-get update
- sudo apt-get -y install libunbound-dev make
- sudo mkdir -p $GOPATH/bin
- export PATH=$PATH:$GOPATH/bin
install:
- make deps
script:
- make letsdebug-server letsdebug-cli
deploy:
provider: releases
api_key:
secure: f65vxdzq7SoIooXNAPJaHEiCcnG1Q2R7muomm/5qWyRXgaXyBu6Yo0oOGQNcYLbs22PbRCVS8xnN2cSfFV5jeilRKuhpTYq0tforjJbtjL1DEs9ODyLZnXIXH+uacIPwM/ioxFbFVTnSCkZx90+9I+WHY0taqc2AW49RvQKPTzOmWYZ4ATsQxsv5jBLXZIuhhl3cEJayeogT2yToYump3AZN+8o67kP94a/vSbPMTRcKOeLQa+gjxSoHVBfjmpYvGdFTp1iE8bWsJpfo/i2snF6eMdAig4Vy9Ajk/SVEmSzEBWk31JceDrT9n7VNmlDN9Us2PhdjQLR5KD3OCLx6QN/P72iBN1zq9bTRiHaF4TEUq6IyP3cnDQStfTYzE+IIZtl7DQQKY+Dp5mTO3QSq17Kp7Dvw9mNyGsyE7Oo4VmxHuH8XXbuCoyN2ywJ6l2rv/wuBPylIC5iuguJyVK9WnMxt8vOaBWIAPmm8HbviU3FHnHic6s4DPDpLfwpvsqbxSvEYcj+mRYKhMSD3pF2E/a9wFhph+Wj6sbPhiWkI84D4kmwH42h7WmYqaJfTMGyiZiiFdcF4J/M4c66csWbBLza1GIeNGmxPpKLjilIIBDo6gfpKqQYZllt+ZfeLdwIydE8m5NBVw1d6I0ctF9GNWUG8yfHPKHxNwY05kziW5qA=
file:
- letsdebug-server
- letsdebug-cli
skip_cleanup: true
on:
repo: letsdebug/letsdebug
tags: true

View File

@@ -1,32 +0,0 @@
.PHONY: clean all deps server-dev server-dev-db-up deploy

# Remove build artifacts.
clean:
	rm -f letsdebug-server

# Install build-time tooling (go-bindata, used by `go generate`).
deps:
	go get -u github.com/go-bindata/go-bindata/...

# Re-run code generation across the module.
generate:
	go generate ./...

test:
	go test -v ./...

# Run the web server locally in debug mode against the dev database
# (see server-dev-db-up).
server-dev: generate
	LETSDEBUG_WEB_DEBUG=1 \
	LETSDEBUG_WEB_DB_DSN="user=letsdebug dbname=letsdebug password=password sslmode=disable" \
	LETSDEBUG_DEBUG=1 go \
	run -race cmd/server/server.go

# Start a disposable PostgreSQL container for server-dev.
server-dev-db-up:
	docker run -d --name letsdebug-db -p 5432:5432 -e POSTGRES_PASSWORD=password -e POSTGRES_USER=letsdebug postgres:10.3-alpine

letsdebug-server: generate
	go build -o letsdebug-server cmd/server/server.go

letsdebug-cli:
	go build -o letsdebug-cli cmd/cli/cli.go

# Build the server binary, push it to production, and restart the service.
deploy: clean letsdebug-server
	rsync -vhz --progress letsdebug-server root@letsdebug.net:/usr/local/bin/ && \
	ssh root@letsdebug.net "systemctl restart letsdebug"

View File

@@ -1,170 +0,0 @@
# Let's Debug
[![Build Status](https://travis-ci.org/letsdebug/letsdebug.svg?branch=master)](https://travis-ci.org/letsdebug/letsdebug)
[![godoc](https://godoc.org/github.com/letsdebug/letsdebug?status.svg)](https://godoc.org/github.com/letsdebug/letsdebug)
Let's Debug is a diagnostic website, API, CLI and Go package for quickly and accurately finding and reporting issues for any domain that may prevent issuance of a Let's Encrypt SSL certificate for any ACME validation method.
It is motivated by [this community thread](https://community.letsencrypt.org/t/creating-a-webservice-for-analysis-of-common-problems/45836).
## Status
Currently [deployed to letsdebug.net and regularly in use](https://letsdebug.net).
## Problems Detected
| Name | Description | Examples |
-------|-------------|--------|
| InvalidMethod, ValidationMethodDisabled, ValidationMethodNotSuitable | Checks the ACME validation method is valid and usable for the provided domain name. | [Example](https://letsdebug.net/*.letsencrypt.org/1) |
| InvalidDomain | Checks the domain is a valid domain name on a public TLD. | [Example](https://letsdebug.net/ooga.booga/2) |
| StatusNotOperational| Checks that the Let's Encrypt service is not experiencing an outage, according to status.io | -
| DNSLookupFailed, TXTRecordError | Checks that the Unbound resolver (via libunbound) is able to resolve a variety of records relevant to Let's Encrypt. Discovers problems such as DNSSEC issues, 0x20 mixed case randomization, timeouts etc, in the spirit of jsha's unboundtest.com | [Example](https://letsdebug.net/dnssec-failed.org/3) |
CAAIssuanceNotAllowed | Checks that no CAA records are preventing the issuance of Let's Encrypt certificates. | [Example](https://letsdebug.net/id-rsa.pub/4) |
CAACriticalUnknown | Checks that no CAA critical flags unknown to Let's Encrypt are used | - |
RateLimit | Checks that the domain name is not currently affected by any of the domain-based rate limits imposed by Let's Encrypt, using the public certwatch Postgres interface from Comodo's crt.sh. | [Example](https://letsdebug.net/targettec.ddns.net/13) |
NoRecords, ReservedAddress | Checks that sufficient valid A/AAAA records are present to perform HTTP-01 validation | [Example](https://letsdebug.net/localtest.me/6) |
BadRedirect | Checks that no bad HTTP redirects are present. Discovers redirects that aren't accessible, unacceptable ports, unacceptable schemes, accidental missing trailing slash on redirect. | [Example](https://letsdebug.net/foo.monkas.xyz/7) |
WebserverMisconfiguration | Checks whether the server is serving the wrong protocol on the wrong port as the result of an HTTP-01 validation request. | - |
ANotWorking, AAAANotWorking | Checks whether listed IP addresses are not functioning properly for HTTP-01 validation, including timeouts and other classes of network and HTTP errors. | [Example](https://letsdebug.net/network-fail.foo.monkas.xyz/8) |
MultipleIPAddressDiscrepancy | For domains with multiple A/AAAA records, checks whether there are major discrepancies between the server responses to reveal when the addresses may be pointing to different servers accidentally. | [Example](https://letsdebug.net/v4v6fail.monkas.xyz/51916)
CloudflareCDN | Checks whether the domain is being served via Cloudflare's proxy service (and therefore SSL termination is occurring at Cloudflare) | - |
CloudflareSSLNotProvisioned | Checks whether the domain has its SSL terminated by Cloudflare and Cloudflare has not provisioned a certificate yet (leading to a TLS handshake error). | [Example](https://letsdebug.net/cf-no-ssl.fleetssl.com/10) |
IssueFromLetsEncrypt | Attempts to detect issues with a high degree of accuracy via the Let's Encrypt v2 staging service by attempting to perform an authorization for the domain. Discovers issues such as CA-based domain blacklists & other policies, specific networking issues. | [Example](https://letsdebug.net/bankofamerica.com/12) |
| TXTDoubleLabel | Checks for the presence of records that are doubled up (e.g. `_acme-challenge.example.org.example.org`). Usually indicates that the user has been incorrectly creating records in their DNS user interface. | [Example](https://letsdebug.net/double.monkas.xyz/2477) |
PortForwarding | Checks whether the domain is serving a modem-router administrative interface instead of an intended webserver, which is indicative of a port-forwarding misconfiguration. | [Example](https://letsdebug.net/cdkauffmannnextcloud.duckdns.org/11450) |
| SanctionedDomain | Checks whether the Registered Domain is present on the [USG OFAC SDN List](https://sanctionssearch.ofac.treas.gov/). Updated daily. | [Example](https://letsdebug.net/unomasuno.com.mx/48081) |
| BlockedByNginxTestCookie | Checks whether the HTTP-01 validation requests are being intercepted by [testcookie-nginx-module](https://github.com/kyprizel/testcookie-nginx-module). | [Example](https://letsdebug.net/13513427185.ifastnet.org/51860) |
| HttpOnHttpsPort | Checks whether the server reported receiving an HTTP request on an HTTPS-only port | [Example](https://letsdebug.net/clep-energy.org/107591) |
## Web API Usage
There is a JSON-based API available as part of the web frontend.
### Submitting a test
```bash
$ curl --data '{"method":"http-01","domain":"letsdebug.net"}' -H 'content-type: application/json' https://letsdebug.net
```
```javascript
{"Domain":"letsdebug.net","ID":14}
```
### Submitting a test with custom options
```bash
curl --data '{"method":"http-01","domain":"letsdebug.net","options":{"http_request_path":"custom-path","http_expect_response":"abc123"}}' -H 'content-type: application/json' https://letsdebug.net
```
Available options are as follows:
| Option | Description |
-------|-------------|
`http_request_path` | What path within `/.well-known/acme-challenge/` to use instead of `letsdebug-test` (default) for the HTTP check. Max length 255. |
`http_expect_response` | What exact response to expect from each server during the HTTP check. By default, no particular response is expected. If present and the response does not match, the test will fail with an Error severity. It is highly recommended to always use a completely random value. Max length 255. |
### Viewing tests
```bash
$ curl -H 'accept: application/json' https://letsdebug.net/letsdebug.net/14
```
```javascript
{"id":14,"domain":"letsdebug.net","method":"http-01","status":"Complete","created_at":"2018-04-30T01:58:34.765829Z","started_at":"2018-04-30T01:58:34.769815Z","completed_at":"2018-04-30T01:58:41.39023Z","result":{}}
```
or to view all recent tests
```bash
$ curl -H 'accept: application/json' https://letsdebug.net/letsdebug.net
```
### Performing a query against the Certwatch database
```bash
$ curl "https://letsdebug.net/certwatch-query?q=<urlencoded SQL query>"
```
```javascript
{
"query": "select c.id as crtsh_id, x509_subjectName(c.CERTIFICATE), x509_notAfter(c.CERTIFICATE) from certificate c where x509_notAfter(c.CERTIFICATE) = '2018-06-01 16:25:44' AND x509_issuerName(c.CERTIFICATE) LIKE 'C=US, O=Let''s Encrypt%';",
"results": [
{
"crtsh_id": 346300797,
"x509_notafter": "2018-06-01T16:25:44Z",
"x509_subjectname": "CN=hivdatingzimbabwe.com"
},
/* ... */
]
}
```
## CLI Usage
You can download binaries for tagged releases for Linux for both the CLI and the server [from the releases page](https://github.com/letsdebug/letsdebug/releases).
letsdebug-cli -domain example.org -method http-01 -debug
## Library Usage
```go
import "github.com/letsdebug/letsdebug"
problems, _ := letsdebug.Check("example.org", letsdebug.HTTP01)
```
## Installation
### Dependencies
This package relies on a fairly recent version of libunbound.
* On Debian-based distributions:
`apt install libunbound2 libunbound-dev`
* On EL-based distributions, you may need to build from source because the packages are ancient on e.g. CentOS, but you can try:
`yum install unbound-libs unbound-devel`
* On OSX, [Homebrew](https://brew.sh/) contains the latest version of unbound:
`brew install unbound`
You will also need Go's [dep](https://github.com/golang/dep) dependency manager.
### Releases
You can save time by [downloading tagged releases for 64-bit Linux](https://github.com/letsdebug/letsdebug/releases). Keep in mind you will still need to have libunbound present on your system.
### Building
go get -u github.com/letsdebug/letsdebug/...
cd $GOPATH/src/github.com/letsdebug/letsdebug
make deps
make letsdebug-cli letsdebug-server
## Contributing
Any contributions containing JavaScript will be discarded, but other feedback, bug reports, suggestions and enhancements are welcome - please open an issue first.
## LICENSE
MIT License
Copyright (c) 2018 Let's Debug
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -1,107 +0,0 @@
package letsdebug
import (
"crypto/sha256"
"errors"
"fmt"
"reflect"
"time"
)
// ValidationMethod represents an ACME validation method.
type ValidationMethod string

const (
	HTTP01    ValidationMethod = "http-01"     // HTTP01 represents the ACME http-01 validation method.
	DNS01     ValidationMethod = "dns-01"      // DNS01 represents the ACME dns-01 validation method.
	TLSALPN01 ValidationMethod = "tls-alpn-01" // TLSALPN01 represents the ACME tls-alpn-01 validation method.
)

var (
	// validMethods is the set of validation methods accepted by Check.
	validMethods = map[ValidationMethod]bool{HTTP01: true, DNS01: true, TLSALPN01: true}

	// errNotApplicable is returned by a checker to signal "no opinion" for
	// this domain/method combination; the runner filters it out.
	errNotApplicable = errors.New("Checker not applicable for this domain and method")

	// checkers is the ordered checker pipeline, populated in init.
	checkers []checker
)
// init builds the checker pipeline. The blocks run sequentially; checkers
// inside a block run concurrently, so ordering between blocks encodes the
// dependencies noted below.
func init() {
	// Since the OFAC SDN checker polls, we need to initialize it
	ofac := &ofacSanctionChecker{}
	ofac.setup()

	// We want to launch the slowest checkers as early as possible,
	// unless they have a dependency on an earlier checker
	checkers = []checker{
		// Block 1: validity/status gates with no inter-dependencies.
		asyncCheckerBlock{
			validMethodChecker{},
			validDomainChecker{},
			wildcardDNS01OnlyChecker{},
			statusioChecker{},
			ofac,
		},
		// Block 2: DNS-level checks; these rely on block 1 having validated
		// the domain and method.
		asyncCheckerBlock{
			caaChecker{},             // depends on valid*Checker
			&rateLimitChecker{},      // depends on valid*Checker
			dnsAChecker{},            // depends on valid*Checker
			txtRecordChecker{},       // depends on valid*Checker
			txtDoubledLabelChecker{}, // depends on valid*Checker
		},
		// Block 3: HTTP-level checks that use the lookups from block 2.
		asyncCheckerBlock{
			httpAccessibilityChecker{}, // depends on dnsAChecker
			cloudflareChecker{},        // depends on dnsAChecker to some extent
			&acmeStagingChecker{},      // Gets the final word
		},
	}
}
// checker is the interface implemented by every individual diagnostic.
type checker interface {
	Check(ctx *scanContext, domain string, method ValidationMethod) ([]Problem, error)
}

// asyncCheckerBlock represents a checker which is composed of other checkers
// that can be run simultaneously.
type asyncCheckerBlock []checker

// asyncResult carries one checker's outcome across the result channel.
type asyncResult struct {
	Problems []Problem
	Error    error
}
// Check runs every sub-checker concurrently and gathers their results.
// It returns the union of all reported problems, or the first hard error
// (anything other than errNotApplicable) produced by a sub-checker.
func (c asyncCheckerBlock) Check(ctx *scanContext, domain string, method ValidationMethod) ([]Problem, error) {
	resultCh := make(chan asyncResult, len(c))

	// Short pseudo-random id used only to correlate debug log lines.
	id := fmt.Sprintf("%x", sha256.Sum256([]byte(fmt.Sprintf("%d", time.Now().UnixNano()))))[:4]

	debug("[%s] Launching async\n", id)

	for _, task := range c {
		go func(task checker, ctx *scanContext, domain string, method ValidationMethod) {
			// A panicking checker must not take down the whole scan; convert
			// the panic into an error result instead.
			defer func() {
				if r := recover(); r != nil {
					// BUG FIX: error message previously misspelled "paniced".
					resultCh <- asyncResult{nil, fmt.Errorf("Check %T panicked: %v", task, r)}
				}
			}()
			t := reflect.TypeOf(task)
			debug("[%s] async: + %v\n", id, t)
			start := time.Now()
			probs, err := task.Check(ctx, domain, method)
			debug("[%s] async: - %v in %v\n", id, t, time.Since(start))
			resultCh <- asyncResult{probs, err}
		}(task, ctx, domain, method)
	}

	var probs []Problem

	// Drain exactly one result per launched checker.
	for i := 0; i < len(c); i++ {
		result := <-resultCh
		if result.Error != nil && result.Error != errNotApplicable {
			debug("[%s] Exiting async via error\n", id)
			return nil, result.Error
		}
		if len(result.Problems) > 0 {
			probs = append(probs, result.Problems...)
		}
	}

	debug("[%s] Exiting async gracefully\n", id)
	return probs, nil
}

View File

@@ -1,81 +0,0 @@
package letsdebug
import (
"fmt"
"math/rand"
"net"
"sync"
"github.com/miekg/dns"
)
// lookupResult caches the outcome (records and error) of a single DNS query.
type lookupResult struct {
	RRs   []dns.RR
	Error error
}

// scanContext carries shared state for one scan: a memoized DNS lookup cache
// and the HTTP check parameters.
type scanContext struct {
	// rrs maps query name -> rr type -> cached result; guarded by rrsMutex.
	rrs      map[string]map[uint16]lookupResult
	rrsMutex sync.Mutex

	httpRequestPath    string
	httpExpectResponse string
}
// newScanContext returns a scanContext with an empty DNS cache and the
// default HTTP request path ("letsdebug-test").
func newScanContext() *scanContext {
	sc := &scanContext{}
	sc.rrs = map[string]map[uint16]lookupResult{}
	sc.httpRequestPath = "letsdebug-test"
	return sc
}
// Lookup resolves name/rrType, memoizing both successful and failed results
// for the lifetime of the scan so repeated queries hit the cache.
func (sc *scanContext) Lookup(name string, rrType uint16) ([]dns.RR, error) {
	sc.rrsMutex.Lock()
	rrMap, ok := sc.rrs[name]
	if !ok {
		rrMap = map[uint16]lookupResult{}
		sc.rrs[name] = rrMap
	}
	result, ok := rrMap[rrType]
	sc.rrsMutex.Unlock()

	// Cache hit: return the stored records (or the stored error).
	if ok {
		return result.RRs, result.Error
	}

	// The mutex is deliberately released during the slow network lookup.
	// Concurrent callers for the same key may therefore both resolve; the
	// later write simply overwrites an equivalent result.
	resolved, err := lookup(name, rrType)

	sc.rrsMutex.Lock()
	rrMap[rrType] = lookupResult{
		RRs:   resolved,
		Error: err,
	}
	sc.rrsMutex.Unlock()

	return resolved, err
}
// LookupRandomHTTPRecord picks an address to target for the HTTP check.
// Only slightly random - it will use AAAA over A if possible.
func (sc *scanContext) LookupRandomHTTPRecord(name string) (net.IP, error) {
	// Prefer IPv6: pick a random AAAA record when any exist.
	if rrs, err := sc.Lookup(name, dns.TypeAAAA); err != nil {
		return net.IP{}, err
	} else if len(rrs) > 0 {
		if aaaa, ok := rrs[rand.Intn(len(rrs))].(*dns.AAAA); ok {
			return aaaa.AAAA, nil
		}
	}

	// Fall back to IPv4.
	if rrs, err := sc.Lookup(name, dns.TypeA); err != nil {
		return net.IP{}, err
	} else if len(rrs) > 0 {
		if a, ok := rrs[rand.Intn(len(rrs))].(*dns.A); ok {
			return a.A, nil
		}
	}

	return net.IP{}, fmt.Errorf("No AAAA or A records were found for %s", name)
}

View File

@@ -1,156 +0,0 @@
package letsdebug
import (
"crypto/rand"
"fmt"
"sort"
"strings"
"sync"
"github.com/miekg/dns"
"github.com/weppos/publicsuffix-go/publicsuffix"
)
// wildcardDNS01OnlyChecker ensures that a wildcard domain is only validated
// via dns-01.
type wildcardDNS01OnlyChecker struct{}

// Check flags any wildcard domain paired with a non-dns-01 method.
func (c wildcardDNS01OnlyChecker) Check(ctx *scanContext, domain string, method ValidationMethod) ([]Problem, error) {
	// Non-wildcard names, and wildcards validated via dns-01, are fine.
	if !strings.HasPrefix(domain, "*.") || method == DNS01 {
		return nil, errNotApplicable
	}
	return []Problem{wildcardHTTP01(domain, method)}, nil
}
// wildcardHTTP01 builds the fatal problem reported when a wildcard domain is
// requested with anything other than dns-01.
func wildcardHTTP01(domain string, method ValidationMethod) Problem {
	p := Problem{Name: "MethodNotSuitable", Severity: SeverityFatal}
	p.Explanation = fmt.Sprintf("A wildcard domain like %s can only be issued using a dns-01 validation method.", domain)
	p.Detail = fmt.Sprintf("Invalid method: %s", method)
	return p
}
// txtRecordChecker ensures there are no resolution errors with the
// _acme-challenge TXT record that dns-01 validation depends on.
type txtRecordChecker struct{}

func (c txtRecordChecker) Check(ctx *scanContext, domain string, method ValidationMethod) ([]Problem, error) {
	if method != DNS01 {
		return nil, errNotApplicable
	}
	trimmed := strings.TrimPrefix(domain, "*.")
	if _, err := ctx.Lookup("_acme-challenge."+trimmed, dns.TypeTXT); err != nil {
		// Resolver failures on this record will also fail issuance, so this
		// is reported as the checker's (fatal) finding.
		return []Problem{txtRecordError(trimmed, err)}, nil
	}
	return nil, nil
}
// txtRecordError builds the fatal problem for a failed _acme-challenge TXT
// lookup.
func txtRecordError(domain string, err error) Problem {
	explanation := fmt.Sprintf(`An error occurred while attempting to lookup the TXT record on _acme-challenge.%s . `+
		`Any resolver errors that the Let's Encrypt CA encounters on this record will cause certificate issuance to fail.`, domain)
	return Problem{
		Name:        "TXTRecordError",
		Explanation: explanation,
		Detail:      err.Error(),
		Severity:    SeverityFatal,
	}
}
// txtDoubledLabelChecker ensures that a record for
// _acme-challenge.example.org.example.org wasn't accidentally created — a
// mistake users make when a DNS editor implicitly appends the zone name.
type txtDoubledLabelChecker struct{}

// Check queries the "doubled label" variants of the _acme-challenge name and
// reports a warning when any resolve to TXT records, unless a wildcard TXT
// record in the zone makes the match meaningless.
func (c txtDoubledLabelChecker) Check(ctx *scanContext, domain string, method ValidationMethod) ([]Problem, error) {
	if method != DNS01 {
		return nil, errNotApplicable
	}

	registeredDomain, _ := publicsuffix.Domain(domain)

	// The two mistake shapes being probed for.
	variants := []string{
		fmt.Sprintf("_acme-challenge.%s.%s", domain, domain),           // _acme-challenge.www.example.org.www.example.org
		fmt.Sprintf("_acme-challenge.%s.%s", domain, registeredDomain), // _acme-challenge.www.example.org.example.org
	}

	var found []string
	distinctCombined := map[string]struct{}{}
	var randomCombined string
	// foundMu guards found and distinctCombined, written from the variant
	// goroutines below.
	var foundMu sync.Mutex

	var wg sync.WaitGroup
	wg.Add(len(variants) + 1)

	// doQuery returns q's TXT records, plus a sorted newline-joined digest
	// of their values used to compare responses between different names.
	doQuery := func(q string) ([]string, string) {
		found := []string{}
		combined := []string{}
		rrs, _ := ctx.Lookup(q, dns.TypeTXT)
		for _, rr := range rrs {
			txt, ok := rr.(*dns.TXT)
			if !ok {
				continue
			}
			found = append(found, txt.String())
			combined = append(combined, txt.Txt...)
		}
		sort.Strings(combined)
		return found, strings.Join(combined, "\n")
	}

	// Check the double label variants
	for _, variant := range variants {
		go func(q string) {
			defer wg.Done()
			values, combined := doQuery(q)
			if len(values) == 0 {
				return
			}
			foundMu.Lock()
			defer foundMu.Unlock()
			found = append(found, values...)
			distinctCombined[combined] = struct{}{}
		}(variant)
	}

	// Check the response for a random subdomain, to detect the presence of a wildcard TXT record
	go func() {
		defer wg.Done()
		nonce := make([]byte, 4)
		_, _ = rand.Read(nonce)
		_, randomCombined = doQuery(fmt.Sprintf("_acme-challenge.%s.%s", fmt.Sprintf("rand-%x", nonce), domain))
	}()

	wg.Wait()

	// If a randomized subdomain has the exact same non-empty TXT response as any of the "double labels", then
	// we are probably dealing with a wildcard TXT record in the zone, and it is probably not a meaningful
	// misconfiguration. In this case, say nothing.
	if _, ok := distinctCombined[randomCombined]; ok && randomCombined != "" {
		return nil, nil
	}

	if len(found) > 0 {
		return []Problem{{
			Name: "TXTDoubleLabel",
			Explanation: "Some DNS records were found that indicate TXT records may have been incorrectly manually entered into " +
				`DNS editor interfaces. The correct way to enter these records is to either remove the domain from the label (so ` +
				`enter "_acme-challenge.www.example.org" as "_acme-challenge.www") or include a period (.) at the ` +
				`end of the label (enter "_acme-challenge.example.org.").`,
			Detail:   fmt.Sprintf("The following probably-erroneous TXT records were found:\n%s", strings.Join(found, "\n")),
			Severity: SeverityWarning,
		}}, nil
	}

	return nil, nil
}

View File

@@ -1,127 +0,0 @@
package letsdebug
import (
"fmt"
"net"
"strings"
"github.com/miekg/dns"
"github.com/miekg/unbound"
)
var (
	// reservedNets holds the non-public (special-purpose) address ranges
	// consulted by isAddressReserved; populated in init.
	reservedNets []*net.IPNet
)
// lookup resolves name/rrType through a freshly configured Unbound context,
// enforcing DNSSEC validation and rejecting SERVFAIL/REFUSED responses.
// It returns the resolved resource records, or an error describing why the
// response was unacceptable.
func lookup(name string, rrType uint16) ([]dns.RR, error) {
	resolver := unbound.New()
	defer resolver.Destroy()

	if err := setUnboundConfig(resolver); err != nil {
		return nil, fmt.Errorf("Failed to configure Unbound resolver: %v", err)
	}

	res, err := resolver.Resolve(name, rrType, dns.ClassINET)
	if err != nil {
		return nil, err
	}

	switch {
	case res.Bogus:
		// DNSSEC validation failed outright.
		return nil, fmt.Errorf("DNS response for %s had fatal DNSSEC issues: %v", name, res.WhyBogus)
	case res.Rcode == dns.RcodeServerFailure, res.Rcode == dns.RcodeRefused:
		return nil, fmt.Errorf("DNS response for %s/%s did not have an acceptable response code: %s",
			name, dns.TypeToString[rrType], dns.RcodeToString[res.Rcode])
	}

	return res.Rr, nil
}
// normalizeFqdn canonicalizes a domain name: surrounding whitespace is
// trimmed, a single trailing root dot is dropped, and the result lowercased.
func normalizeFqdn(name string) string {
	trimmed := strings.TrimSuffix(strings.TrimSpace(name), ".")
	return strings.ToLower(trimmed)
}
// isAddressReserved reports whether ip falls within any of the special-use
// networks collected in reservedNets (populated by init).
func isAddressReserved(ip net.IP) bool {
	for _, netRange := range reservedNets {
		if netRange.Contains(ip) {
			return true
		}
	}
	return false
}
// init seeds reservedNets with the IANA special-purpose IPv4/IPv6 ranges
// (private, loopback, link-local, documentation, multicast, etc.). A
// malformed CIDR here is a programmer error, so parsing failures panic.
func init() {
	reservedCIDRs := []string{
		"0.0.0.0/8", "10.0.0.0/8", "100.64.0.0/10",
		"127.0.0.0/8", "169.254.0.0/16", "172.16.0.0/12",
		"192.0.0.0/24", "192.0.2.0/24", "192.88.99.0/24",
		"192.168.0.0/16", "198.18.0.0/15", "198.51.100.0/24",
		"203.0.113.0/24", "224.0.0.0/4", "240.0.0.0/4",
		"255.255.255.255/32", "::/128", "::1/128", /*"::ffff:0:0/96",*/
		"64:ff9b::/96", "100::/64", "2001::/32", "2001:10::/28",
		"2001:20::/28", "2001:db8::/32", "2002::/16", "fc00::/7",
		"fe80::/10", "ff00::/8",
	}
	reservedNets = make([]*net.IPNet, 0, len(reservedCIDRs))
	for _, cidr := range reservedCIDRs {
		_, parsed, err := net.ParseCIDR(cidr)
		if err != nil {
			panic(err)
		}
		reservedNets = append(reservedNets, parsed)
	}
}
// setUnboundConfig applies a fixed option set to the given Unbound context
// and installs the root-zone DNSSEC trust anchors. All cache TTL options are
// forced to 0 so every scan observes live DNS data rather than cached
// answers. Returns an error as soon as any option cannot be applied.
func setUnboundConfig(ub *unbound.Unbound) error {
	// options need the : in the option key according to docs
	opts := []struct {
		Opt string
		Val string
	}{
		{"verbosity:", "0"},
		{"use-syslog:", "no"},
		{"do-ip4:", "yes"},
		{"do-ip6:", "yes"},
		{"do-udp:", "yes"},
		{"do-tcp:", "yes"},
		{"tcp-upstream:", "no"},
		{"harden-glue:", "yes"},
		{"harden-dnssec-stripped:", "yes"},
		{"cache-min-ttl:", "0"},
		{"cache-max-ttl:", "0"},
		{"cache-max-negative-ttl:", "0"},
		{"neg-cache-size:", "0"},
		{"prefetch:", "no"},
		{"unwanted-reply-threshold:", "10000"},
		{"do-not-query-localhost:", "yes"},
		{"val-clean-additional:", "yes"},
		{"harden-algo-downgrade:", "yes"},
		{"edns-buffer-size:", "512"},
		{"val-sig-skew-min:", "0"},
		{"val-sig-skew-max:", "0"},
		{"target-fetch-policy:", "0 0 0 0 0"},
	}
	for _, opt := range opts {
		// Can't ignore these because we cant silently have policies being ignored
		if err := ub.SetOption(opt.Opt, opt.Val); err != nil {
			return fmt.Errorf("Failed to configure unbound with option %s %v", opt.Opt, err)
		}
	}
	// use-caps-for-id was bugged (no colon) < 1.7.1, try both ways in order to be compatible
	// https://www.nlnetlabs.nl/bugs-script/show_bug.cgi?id=4092
	if err := ub.SetOption("use-caps-for-id:", "yes"); err != nil {
		if err = ub.SetOption("use-caps-for-id", "yes"); err != nil {
			return fmt.Errorf("Failed to configure unbound with use-caps-for-id: %v", err)
		}
	}
	// Root trust anchors (KSK/ZSK DNSKEYs plus an RRSIG) for DNSSEC validation.
	// NOTE(review): the embedded RRSIG carries 2018 validity timestamps —
	// confirm whether Unbound uses it or only the DNSKEYs as trust anchors.
	return ub.AddTa(`. 172800 IN DNSKEY 257 3 8 AwEAAaz/tAm8yTn4Mfeh5eyI96WSVexTBAvkMgJzkKTOiW1vkIbzxeF3+/4RgWOq7HrxRixHlFlExOLAJr5emLvN7SWXgnLh4+B5xQlNVz8Og8kvArMtNROxVQuCaSnIDdD5LKyWbRd2n9WGe2R8PzgCmr3EgVLrjyBxWezF0jLHwVN8efS3rCj/EWgvIWgb9tarpVUDK/b58Da+sqqls3eNbuv7pr+eoZG+SrDK6nWeL3c6H5Apxz7LjVc1uTIdsIXxuOLYA4/ilBmSVIzuDWfdRUfhHdY6+cn8HFRm+2hM8AnXGXws9555KrUB5qihylGa8subX2Nn6UwNR1AkUTV74bU=
. 172800 IN DNSKEY 256 3 8 AwEAAdp440E6Mz7c+Vl4sPd0lTv2Qnc85dTW64j0RDD7sS/zwxWDJ3QRES2VKDO0OXLMqVJSs2YCCSDKuZXpDPuf++YfAu0j7lzYYdWTGwyNZhEaXtMQJIKYB96pW6cRkiG2Dn8S2vvo/PxW9PKQsyLbtd8PcwWglHgReBVp7kEv/Dd+3b3YMukt4jnWgDUddAySg558Zld+c9eGWkgWoOiuhg4rQRkFstMX1pRyOSHcZuH38o1WcsT4y3eT0U/SR6TOSLIB/8Ftirux/h297oS7tCcwSPt0wwry5OFNTlfMo8v7WGurogfk8hPipf7TTKHIi20LWen5RCsvYsQBkYGpF78=
. 172800 IN DNSKEY 257 3 8 AwEAAagAIKlVZrpC6Ia7gEzahOR+9W29euxhJhVVLOyQbSEW0O8gcCjFFVQUTf6v58fLjwBd0YI0EzrAcQqBGCzh/RStIoO8g0NfnfL2MTJRkxoXbfDaUeVPQuYEhg37NZWAJQ9VnMVDxP/VHL496M/QZxkjf5/Efucp2gaDX6RS6CXpoY68LsvPVjR0ZSwzz1apAzvN9dlzEheX7ICJBBtuA6G3LQpzW5hOA2hzCTMjJPJ8LbqF6dsV6DoBQzgul0sGIcGOYl7OyQdXfZ57relSQageu+ipAdTTJ25AsRTAoub8ONGcLmqrAmRLKBP1dfwhYB4N7knNnulqQxA+Uk1ihz0=
. 172800 IN RRSIG DNSKEY 8 0 172800 20181101000000 20181011000000 20326 . M/LTswhCjuJUTvX1CFqC+TiJ4Fez7AROa5mM+1AI2MJ+zLHhr3JaMxyydFLWrBHR0056Hz7hNqQ9i63hGeiR6uMfanF0jIRb9XqgGP8nY37T8ESpS1UiM9rJn4b40RFqDSEvuFdd4hGwK3EX0snOCLdUT8JezxtreXI0RilmqDC2g44TAKyFw+Is9Qwl+k6+fbMQ/atA8adANbYgyuHfiwQCCUtXRaTCpRgQtsAz9izO0VYIGeHIoJta0demAIrLCOHNVH2ogHTqMEQ18VqUNzTd0aGURACBdS7PeP2KogPD7N8Q970O84TFmO4ahPIvqO+milCn5OQTbbgsjHqY6Q==`)
}

View File

@@ -1,861 +0,0 @@
package letsdebug
import (
"context"
"crypto/x509"
"database/sql"
"encoding/pem"
"encoding/xml"
"io/ioutil"
"net"
"os"
"sort"
"strings"
"sync"
"github.com/eggsampler/acme/v3"
"fmt"
"net/http"
"net/url"
"time"
"encoding/json"
// Driver for crtwatch/ratelimitChecker
_ "github.com/lib/pq"
"github.com/miekg/dns"
"github.com/weppos/publicsuffix-go/net/publicsuffix"
psl "github.com/weppos/publicsuffix-go/publicsuffix"
)
// validMethodChecker ensures that the provided authorization method is valid and supported.
type validMethodChecker struct{}

// Check reports a fatal problem when method is not one of validMethods;
// for supported methods it returns errNotApplicable (nothing to report).
func (c validMethodChecker) Check(ctx *scanContext, domain string, method ValidationMethod) ([]Problem, error) {
	if !validMethods[method] {
		return []Problem{notValidMethod(method)}, nil
	}
	return nil, errNotApplicable
}
// notValidMethod builds the fatal problem reported for an unsupported
// validation method, listing every method this tool does support.
func notValidMethod(method ValidationMethod) Problem {
	supported := make([]string, 0, len(validMethods))
	for m := range validMethods {
		supported = append(supported, string(m))
	}
	return Problem{
		Name:        "InvalidMethod",
		Severity:    SeverityFatal,
		Explanation: fmt.Sprintf(`"%s" is not a supported validation method.`, method),
		Detail:      fmt.Sprintf("Supported methods: %s", strings.Join(supported, ", ")),
	}
}
// validDomainChecker ensures that the FQDN is well-formed and is part of a public suffix.
type validDomainChecker struct{}

// Check validates the syntax of domain (after stripping any "*." wildcard
// prefix) and confirms it sits under an IANA public suffix. It reports a
// fatal InvalidDomain problem for bad characters, over-long names, bare IP
// addresses, names outside any public TLD, and bare TLDs; otherwise it
// records the detected TLD as a debug-level problem.
func (c validDomainChecker) Check(ctx *scanContext, domain string, method ValidationMethod) ([]Problem, error) {
	var probs []Problem

	domain = strings.TrimPrefix(domain, "*.")

	for _, ch := range []byte(domain) {
		// Allow only letters, digits, '.' and '-'.
		// BUG FIX: the upper-case range previously read (ch <= 'A') instead
		// of (ch <= 'Z'), rejecting every upper-case letter except 'A'.
		if !(('a' <= ch && ch <= 'z') ||
			('A' <= ch && ch <= 'Z') ||
			('0' <= ch && ch <= '9') ||
			ch == '.' || ch == '-') {
			probs = append(probs, invalidDomain(domain, fmt.Sprintf("Invalid character present: %c", ch)))
			return probs, nil
		}
	}

	if len(domain) > 230 {
		probs = append(probs, invalidDomain(domain, "Domain too long"))
		return probs, nil
	}

	if ip := net.ParseIP(domain); ip != nil {
		probs = append(probs, invalidDomain(domain, "Domain is an IP address"))
		return probs, nil
	}

	// Match against the public suffix list, ignoring private suffixes.
	rule := psl.DefaultList.Find(domain, &psl.FindOptions{IgnorePrivate: true, DefaultRule: nil})
	if rule == nil {
		probs = append(probs, invalidDomain(domain, "Domain doesn't end in a public TLD"))
		return probs, nil
	}

	if r := rule.Decompose(domain)[1]; r == "" {
		probs = append(probs, invalidDomain(domain, "Domain is a TLD"))
		return probs, nil
	} else {
		probs = append(probs, debugProblem("PublicSuffix", "The IANA public suffix is the TLD of the Registered Domain",
			fmt.Sprintf("The TLD for %s is: %s", domain, r)))
	}

	return probs, nil
}
// caaChecker ensures that any caa record on the domain, or up the domain tree, allow issuance for letsencrypt.org
type caaChecker struct{}

// Check resolves the CAA RRset for domain (a leading "*." marks a wildcard
// request and selects issuewild records when present). A non-empty RRset at
// this name is authoritative: critical records with unknown tags are fatal,
// and issuance requires some relevant record naming "letsencrypt.org". When
// no RRset exists, the check recurses one label at a time up to the public
// suffix (CAA tree-climbing).
func (c caaChecker) Check(ctx *scanContext, domain string, method ValidationMethod) ([]Problem, error) {
	var probs []Problem
	wildcard := false
	if strings.HasPrefix(domain, "*.") {
		wildcard = true
		domain = domain[2:]
	}
	rrs, err := ctx.Lookup(domain, dns.TypeCAA)
	if err != nil {
		probs = append(probs, dnsLookupFailed(domain, "CAA", err))
		return probs, nil
	}
	// check any found caa records
	if len(rrs) > 0 {
		var issue []*dns.CAA
		var issuewild []*dns.CAA
		var criticalUnknown []*dns.CAA
		// Partition the RRset by tag.
		// NOTE(review): unknown tags are treated as critical when Flag == 1;
		// RFC 8659 defines the critical bit as the high bit (128) of the
		// flags octet — confirm this comparison against the dns library.
		for _, rr := range rrs {
			caaRr, ok := rr.(*dns.CAA)
			if !ok {
				continue
			}
			switch caaRr.Tag {
			case "issue":
				issue = append(issue, caaRr)
			case "issuewild":
				issuewild = append(issuewild, caaRr)
			default:
				if caaRr.Flag == 1 {
					criticalUnknown = append(criticalUnknown, caaRr)
				}
			}
		}
		probs = append(probs, debugProblem("CAA",
			"CAA records control authorization for certificate authorities to issue certificates for a domain",
			collateRecords(append(issue, issuewild...))))
		if len(criticalUnknown) > 0 {
			probs = append(probs, caaCriticalUnknown(domain, wildcard, criticalUnknown))
			return probs, nil
		}
		// No issue records and not a wildcard: nothing restricts issuance.
		if len(issue) == 0 && !wildcard {
			return probs, nil
		}
		// Wildcard requests are governed by issuewild records when any exist.
		records := issue
		if wildcard && len(issuewild) > 0 {
			records = issuewild
		}
		for _, r := range records {
			if extractIssuerDomain(r.Value) == "letsencrypt.org" {
				return probs, nil
			}
		}
		probs = append(probs, caaIssuanceNotAllowed(domain, wildcard, records))
		return probs, nil
	}
	// recurse up to the public suffix domain until a caa record is found
	// a.b.c.com -> b.c.com -> c.com until
	if ps, _ := publicsuffix.PublicSuffix(domain); domain != ps && ps != "" {
		splitDomain := strings.SplitN(domain, ".", 2)
		parentProbs, err := c.Check(ctx, splitDomain[1], method)
		if err != nil {
			return nil, fmt.Errorf("error checking caa record on domain: %s, %v", splitDomain[1], err)
		}
		probs = append(probs, parentProbs...)
	}
	return probs, nil
}
// extractIssuerDomain returns the issuer-domain portion of a CAA record
// value, dropping any ";"-separated parameters and surrounding blanks.
func extractIssuerDomain(value string) string {
	// record can be:
	// issuedomain.tld; someparams
	parts := strings.SplitN(value, ";", 2)
	return strings.Trim(parts[0], " \t")
}
// collateRecords renders each CAA record on its own line, suitable for the
// Detail field of a Problem.
func collateRecords(records []*dns.CAA) string {
	lines := make([]string, 0, len(records))
	for _, rec := range records {
		lines = append(lines, rec.String())
	}
	return strings.Join(lines, "\n")
}
// caaCriticalUnknown reports CAA records that carry the critical flag with a
// tag Let's Encrypt does not understand, which blocks issuance entirely.
func caaCriticalUnknown(domain string, wildcard bool, records []*dns.CAA) Problem {
	explanation := fmt.Sprintf(`CAA record(s) exist on %s (wildcard=%t) that are marked as critical but are unknown to Let's Encrypt. `+
		`These record(s) as shown in the detail must be removed, or marked as non-critical, before a certificate can be issued by the Let's Encrypt CA.`, domain, wildcard)
	return Problem{
		Name:        "CAACriticalUnknown",
		Severity:    SeverityFatal,
		Explanation: explanation,
		Detail:      collateRecords(records),
	}
}
// caaIssuanceNotAllowed reports that none of the effective CAA records for
// the domain permit issuance by "letsencrypt.org".
func caaIssuanceNotAllowed(domain string, wildcard bool, records []*dns.CAA) Problem {
	explanation := fmt.Sprintf(`No CAA record on %s (wildcard=%t) contains the issuance domain "letsencrypt.org". `+
		`You must either add an additional record to include "letsencrypt.org" or remove every existing CAA record. `+
		`A list of the CAA records are provided in the details.`, domain, wildcard)
	return Problem{
		Name:        "CAAIssuanceNotAllowed",
		Severity:    SeverityFatal,
		Explanation: explanation,
		Detail:      collateRecords(records),
	}
}
// invalidDomain reports a fatal problem for a name that Let's Encrypt could
// never issue for, with reason carrying the specific syntax failure.
func invalidDomain(domain, reason string) Problem {
	return Problem{
		Name:        "InvalidDomain",
		Severity:    SeverityFatal,
		Explanation: fmt.Sprintf(`"%s" is not a valid domain name that Let's Encrypt would be able to issue a certificate for.`, domain),
		Detail:      reason,
	}
}
// cloudflareChecker determines if the domain is using cloudflare, and whether a certificate has been provisioned by cloudflare yet.
type cloudflareChecker struct{}

// Check probes https:// then (with redirects disabled) http:// for the
// domain, minus any "*." prefix. A successful HTTPS response means a
// certificate is already served; if the Server header identifies Cloudflare
// a CDN advisory is added. If HTTPS fails but plain HTTP responds via
// Cloudflare, SSL has likely not yet been provisioned by Cloudflare.
func (c cloudflareChecker) Check(ctx *scanContext, domain string, method ValidationMethod) ([]Problem, error) {
	var probs []Problem
	domain = strings.TrimPrefix(domain, "*.")
	cl := http.Client{
		Timeout:   httpTimeout * time.Second,
		Transport: makeSingleShotHTTPTransport(),
	}
	resp, err := cl.Get("https://" + domain)
	if err == nil { // no tls error, cert must be issued
		// FIX: close the body so the connection/file descriptor is released
		// (previously leaked).
		resp.Body.Close()
		// check if it's cloudflare
		if hasCloudflareHeader(resp.Header) {
			probs = append(probs, cloudflareCDN(domain))
		}
		return probs, nil
	}
	// disable redirects
	cl.CheckRedirect = func(req *http.Request, via []*http.Request) error {
		return http.ErrUseLastResponse
	}
	// attempt to connect over http with redirects disabled to check cloudflare header
	resp, err = cl.Get("http://" + domain)
	if err != nil {
		return probs, nil
	}
	// FIX: this response body was also never closed.
	defer resp.Body.Close()
	if hasCloudflareHeader(resp.Header) {
		probs = append(probs, cloudflareCDN(domain))
		probs = append(probs, cloudflareSslNotProvisioned(domain))
	}
	return probs, nil
}
func hasCloudflareHeader(h http.Header) bool {
return strings.Contains(strings.ToLower(h.Get("server")), "cloudflare")
}
// cloudflareCDN warns that the domain is fronted by Cloudflare, so an origin
// certificate only protects the origin-to-Cloudflare leg.
func cloudflareCDN(domain string) Problem {
	explanation := fmt.Sprintf(`The domain %s is being served through Cloudflare CDN. Any Let's Encrypt certificate installed on the `+
		`origin server will only encrypt traffic between the server and Cloudflare. It is strongly recommended that the SSL option 'Full SSL (strict)' `+
		`be enabled.`, domain)
	return Problem{
		Name:        "CloudflareCDN",
		Severity:    SeverityWarning,
		Explanation: explanation,
		Detail:      "https://support.cloudflare.com/hc/en-us/articles/200170416-What-do-the-SSL-options-mean-",
	}
}
// cloudflareSslNotProvisioned warns that Cloudflare has not yet provisioned
// an edge certificate for the domain.
func cloudflareSslNotProvisioned(domain string) Problem {
	return Problem{
		Name:        "CloudflareSSLNotProvisioned",
		Severity:    SeverityWarning,
		Explanation: fmt.Sprintf(`The domain %s is being served through Cloudflare CDN and a certificate has not yet been provisioned yet by Cloudflare.`, domain),
		Detail:      "https://support.cloudflare.com/hc/en-us/articles/203045244-How-long-does-it-take-for-Cloudflare-s-SSL-to-activate-",
	}
}
// statusioChecker ensures there is no reported operational problem with the Let's Encrypt service via the status.io public api.
// It is stateless.
type statusioChecker struct{}

// statusioSignificantStatuses denotes which statuses warrant raising a warning.
// 100 (operational) and 200 (undocumented but assume "Planned Maintenance") should not be included.
// https://kb.status.io/developers/status-codes/
var statusioSignificantStatuses = map[int]bool{
	300: true, // Degraded Performance
	400: true, // Partial Service Disruption
	500: true, // Service Disruption
	600: true, // Security Event
}
// Check queries the status.io public API for Let's Encrypt's overall service
// status and raises a warning when the reported status code is one of
// statusioSignificantStatuses. Connectivity failures are deliberately
// ignored, as they say nothing about the scanned domain.
func (c statusioChecker) Check(ctx *scanContext, domain string, method ValidationMethod) ([]Problem, error) {
	var probs []Problem
	// FIX: use a client with an explicit timeout (consistent with the other
	// HTTP checkers in this file) instead of http.Get's default client,
	// which has no timeout and could stall the entire scan.
	cl := http.Client{
		Timeout: httpTimeout * time.Second,
	}
	resp, err := cl.Get("https://api.status.io/1.0/status/55957a99e800baa4470002da")
	if err != nil {
		// some connectivity errors with status.io is probably not worth reporting
		return probs, nil
	}
	defer resp.Body.Close()
	// Only the overall status portion of the API response is of interest.
	apiResp := struct {
		Result struct {
			StatusOverall struct {
				Updated    time.Time `json:"updated"`
				Status     string    `json:"status"`
				StatusCode int       `json:"status_code"`
			} `json:"status_overall"`
		} `json:"result"`
	}{}
	if err := json.NewDecoder(resp.Body).Decode(&apiResp); err != nil {
		return probs, fmt.Errorf("error decoding status.io api response: %v", err)
	}
	if statusioSignificantStatuses[apiResp.Result.StatusOverall.StatusCode] {
		probs = append(probs, statusioNotOperational(apiResp.Result.StatusOverall.Status, apiResp.Result.StatusOverall.Updated))
	}
	probs = append(probs, debugProblem("StatusIO", "The current status.io status for Let's Encrypt",
		fmt.Sprintf("%v", apiResp.Result.StatusOverall.Status)))
	return probs, nil
}
// statusioNotOperational warns that the Let's Encrypt status page reports a
// non-operational state as of the given update time.
func statusioNotOperational(status string, updated time.Time) Problem {
	return Problem{
		Name:     "StatusNotOperational",
		Severity: SeverityWarning,
		Explanation: fmt.Sprintf(`The current status as reported by the Let's Encrypt status page is %s as at %v. `+
			`Depending on the reported problem, this may affect certificate issuance. For more information, please visit the status page.`, status, updated),
		Detail: "https://letsencrypt.status.io/",
	}
}
// crtList maps certificate serial numbers to parsed certificates.
type crtList map[string]*x509.Certificate

// FindWithCommonRegisteredDomain finds any certificates which contain any
// DNSName that shares the Registered Domain `registeredDomain`, sorted
// newest-first (see sortedCertificates).
func (l crtList) FindWithCommonRegisteredDomain(registeredDomain string) sortedCertificates {
	var matched sortedCertificates
	for _, cert := range l {
		for _, name := range cert.DNSNames {
			if regDomain, _ := publicsuffix.EffectiveTLDPlusOne(name); regDomain != registeredDomain {
				continue
			}
			matched = append(matched, cert)
			break
		}
	}
	sort.Sort(matched)
	return matched
}
// GetOldestCertificate returns the certificate with the earliest NotBefore,
// or nil when the list is empty.
func (l crtList) GetOldestCertificate() *x509.Certificate {
	var earliest *x509.Certificate
	for _, cert := range l {
		if earliest == nil || cert.NotBefore.Before(earliest.NotBefore) {
			earliest = cert
		}
	}
	return earliest
}
// CountDuplicates counts how many duplicate certificates there are
// that also contain the name `domain`. Certificates are grouped by their
// sorted, comma-joined DNS name set, which is the returned map key.
func (l crtList) CountDuplicates(domain string) map[string]int {
	counts := map[string]int{}
	for _, cert := range l {
		matchesDomain := false
		for _, name := range cert.DNSNames {
			if name == domain {
				matchesDomain = true
				break
			}
		}
		if !matchesDomain {
			continue
		}
		names := append([]string(nil), cert.DNSNames...)
		sort.Strings(names)
		counts[strings.Join(names, ",")]++
	}
	return counts
}
// rateLimitChecker ensures that the domain is not currently affected
// by domain-based rate limits using crtwatch's database
// (the public certwatch PostgreSQL instance served at crt.sh).
type rateLimitChecker struct {
}
type sortedCertificates []*x509.Certificate
func (certs sortedCertificates) Len() int { return len(certs) }
func (certs sortedCertificates) Swap(i, j int) { certs[i], certs[j] = certs[j], certs[i] }
func (certs sortedCertificates) Less(i, j int) bool {
return certs[j].NotBefore.Before(certs[i].NotBefore)
}
// rateLimitCheckerQuery retrieves, from crt.sh's certwatch database, the DER
// encoding of recent certificates whose identities match a registered
// domain. It carries three fmt.Sprintf placeholders, filled in by
// rateLimitChecker.Check: the registered domain for the full-text identity
// match, the same domain for the NAME_VALUE ILIKE filter, and the minimum
// x509_notBefore timestamp. The issuer_ca_id IN (...) clause restricts
// results to specific CA IDs (presumably the Let's Encrypt issuing CAs —
// verify against crt.sh's ca table).
const rateLimitCheckerQuery = `
WITH ci AS
(SELECT min(sub.CERTIFICATE_ID) ID,
min(sub.ISSUER_CA_ID) ISSUER_CA_ID,
sub.CERTIFICATE DER
FROM
(SELECT *
FROM certificate_and_identities cai
WHERE plainto_tsquery('%s') @@ identities(cai.CERTIFICATE)
AND cai.NAME_VALUE ILIKE ('%%%s%%')
AND x509_notBefore(cai.CERTIFICATE) >= '%s'
AND cai.issuer_ca_id IN (16418, 183267, 183283)
LIMIT 1000) sub
GROUP BY sub.CERTIFICATE)
SELECT ci.DER der
FROM ci
LEFT JOIN LATERAL
(SELECT min(ctle.ENTRY_TIMESTAMP) ENTRY_TIMESTAMP
FROM ct_log_entry ctle
WHERE ctle.CERTIFICATE_ID = ci.ID ) le ON TRUE,
ca
WHERE ci.ISSUER_CA_ID = ca.ID
ORDER BY le.ENTRY_TIMESTAMP DESC;`
// Pointer receiver because we're keeping state across runs
//
// Check queries crt.sh's certwatch database for Let's Encrypt certificates
// issued within the last 7 days for the domain's Registered Domain, and
// reports breaches of the "Certificates per Registered Domain" (50/week)
// and "Duplicate Certificate" (5/week for an identical name set) limits.
// Database problems are surfaced as debug-severity issues rather than
// failing the scan. Set LETSDEBUG_DISABLE_CERTWATCH to skip entirely.
func (c *rateLimitChecker) Check(ctx *scanContext, domain string, method ValidationMethod) ([]Problem, error) {
	if os.Getenv("LETSDEBUG_DISABLE_CERTWATCH") != "" {
		return nil, errNotApplicable
	}
	domain = strings.TrimPrefix(domain, "*.")
	db, err := sql.Open("postgres", "user=guest dbname=certwatch host=crt.sh sslmode=disable connect_timeout=5")
	if err != nil {
		return []Problem{
			internalProblem(fmt.Sprintf("Failed to connect to certwatch database to check rate limits: %v", err), SeverityDebug),
		}, nil
	}
	defer db.Close()
	// Since we are checking rate limits, we need to query the Registered Domain
	// for the domain in question
	registeredDomain, _ := publicsuffix.EffectiveTLDPlusOne(domain)
	timeoutCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	// Avoiding using a prepared statement here because it's being weird with crt.sh
	q := fmt.Sprintf(rateLimitCheckerQuery,
		registeredDomain, registeredDomain, time.Now().Add(-168*time.Hour).Format(time.RFC3339))
	rows, err := db.QueryContext(timeoutCtx, q)
	if err != nil && err != sql.ErrNoRows {
		return []Problem{
			internalProblem(fmt.Sprintf("Failed to query certwatch database to check rate limits: %v", err), SeverityDebug),
		}, nil
	}
	probs := []Problem{}
	// Read in the DER-encoded certificates
	certs := crtList{}
	if rows != nil {
		// FIX: the result set was previously never closed, leaking the
		// database connection; the nil guard also prevents a panic on the
		// (tolerated) sql.ErrNoRows path above, where rows is nil.
		defer rows.Close()
		var certBytes []byte
		for rows.Next() {
			if err := rows.Scan(&certBytes); err != nil {
				probs = append(probs, internalProblem(fmt.Sprintf("Failed to query certwatch database while checking rate limits: %v", err), SeverityDebug))
				break
			}
			crt, err := x509.ParseCertificate(certBytes)
			if err != nil {
				probs = append(probs, internalProblem(fmt.Sprintf("Failed to parse certificate while checking rate limits: %v", err), SeverityDebug))
				continue
			}
			certs[crt.SerialNumber.String()] = crt
		}
		if err := rows.Err(); err != nil {
			return []Problem{
				internalProblem(fmt.Sprintf("Failed to query certwatch database to check rate limits: %v", err), SeverityDebug),
			}, nil
		}
	}
	var debug string
	// Limit: Certificates per Registered Domain
	// TODO: implement Renewal Exemption
	certsTowardsRateLimit := certs.FindWithCommonRegisteredDomain(registeredDomain)
	if len(certs) > 0 && len(certsTowardsRateLimit) >= 50 {
		dropOff := certs.GetOldestCertificate().NotBefore.Add(7 * 24 * time.Hour)
		dropOffDiff := time.Until(dropOff).Truncate(time.Minute)
		probs = append(probs, rateLimited(domain, fmt.Sprintf("The 'Certificates per Registered Domain' limit ("+
			"50 certificates per week that share the same Registered Domain: %s) has been exceeded. "+
			"There is no way to work around this rate limit. "+
			"The next non-renewal certificate for this Registered Domain should be issuable after %v (%v from now).",
			registeredDomain, dropOff, dropOffDiff)))
	}
	for _, cert := range certsTowardsRateLimit {
		debug = fmt.Sprintf("%s\nSerial: %s\nNotBefore: %v\nNames: %v\n", debug, cert.SerialNumber.String(), cert.NotBefore, cert.DNSNames)
	}
	// Limit: Duplicate Certificate limit of 5 certificates per week
	for names, dupes := range certs.CountDuplicates(domain) {
		if dupes < 5 {
			continue
		}
		probs = append(probs, rateLimited(domain,
			fmt.Sprintf(`The Duplicate Certificate limit (5 certificates with the exact same set of domains per week) has been `+
				`exceeded and is affecting the domain "%s". The exact set of domains affected is: "%v". It may be possible to avoid this `+
				`rate limit by issuing a certificate with an additional or different domain name.`, domain, names)))
	}
	if debug != "" {
		probs = append(probs, debugProblem("RateLimit",
			fmt.Sprintf("%d Certificates contributing to rate limits for this domain", len(certsTowardsRateLimit)), debug))
	}
	return probs, nil
}
// rateLimited builds the rate-limit problem for domain, pointing users at
// the crt.sh listing for its Registered Domain.
func rateLimited(domain, detail string) Problem {
	regDomain, _ := publicsuffix.EffectiveTLDPlusOne(domain)
	return Problem{
		Name:     "RateLimit",
		Severity: SeverityError,
		Explanation: fmt.Sprintf(`%s is currently affected by Let's Encrypt-based rate limits (https://letsencrypt.org/docs/rate-limits/). `+
			`You may review certificates that have already been issued by visiting https://crt.sh/?q=%%%s . `+
			`Please note that it is not possible to ask for a rate limit to be manually cleared.`, domain, regDomain),
		Detail: detail,
	}
}
// acmeStagingChecker tries to create an authorization on
// Let's Encrypt's staging server and parse the error urn
// to see if there's anything interesting reported.
type acmeStagingChecker struct {
	client   acme.Client // staging-directory ACME client, built lazily by buildAcmeClient
	account  acme.Account // account loaded from LETSDEBUG_ACMESTAGING_ACCOUNTFILE
	clientMu sync.Mutex   // guards the lazy initialization of client/account
}
// buildAcmeClient constructs the staging-directory ACME client and loads the
// pre-registered account (PKCS#1 private key in PEM plus account URL) from
// the JSON file named by LETSDEBUG_ACMESTAGING_ACCOUNTFILE, defaulting to
// "acme-account.json". Callers must hold clientMu (see Check).
func (c *acmeStagingChecker) buildAcmeClient() error {
	cl, err := acme.NewClient("https://acme-staging-v02.api.letsencrypt.org/directory")
	if err != nil {
		return err
	}
	// Give the ACME CA more time to complete challenges
	cl.PollTimeout = 100 * time.Second
	regrPath := os.Getenv("LETSDEBUG_ACMESTAGING_ACCOUNTFILE")
	if regrPath == "" {
		regrPath = "acme-account.json"
	}
	buf, err := ioutil.ReadFile(regrPath)
	if err != nil {
		return err
	}
	var out struct {
		PEM string `json:"pem"`
		URL string `json:"url"`
	}
	if err := json.Unmarshal(buf, &out); err != nil {
		return err
	}
	block, _ := pem.Decode([]byte(out.PEM))
	// FIX: pem.Decode returns a nil block when the input contains no PEM
	// data; previously this crashed with a nil pointer dereference below.
	if block == nil {
		return fmt.Errorf("no PEM data found in %s", regrPath)
	}
	pk, err := x509.ParsePKCS1PrivateKey(block.Bytes)
	if err != nil {
		return err
	}
	c.account = acme.Account{PrivateKey: pk, URL: out.URL}
	c.client = cl
	return nil
}
// Check performs a real test authorization for domain against the Let's
// Encrypt staging environment and surfaces any interesting errors the CA
// reports. The client/account are built lazily on first use under clientMu.
// Skipped entirely when LETSDEBUG_DISABLE_ACMESTAGING is set.
func (c *acmeStagingChecker) Check(ctx *scanContext, domain string, method ValidationMethod) ([]Problem, error) {
	if os.Getenv("LETSDEBUG_DISABLE_ACMESTAGING") != "" {
		return nil, errNotApplicable
	}
	// Lazily initialize the ACME client exactly once across scans.
	c.clientMu.Lock()
	if c.account.PrivateKey == nil {
		if err := c.buildAcmeClient(); err != nil {
			c.clientMu.Unlock()
			return []Problem{
				internalProblem(fmt.Sprintf("Couldn't setup Let's Encrypt staging checker, skipping: %v", err), SeverityWarning),
			}, nil
		}
	}
	c.clientMu.Unlock()
	probs := []Problem{}
	order, err := c.client.NewOrder(c.account, []acme.Identifier{{Type: "dns", Value: domain}})
	if err != nil {
		if p := translateAcmeError(domain, err); p.Name != "" {
			probs = append(probs, p)
		}
		probs = append(probs, debugProblem("LetsEncryptStaging", "Order creation error", err.Error()))
		return probs, nil
	}
	// Fetch and update every authorization concurrently. probs and
	// authzFailures are shared between goroutines and guarded by probsMu.
	var wg sync.WaitGroup
	wg.Add(len(order.Authorizations))
	var probsMu sync.Mutex
	unhandledError := func(err error) {
		probsMu.Lock()
		defer probsMu.Unlock()
		probs = append(probs, internalProblem("An unknown problem occurred while performing a test "+
			"authorization against the Let's Encrypt staging service: "+err.Error(), SeverityWarning))
	}
	authzFailures := []string{}
	for _, authzURL := range order.Authorizations {
		go func(authzURL string) {
			defer wg.Done()
			authz, err := c.client.FetchAuthorization(c.account, authzURL)
			if err != nil {
				unhandledError(err)
				return
			}
			// Pick the challenge matching the requested validation method.
			chal, ok := authz.ChallengeMap[string(method)]
			if !ok {
				unhandledError(fmt.Errorf("Missing challenge method (want %v): %v", method, authz.ChallengeMap))
				return
			}
			// An UpdateChallenge failure is the interesting outcome here:
			// its error detail is translated into a user-facing Problem.
			if _, err := c.client.UpdateChallenge(c.account, chal); err != nil {
				probsMu.Lock()
				if p := translateAcmeError(domain, err); p.Name != "" {
					probs = append(probs, p)
				}
				authzFailures = append(authzFailures, err.Error())
				probsMu.Unlock()
			}
		}(authzURL)
	}
	wg.Wait()
	if len(authzFailures) > 0 {
		probs = append(probs, debugProblem("LetsEncryptStaging",
			fmt.Sprintf("Challenge update failures for %s in order %s", domain, order.URL),
			strings.Join(authzFailures, "\n")))
	} else {
		probs = append(probs, debugProblem("LetsEncryptStaging", "Order for "+domain, order.URL))
	}
	return probs, nil
}
// translateAcmeError maps an error returned by the staging ACME server to a
// user-facing Problem. A zero Problem{} means the error carries no useful
// signal (such as the ordinary "unauthorized" from an unsolvable test
// challenge); non-acme errors become a generic internal warning.
func translateAcmeError(domain string, err error) Problem {
	acmeErr, isAcme := err.(acme.Problem)
	if !isAcme {
		return internalProblem(fmt.Sprintf("An unknown issue occurred when performing a test authorization "+
			"against the Let's Encrypt staging service: %v", err), SeverityWarning)
	}
	urn := strings.TrimPrefix(acmeErr.Type, "urn:ietf:params:acme:error:")
	switch urn {
	case "rejectedIdentifier", "unknownHost", "rateLimited", "caa", "dns", "connection":
		// Boulder can send error:dns when _acme-challenge is NXDOMAIN, which is
		// equivalent to unauthorized
		if strings.Contains(acmeErr.Detail, "NXDOMAIN looking up TXT") {
			return Problem{}
		}
		return letsencryptProblem(domain, acmeErr.Detail, SeverityError)
	case "serverInternal":
		// When something bad is happening on staging
		return letsencryptProblem(domain,
			fmt.Sprintf(`There may be internal issues on the staging service: %v`, acmeErr.Detail), SeverityWarning)
	case "unauthorized":
		// Unauthorized is what we expect, except for these exceptions that we should handle:
		// - When VA OR RA is checking Google Safe Browsing (groan)
		if strings.Contains(acmeErr.Detail, "considered an unsafe domain") {
			return letsencryptProblem(domain, acmeErr.Detail, SeverityError)
		}
		return Problem{}
	default:
		return Problem{}
	}
}
// letsencryptProblem wraps detail reported by the staging CA into a Problem
// at the given severity.
func letsencryptProblem(domain, detail string, severity SeverityLevel) Problem {
	explanation := fmt.Sprintf(`A test authorization for %s to the Let's Encrypt staging service has revealed `+
		`issues that may prevent any certificate for this domain being issued.`, domain)
	return Problem{
		Name:        "IssueFromLetsEncrypt",
		Explanation: explanation,
		Detail:      detail,
		Severity:    severity,
	}
}
// ofacSanctionChecker checks whether a Registered Domain is present on the XML sanctions list
// (https://www.treasury.gov/ofac/downloads/sdn.xml).
// It is disabled by default, and must be enabled with the environment variable LETSDEBUG_ENABLE_OFAC=1
type ofacSanctionChecker struct {
	muRefresh sync.RWMutex        // guards domains against concurrent refresh (poll) and lookup (Check)
	domains   map[string]struct{} // set of sanctioned Registered Domains, rebuilt by poll
}
// Check reports an error-severity problem when the domain's Registered
// Domain appears in the sanctioned set. It only runs when the
// LETSDEBUG_ENABLE_OFAC=1 environment variable is set.
func (c *ofacSanctionChecker) Check(ctx *scanContext, domain string, method ValidationMethod) ([]Problem, error) {
	if os.Getenv("LETSDEBUG_ENABLE_OFAC") != "1" {
		return nil, errNotApplicable
	}
	c.muRefresh.RLock()
	defer c.muRefresh.RUnlock()
	rd, _ := publicsuffix.EffectiveTLDPlusOne(domain)
	if _, isSanctioned := c.domains[rd]; !isSanctioned {
		return nil, nil
	}
	return []Problem{{
		Name: "SanctionedDomain",
		Explanation: fmt.Sprintf("The Registered Domain %s was found on the United States' OFAC "+
			"Specially Designated Nationals and Blocked Persons (SDN) List. Let's Encrypt are unable to issue certificates "+
			"for sanctioned entities. Search on https://sanctionssearch.ofac.treas.gov/ for futher details.", rd),
		Severity: SeverityError,
	}}, nil
}
// setup starts a background goroutine that refreshes the sanctioned-domain
// set from the OFAC SDN list once per 24 hours. It is a no-op unless the
// LETSDEBUG_ENABLE_OFAC=1 environment variable is set.
func (c *ofacSanctionChecker) setup() {
	if os.Getenv("LETSDEBUG_ENABLE_OFAC") != "1" {
		return
	}
	c.domains = map[string]struct{}{}
	go func() {
		for {
			// Poll failures are logged and retried at the next interval.
			if err := c.poll(); err != nil {
				fmt.Printf("OFAC SDN poller failed: %v\n", err)
			}
			time.Sleep(24 * time.Hour)
		}
	}()
}
// poll downloads the OFAC SDN XML list and extracts the Registered Domain of
// every <id> entry whose <idType> is "Website", then atomically replaces
// c.domains with the new set. The XML is walked as a token stream, so the
// (large) document is never held in memory at once.
func (c *ofacSanctionChecker) poll() error {
	req, _ := http.NewRequest(http.MethodGet, "https://www.treasury.gov/ofac/downloads/sdn.xml", nil)
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	req = req.WithContext(ctx)
	req.Header.Set("User-Agent", "Let's Debug (https://letsdebug.net)")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	dec := xml.NewDecoder(resp.Body)
	registeredDomains := map[string]struct{}{}
	// isID tracks whether the cursor is inside an <id> element that has not
	// been disqualified by a non-"Website" <idType>.
	isID := false
	for {
		tok, _ := dec.Token()
		if tok == nil {
			// End of stream (or a read error): stop scanning.
			break
		}
		switch el := tok.(type) {
		case xml.StartElement:
			if el.Name.Local == "id" {
				isID = true
				break
			}
			if el.Name.Local == "idType" {
				// Any idType other than "Website" disqualifies this <id>.
				next, _ := dec.Token()
				if next == nil {
					break
				}
				raw, ok := next.(xml.CharData)
				if !ok {
					break
				}
				if string(raw) != "Website" {
					isID = false
					break
				}
				break
			}
			if el.Name.Local == "idNumber" && isID {
				// For Website entries, idNumber carries the URL/host itself.
				next, _ := dec.Token()
				if next == nil {
					break
				}
				raw, ok := next.(xml.CharData)
				if !ok {
					break
				}
				if rd := c.extractRegisteredDomain(string(raw)); rd != "" {
					registeredDomains[rd] = struct{}{}
				}
			}
		case xml.EndElement:
			if el.Name.Local == "id" {
				isID = false
				break
			}
		}
	}
	// Swap in the freshly built set under the write lock.
	c.muRefresh.Lock()
	defer c.muRefresh.Unlock()
	c.domains = registeredDomains
	return nil
}
// extractRegisteredDomain normalizes an SDN "Website" entry (which may be a
// bare host or a full URL) down to its Registered Domain, returning "" when
// nothing usable can be derived.
func (c *ofacSanctionChecker) extractRegisteredDomain(d string) string {
	d = strings.ToLower(strings.TrimSpace(d))
	if d == "" {
		return ""
	}
	// If there's a protocol or path, then we need to parse the URL and extract the host
	if strings.Contains(d, "/") {
		parsed, err := url.Parse(d)
		if err != nil {
			return ""
		}
		d = parsed.Host
	}
	registered, _ := publicsuffix.EffectiveTLDPlusOne(d)
	return registered
}

View File

@@ -1,268 +0,0 @@
package letsdebug
import (
"bytes"
"fmt"
"net"
"strings"
"sync"
"github.com/miekg/dns"
)
var (
	// likelyModemRouters lists Server header values typically emitted by the
	// administrative web interfaces of consumer modems/routers.
	likelyModemRouters = []string{"micro_httpd", "cisco-IOS", "LANCOM", "Mini web server 1.0 ZTE corp 2005."}
	// isLikelyNginxTestcookiePayloads are body fragments produced by
	// nginx-testcookie-style anti-bot JavaScript challenge pages.
	isLikelyNginxTestcookiePayloads = [][]byte{
		[]byte(`src="/aes.js"`),
		[]byte(`src="/aes.min.js"`),
		[]byte(`var a=toNumbers`)}
	// isHTTP497Payloads are server error messages indicating that a plain
	// HTTP request was sent to an HTTPS port.
	isHTTP497Payloads = [][]byte{
		// httpd: https://github.com/apache/httpd/blob/e820d1ea4d3f1f5152574dbaa13979887a5c14b7/modules/ssl/ssl_engine_kernel.c#L322
		[]byte("You're speaking plain HTTP to an SSL-enabled server port"),
		// nginx: https://github.com/nginx/nginx/blob/15544440425008d5ad39a295b826665ad56fdc90/src/http/ngx_http_special_response.c#L274
		[]byte("400 The plain HTTP request was sent to HTTPS port"),
	}
)
// dnsAChecker checks if there are any issues in Unbound looking up the A and
// AAAA records for a domain (such as DNSSEC issues or dead nameservers)
type dnsAChecker struct{}

// Check resolves the A and AAAA records for domain in parallel and reports
// lookup failures, records pointing at reserved/special-use addresses, and
// the complete absence of address records. Only applies to HTTP-01.
func (c dnsAChecker) Check(ctx *scanContext, domain string, method ValidationMethod) ([]Problem, error) {
	if method != HTTP01 {
		return nil, errNotApplicable
	}

	var (
		probs         []Problem
		aRRs, aaaaRRs []dns.RR
		aErr, aaaaErr error
		wg            sync.WaitGroup
	)
	wg.Add(2)
	go func() {
		defer wg.Done()
		aaaaRRs, aaaaErr = ctx.Lookup(domain, dns.TypeAAAA)
	}()
	go func() {
		defer wg.Done()
		aRRs, aErr = ctx.Lookup(domain, dns.TypeA)
	}()
	wg.Wait()

	if aErr != nil {
		probs = append(probs, dnsLookupFailed(domain, "A", aErr))
	}
	if aaaaErr != nil {
		probs = append(probs, dnsLookupFailed(domain, "AAAA", aaaaErr))
	}

	// Flag any record that points into a reserved/special-use range.
	for _, rr := range aRRs {
		if a, ok := rr.(*dns.A); ok && isAddressReserved(a.A) {
			probs = append(probs, reservedAddress(domain, a.A.String()))
		}
	}
	for _, rr := range aaaaRRs {
		if quad, ok := rr.(*dns.AAAA); ok && isAddressReserved(quad.AAAA) {
			probs = append(probs, reservedAddress(domain, quad.AAAA.String()))
		}
	}

	var records []string
	for _, rr := range append(aRRs, aaaaRRs...) {
		records = append(records, rr.String())
	}
	if len(records) > 0 {
		probs = append(probs, debugProblem("HTTPRecords", "A and AAAA records found for this domain", strings.Join(records, "\n")))
	} else {
		probs = append(probs, noRecords(domain, "No A or AAAA records found."))
	}
	return probs, nil
}
// httpAccessibilityChecker checks whether an HTTP ACME validation request
// would lead to any issues such as:
// - Bad redirects
// - IPs not listening on port 80
// It is stateless; the probing logic lives in its Check method.
type httpAccessibilityChecker struct{}
// Check probes every A/AAAA address of the domain with a simulated ACME
// HTTP-01 request and reports connectivity problems, cross-address response
// discrepancies, and a few well-known misconfiguration signatures.
func (c httpAccessibilityChecker) Check(ctx *scanContext, domain string, method ValidationMethod) ([]Problem, error) {
	if method != HTTP01 {
		return nil, errNotApplicable
	}

	var probs []Problem

	var ips []net.IP

	// Collect all addresses. Lookup errors are intentionally ignored here
	// because dnsAChecker already reports DNS failures for this domain.
	rrs, _ := ctx.Lookup(domain, dns.TypeAAAA)
	for _, rr := range rrs {
		aaaa, ok := rr.(*dns.AAAA)
		if !ok {
			continue
		}
		ips = append(ips, aaaa.AAAA)
	}
	rrs, _ = ctx.Lookup(domain, dns.TypeA)
	for _, rr := range rrs {
		a, ok := rr.(*dns.A)
		if !ok {
			continue
		}
		ips = append(ips, a.A)
	}

	// Nothing to probe; the missing records are reported elsewhere.
	if len(ips) == 0 {
		return probs, nil
	}

	// Track whether responses differ between any of the A/AAAA addresses
	// for the domain
	allCheckResults := []httpCheckResult{}

	var debug []string

	for _, ip := range ips {
		res, prob := checkHTTP(ctx, domain, ip)
		allCheckResults = append(allCheckResults, res)
		if !prob.IsZero() {
			probs = append(probs, prob)
		}
		debug = append(debug, fmt.Sprintf("Request to: %s/%s, Result: %s, Issue: %s\nTrace:\n%s\n",
			domain, ip.String(), res.String(), prob.Name, strings.Join(res.DialStack, "\n")))
	}

	// Filter out the servers that didn't respond at all
	var nonZeroResults []httpCheckResult
	for _, v := range allCheckResults {
		if v.IsZero() {
			continue
		}
		nonZeroResults = append(nonZeroResults, v)
	}
	// Compare every responding address against the first; a mismatch in
	// status codes, Server header or redirect count suggests the addresses
	// point at different servers.
	if len(nonZeroResults) > 1 {
		firstResult := nonZeroResults[0]
		for _, otherResult := range nonZeroResults[1:] {
			if firstResult.StatusCode != otherResult.StatusCode ||
				firstResult.ServerHeader != otherResult.ServerHeader ||
				firstResult.NumRedirects != otherResult.NumRedirects ||
				firstResult.InitialStatusCode != otherResult.InitialStatusCode {
				probs = append(probs, multipleIPAddressDiscrepancy(domain, firstResult, otherResult))
			}
		}
	}

	probs = append(probs, debugProblem("HTTPCheck", "Requests made to the domain", strings.Join(debug, "\n")))

	// Heuristic: the Server header matches a known modem/router admin UI.
	if res := isLikelyModemRouter(allCheckResults); !res.IsZero() {
		probs = append(probs, Problem{
			Name: "PortForwarding",
			Explanation: "A request to your domain revealed that the web server that responded may be " +
				"the administrative interface of a modem or router. This can indicate an issue with the port forwarding " +
				"setup on that modem or router. You may need to reconfigure the device to properly forward traffic to your " +
				"intended webserver.",
			Detail: fmt.Sprintf(`The web server that responded identified itself as "%s", `+
				"which is a known webserver commonly used by modems/routers.", res.ServerHeader),
			Severity: SeverityWarning,
		})
	}

	// Heuristic: the body matches the nginx testcookie anti-bot module.
	if res := isLikelyNginxTestcookie(allCheckResults); !res.IsZero() {
		probs = append(probs, Problem{
			Name: "BlockedByNginxTestCookie",
			Explanation: "The validation request to this domain was blocked by a deployment of the nginx " +
				"testcookie module (https://github.com/kyprizel/testcookie-nginx-module). This module is designed to " +
				"block robots, and causes the Let's Encrypt validation process to fail. The server administrator can " +
				"solve this issue by disabling the module (`testcookie off;`) for requests under the path of `/.well-known" +
				"/acme-challenge/`.",
			Detail:   fmt.Sprintf("The server at %s produced this result.", res.IP.String()),
			Severity: SeverityError,
		})
	}

	// Heuristic: the body matches an "HTTP request sent to HTTPS port" page.
	if res := isHTTP497(allCheckResults); !res.IsZero() {
		probs = append(probs, Problem{
			Name: "HttpOnHttpsPort",
			Explanation: "A validation request to this domain resulted in an HTTP request being made to a port that expects " +
				"to receive HTTPS requests. This could be the result of an incorrect redirect (such as to http://example.com:443/) " +
				"or it could be the result of a webserver misconfiguration, such as trying to enable SSL on a port 80 virtualhost.",
			Detail:   strings.Join(res.DialStack, "\n"),
			Severity: SeverityError,
		})
	}

	return probs, nil
}
// noRecords is the fatal problem reported when no usable A or AAAA records
// could be resolved for the domain at all.
func noRecords(name, rrSummary string) Problem {
	return Problem{
		Name: "NoRecords",
		// Fixed duplicated word ("able to to connect") in the user-facing text.
		Explanation: fmt.Sprintf(`No valid A or AAAA records could be ultimately resolved for %s. `+
			`This means that Let's Encrypt would not be able to connect to your domain to perform HTTP validation, since `+
			`it would not know where to connect to.`, name),
		Detail:   rrSummary,
		Severity: SeverityFatal,
	}
}
// reservedAddress builds the fatal problem reported when a domain points at
// a private or otherwise IANA/IETF-reserved IP address.
func reservedAddress(name, address string) Problem {
	explanation := fmt.Sprintf(`A private, inaccessible, IANA/IETF-reserved IP address was found for %s. Let's Encrypt will always fail HTTP validation `+
		`for any domain that is pointing to an address that is not routable on the internet. You should either remove this address `+
		`and replace it with a public one or use the DNS validation method instead.`, name)
	return Problem{
		Name:        "ReservedAddress",
		Explanation: explanation,
		Detail:      address,
		Severity:    SeverityFatal,
	}
}
// multipleIPAddressDiscrepancy is reported when two addresses of the same
// domain answered the validation probe differently.
func multipleIPAddressDiscrepancy(domain string, result1, result2 httpCheckResult) Problem {
	explanation := fmt.Sprintf(`%s has multiple IP addresses in its DNS records. While they appear to be accessible on the network, `+
		`we have detected that they produce differing results when sent an ACME HTTP validation request. This may indicate that `+
		`some of the IP addresses may unintentionally point to different servers, which would cause validation to fail.`,
		domain)
	return Problem{
		Name:        "MultipleIPAddressDiscrepancy",
		Explanation: explanation,
		Detail:      result1.String() + " vs " + result2.String(),
		Severity:    SeverityWarning,
	}
}
// isLikelyModemRouter returns the first result whose Server header matches a
// known modem/router administrative interface, or a zero result otherwise.
func isLikelyModemRouter(results []httpCheckResult) httpCheckResult {
	for _, res := range results {
		for _, known := range likelyModemRouters {
			if res.ServerHeader != known {
				continue
			}
			return res
		}
	}
	return httpCheckResult{}
}
// isLikelyNginxTestcookie returns the first result whose body contains a
// known nginx testcookie-module payload, or a zero result otherwise.
func isLikelyNginxTestcookie(results []httpCheckResult) httpCheckResult {
	for _, res := range results {
		for _, payload := range isLikelyNginxTestcookiePayloads {
			if !bytes.Contains(res.Content, payload) {
				continue
			}
			return res
		}
	}
	return httpCheckResult{}
}
// isHTTP497 returns the first result whose body matches a known
// "plain HTTP request sent to HTTPS port" error page, or a zero result.
func isHTTP497(results []httpCheckResult) httpCheckResult {
	for _, res := range results {
		for _, payload := range isHTTP497Payloads {
			if !bytes.Contains(res.Content, payload) {
				continue
			}
			return res
		}
	}
	return httpCheckResult{}
}

View File

@@ -1,310 +0,0 @@
package letsdebug
import (
"context"
"crypto/tls"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
"strconv"
"strings"
"time"
)
const (
	// httpTimeout is the per-request timeout, in seconds, applied both to
	// dialing and to the overall request context.
	httpTimeout = 10
)

// redirectError is a sentinel error type used to carry a descriptive
// redirect failure out of the http.Client's CheckRedirect callback.
type redirectError string

func (e redirectError) Error() string {
	return string(e)
}

// httpCheckResult captures everything observed while probing one IP address
// of a domain with a simulated ACME HTTP-01 validation request.
type httpCheckResult struct {
	StatusCode        int       // final status code after following redirects
	ServerHeader      string    // Server header of the final response
	IP                net.IP    // address that was probed
	InitialStatusCode int       // status code of the very first response
	NumRedirects      int       // number of redirects followed
	FirstDial         time.Time // time of the first dial; baseline for Trace timestamps
	DialStack         []string  // human-readable trace of dials, redirects and responses
	Content           []byte    // response body (possibly truncated by the reader limit)
}
// Trace appends a timestamped entry to the result's dial stack. The first
// call establishes the zero point for all subsequent timestamps.
func (r *httpCheckResult) Trace(s string) {
	if r.FirstDial.IsZero() {
		r.FirstDial = time.Now()
	}
	elapsedMs := time.Since(r.FirstDial).Nanoseconds() / 1e6
	r.DialStack = append(r.DialStack, fmt.Sprintf("@%dms: %s", elapsedMs, s))
}
// IsZero reports whether this result is empty, i.e. no HTTP response was
// ever received (a real response cannot carry a zero status code).
func (r httpCheckResult) IsZero() bool {
	return r.StatusCode == 0
}
// String renders the result as a compact, comma-separated summary suitable
// for inclusion in problem details.
func (r httpCheckResult) String() string {
	addrType := "IPv4"
	if r.IP.To4() == nil {
		addrType = "IPv6"
	}
	parts := []string{
		"Address=" + r.IP.String(),
		"Address Type=" + addrType,
		"Server=" + r.ServerHeader,
		"HTTP Status=" + strconv.Itoa(r.InitialStatusCode),
	}
	if r.NumRedirects > 0 {
		parts = append(parts,
			"Number of Redirects="+strconv.Itoa(r.NumRedirects),
			"Final HTTP Status="+strconv.Itoa(r.StatusCode))
	}
	return "[" + strings.Join(parts, ",") + "]"
}
// checkHTTPTransport wraps another RoundTripper so that every response and
// transport-level error is recorded into the shared httpCheckResult trace.
type checkHTTPTransport struct {
	transport http.RoundTripper
	result    *httpCheckResult
}
// RoundTrip delegates to the wrapped transport, tracing any transport error
// and the status of every response (including intermediate redirects).
func (t checkHTTPTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	resp, err := t.transport.RoundTrip(req)
	if t.result != nil {
		if err != nil {
			t.result.Trace(fmt.Sprintf("Experienced error: %v", err))
		}
		if resp != nil {
			// Remember the first status seen; later ones come from redirects.
			if t.result.InitialStatusCode == 0 {
				t.result.InitialStatusCode = resp.StatusCode
			}
			t.result.Trace(fmt.Sprintf("Server response: HTTP %s", resp.Status))
		}
	}
	return resp, err
}
func makeSingleShotHTTPTransport() *http.Transport {
return &http.Transport{
// Boulder VA's HTTP transport settings
// https://github.com/letsencrypt/boulder/blob/387e94407c58fe0ff65207a89304776ee7417410/va/http.go#L143-L160
DisableKeepAlives: true,
IdleConnTimeout: time.Second,
TLSHandshakeTimeout: 10 * time.Second,
MaxIdleConns: 1,
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
},
}
}
// checkHTTP simulates a Let's Encrypt HTTP-01 validation request against a
// single IP address of domain, following redirects the way Boulder does.
// It returns the observed result (always populated, even on failure) and a
// non-zero Problem when the probe uncovered an issue.
func checkHTTP(scanCtx *scanContext, domain string, address net.IP) (httpCheckResult, Problem) {
	dialer := net.Dialer{
		Timeout: httpTimeout * time.Second,
	}

	checkRes := &httpCheckResult{
		IP:        address,
		DialStack: []string{},
	}
	var redirErr redirectError

	baseHTTPTransport := makeSingleShotHTTPTransport()
	// Custom dialer: the initial request is pinned to `address`, while any
	// host reached via redirect is resolved through Unbound instead.
	baseHTTPTransport.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
		host, port, _ := net.SplitHostPort(addr)
		host = normalizeFqdn(host)

		dialFunc := func(ip net.IP, port string) (net.Conn, error) {
			checkRes.Trace(fmt.Sprintf("Dialing %s", ip.String()))
			// IPv6 literals need brackets in host:port notation.
			if ip.To4() == nil {
				return dialer.DialContext(ctx, "tcp", "["+ip.String()+"]:"+port)
			}
			return dialer.DialContext(ctx, "tcp", ip.String()+":"+port)
		}

		// Only override the address for this specific domain.
		// We don't want to mangle redirects.
		if host == domain {
			return dialFunc(address, port)
		}

		// For other hosts, we need to use Unbound to resolve the name
		otherAddr, err := scanCtx.LookupRandomHTTPRecord(host)
		if err != nil {
			return nil, err
		}
		return dialFunc(otherAddr, port)
	}

	cl := http.Client{
		Transport: checkHTTPTransport{
			result:    checkRes,
			transport: baseHTTPTransport,
		},
		// boulder: va.go fetchHTTP
		// Reject redirect chains Boulder would also reject: too many hops,
		// non-standard ports, non-http(s) schemes, or a malformed
		// ".well-known" hostname caused by a missing trailing slash.
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			checkRes.NumRedirects++

			if len(via) >= 10 {
				redirErr = redirectError(fmt.Sprintf("Too many (%d) redirects, last redirect was to: %s", len(via), req.URL.String()))
				return redirErr
			}

			checkRes.Trace(fmt.Sprintf("Received redirect to %s", req.URL.String()))

			host := req.URL.Host
			if _, p, err := net.SplitHostPort(host); err == nil {
				if port, _ := strconv.Atoi(p); port != 80 && port != 443 {
					redirErr = redirectError(fmt.Sprintf("Bad port number provided when fetching %s: %s", req.URL.String(), p))
					return redirErr
				}
			}

			scheme := strings.ToLower(req.URL.Scheme)
			if scheme != "http" && scheme != "https" {
				redirErr = redirectError(fmt.Sprintf("Bad scheme provided when fetching %s: %s", req.URL.String(), scheme))
				return redirErr
			}

			// Also check for domain.tld.well-known/acme-challenge
			if strings.HasSuffix(req.URL.Hostname(), ".well-known") {
				redirErr = redirectError(fmt.Sprintf("It appears that a redirect was generated by your web server that is missing a trailing "+
					"slash after your domain name: %v. Check your web server configuration and .htaccess for Redirect/RedirectMatch/RewriteRule.",
					req.URL.String()))
				return redirErr
			}

			return nil
		},
	}

	reqURL := "http://" + domain + "/.well-known/acme-challenge/" + scanCtx.httpRequestPath
	checkRes.Trace(fmt.Sprintf("Making a request to %s (using initial IP %s)", reqURL, address))
	req, err := http.NewRequest("GET", reqURL, nil)
	if err != nil {
		return *checkRes, internalProblem(fmt.Sprintf("Failed to construct validation request: %v", err), SeverityError)
	}

	req.Header.Set("Accept", "*/*")
	req.Header.Set("User-Agent", "Mozilla/5.0 (compatible; Let's Debug emulating Let's Encrypt validation server; +https://letsdebug.net)")

	ctx, cancel := context.WithTimeout(context.Background(), httpTimeout*time.Second)
	defer cancel()
	req = req.WithContext(ctx)

	resp, err := cl.Do(req)
	// Capture whatever was observed, even if the request ultimately failed.
	if resp != nil {
		checkRes.StatusCode = resp.StatusCode
		checkRes.ServerHeader = resp.Header.Get("Server")
	}
	if err != nil {
		// Prefer the richer redirect error captured in CheckRedirect over
		// the generic url.Error wrapper returned by the client.
		if redirErr != "" {
			err = redirErr
		}
		return *checkRes, translateHTTPError(domain, address, err, checkRes.DialStack)
	}

	defer resp.Body.Close()

	// Read at most ~1KB of the body, or enough to cover the configured
	// expected response, whichever is larger.
	maxLen := 1024
	if l := len(scanCtx.httpExpectResponse) + 2; l > maxLen {
		maxLen = l
	}

	r := io.LimitReader(resp.Body, int64(maxLen))
	// NOTE(review): ioutil.ReadAll is deprecated in favor of io.ReadAll;
	// left as-is so the file's io/ioutil import remains used.
	buf, err := ioutil.ReadAll(r)
	checkRes.Content = buf

	// If we expect a certain response, check for it
	if scanCtx.httpExpectResponse != "" {
		if err != nil {
			return *checkRes, translateHTTPError(domain, address,
				fmt.Errorf(`This test expected the server to respond with "%s" but instead we experienced an error reading the response: %v`,
					scanCtx.httpExpectResponse, err),
				checkRes.DialStack)
		} else if respStr := string(buf); respStr != scanCtx.httpExpectResponse {
			return *checkRes, translateHTTPError(domain, address,
				fmt.Errorf(`This test expected the server to respond with "%s" but instead we got a response beginning with "%s"`,
					scanCtx.httpExpectResponse, respStr),
				checkRes.DialStack)
		}
	}

	return *checkRes, Problem{}
}
// translateHTTPError converts a low-level HTTP/transport error into the most
// specific user-facing Problem available: a bad-redirect problem, a
// protocol-mismatch problem, or a generic A/AAAA-not-working problem.
func translateHTTPError(domain string, address net.IP, e error, dialStack []string) Problem {
	// Redirect policy violations carry their own detailed explanation.
	if redirErr, ok := e.(redirectError); ok {
		return badRedirect(domain, redirErr, dialStack)
	}

	if strings.HasSuffix(e.Error(), "http: server gave HTTP response to HTTPS client") {
		return httpServerMisconfiguration(domain, "Web server is serving the wrong protocol on the wrong port: "+e.Error()+
			". This may be due to a previous HTTP redirect rather than a webserver misconfiguration.\n\nTrace:\n"+strings.Join(dialStack, "\n"))
	}

	// Make a nicer error message if it was a context timeout.
	if urlErr, ok := e.(*url.Error); ok && urlErr.Timeout() {
		e = fmt.Errorf("A timeout was experienced while communicating with %s/%s: %v",
			domain, address.String(), urlErr)
	}

	// Idiom fix: early return instead of else-after-return.
	if address.To4() == nil {
		return aaaaNotWorking(domain, address.String(), e, dialStack)
	}
	return aNotWorking(domain, address.String(), e, dialStack)
}
// httpServerMisconfiguration reports a generic webserver misconfiguration
// with the supplied detail text.
func httpServerMisconfiguration(domain, detail string) Problem {
	explanation := domain + `'s webserver may be misconfigured.`
	return Problem{
		Name:        "WebserverMisconfiguration",
		Explanation: explanation,
		Detail:      detail,
		Severity:    SeverityError,
	}
}
// aaaaNotWorking reports that the domain's AAAA (IPv6) address did not answer
// the validation probe on port 80.
func aaaaNotWorking(domain, ipv6Address string, err error, dialStack []string) Problem {
	detail := err.Error() + "\n\nTrace:\n" + strings.Join(dialStack, "\n")
	return Problem{
		Name: "AAAANotWorking",
		Explanation: fmt.Sprintf(`%s has an AAAA (IPv6) record (%s) but a test request to this address over port 80 did not succeed. `+
			`Your web server must have at least one working IPv4 or IPv6 address. `+
			`You should either ensure that validation requests to this domain succeed over IPv6, or remove its AAAA record.`,
			domain, ipv6Address),
		Detail:   detail,
		Severity: SeverityError,
	}
}
// aNotWorking reports that the domain's A (IPv4) address did not answer the
// validation probe on port 80.
func aNotWorking(domain, addr string, err error, dialStack []string) Problem {
	detail := err.Error() + "\n\nTrace:\n" + strings.Join(dialStack, "\n")
	return Problem{
		Name: "ANotWorking",
		Explanation: fmt.Sprintf(`%s has an A (IPv4) record (%s) but a request to this address over port 80 did not succeed. `+
			`Your web server must have at least one working IPv4 or IPv6 address.`,
			domain, addr),
		Detail:   detail,
		Severity: SeverityError,
	}
}
// badRedirect reports an unacceptable redirect encountered while following
// the simulated validation request.
func badRedirect(domain string, err error, dialStack []string) Problem {
	detail := err.Error() + "\n\nTrace:\n" + strings.Join(dialStack, "\n")
	return Problem{
		Name: "BadRedirect",
		Explanation: fmt.Sprintf(`Sending an ACME HTTP validation request to %s results in an unacceptable redirect. `+
			`This is most likely a misconfiguration of your web server or your web application.`,
			domain),
		Detail:   detail,
		Severity: SeverityError,
	}
}

View File

@@ -1,85 +0,0 @@
// Package letsdebug provides an library, web API and CLI to provide diagnostic
// information for why a particular (FQDN, ACME Validation Method) pair *may* fail
// when attempting to issue an SSL Certificate from Let's Encrypt (https://letsencrypt.org).
//
// The usage cannot be generalized to other ACME providers, as the policies checked by this package
// are specific to Let's Encrypt, rather than being mandated by the ACME protocol.
//
// This package relies on libunbound.
package letsdebug
import (
"fmt"
"os"
"reflect"
"time"
)
// Options provide additional configuration to the various checkers
type Options struct {
	// HTTPRequestPath overrides the token used in the simulated validation
	// URL, i.e. /.well-known/acme-challenge/{{ HTTPRequestPath }} instead of
	// /.well-known/acme-challenge/letsdebug-test.
	HTTPRequestPath string
	// HTTPExpectResponse causes the HTTP checker to require the remote server to
	// respond with specific content. If the content does not match, then the test
	// will fail with severity Error.
	HTTPExpectResponse string
}
// Check calls CheckWithOptions with default options.
func Check(domain string, method ValidationMethod) (probs []Problem, retErr error) {
	return CheckWithOptions(domain, method, Options{})
}
// CheckWithOptions will run each checker against the domain and validation method provided.
// It is expected that this method may take a long time to execute, and may not be cancelled.
func CheckWithOptions(domain string, method ValidationMethod, opts Options) (probs []Problem, retErr error) {
	// Convert a checker panic into an error instead of crashing the caller.
	defer func() {
		if r := recover(); r != nil {
			retErr = fmt.Errorf("panic: %v", r)
		}
	}()

	ctx := newScanContext()
	if opts.HTTPRequestPath != "" {
		ctx.httpRequestPath = opts.HTTPRequestPath
	}
	if opts.HTTPExpectResponse != "" {
		ctx.httpExpectResponse = opts.HTTPExpectResponse
	}

	domain = normalizeFqdn(domain)

	// Run the checkers in order, accumulating problems as we go.
	for _, checker := range checkers {
		t := reflect.TypeOf(checker)
		debug("[*] + %v\n", t)
		start := time.Now()
		checkerProbs, err := checker.Check(ctx, domain, method)
		debug("[*] - %v in %v\n", t, time.Since(start))
		if err == nil {
			if len(checkerProbs) > 0 {
				probs = append(probs, checkerProbs...)
			}
			// dont continue checking when a fatal error occurs
			if hasFatalProblem(probs) {
				break
			}
		} else if err != errNotApplicable {
			// Checkers signal errNotApplicable to be skipped; any other
			// error aborts the entire scan.
			return nil, err
		}
	}
	return probs, nil
}
// isDebug caches whether LETSDEBUG_DEBUG is set; nil means "not checked yet".
var isDebug *bool

// debug writes printf-style diagnostics to stderr when the LETSDEBUG_DEBUG
// environment variable is non-empty. The first call latches the setting.
// NOTE(review): this lazy initialization is not goroutine-safe; concurrent
// first calls could race on isDebug — confirm callers are single-goroutine.
func debug(format string, args ...interface{}) {
	if isDebug == nil {
		d := os.Getenv("LETSDEBUG_DEBUG") != ""
		isDebug = &d
	}
	if !(*isDebug) {
		return
	}
	fmt.Fprintf(os.Stderr, format, args...)
}

View File

@@ -1,75 +0,0 @@
package letsdebug
import (
"fmt"
"strings"
)
// SeverityLevel represents the priority of a reported problem
type SeverityLevel string

// Problem represents an issue found by one of the checkers in this package.
// Explanation is a human-readable explanation of the issue.
// Detail is usually the underlying machine error.
type Problem struct {
	Name        string        `json:"name"`
	Explanation string        `json:"explanation"`
	Detail      string        `json:"detail"`
	Severity    SeverityLevel `json:"severity"`
}

// Severity levels, in decreasing order of urgency.
const (
	SeverityFatal   SeverityLevel = "Fatal" // Represents a fatal error which will stop any further checks
	SeverityError   SeverityLevel = "Error"
	SeverityWarning SeverityLevel = "Warning"
	SeverityDebug   SeverityLevel = "Debug" // Not to be shown by default
)

// String renders the problem as a single human-readable line.
func (p Problem) String() string {
	return fmt.Sprintf("[%s] %s: %s", p.Name, p.Explanation, p.Detail)
}

// IsZero reports whether the problem is empty (the zero value), which the
// checkers use to mean "no problem found".
func (p Problem) IsZero() bool {
	return p.Name == ""
}

// DetailLines splits the detail text into its individual lines.
func (p Problem) DetailLines() []string {
	return strings.Split(p.Detail, "\n")
}
// hasFatalProblem reports whether any problem in the list is fatal, which
// tells the scan loop to stop running further checkers.
func hasFatalProblem(probs []Problem) bool {
	for i := range probs {
		if probs[i].Severity == SeverityFatal {
			return true
		}
	}
	return false
}
// internalProblem wraps an internal checker failure as a Problem with the
// given severity.
func internalProblem(message string, level SeverityLevel) Problem {
	p := Problem{
		Name:        "InternalProblem",
		Explanation: "An internal error occurred while checking the domain",
		Severity:    level,
	}
	p.Detail = message
	return p
}
// dnsLookupFailed reports a fatal DNS resolution failure for name/rrType.
func dnsLookupFailed(name, rrType string, err error) Problem {
	explanation := fmt.Sprintf(`A fatal issue occurred during the DNS lookup process for %s/%s.`, name, rrType)
	return Problem{
		Name:        "DNSLookupFailed",
		Explanation: explanation,
		Detail:      err.Error(),
		Severity:    SeverityFatal,
	}
}
// debugProblem wraps diagnostic output as a Debug-severity problem, which is
// hidden from users by default.
func debugProblem(name, message, detail string) Problem {
	return Problem{
		Severity:    SeverityDebug,
		Name:        name,
		Explanation: message,
		Detail:      detail,
	}
}

View File

@@ -1,4 +0,0 @@
.db
*.test
*~
*.swp

73
vendor/github.com/lib/pq/.travis.sh generated vendored
View File

@@ -1,73 +0,0 @@
#!/bin/bash
set -eu
# Restrict the test client TLS key's permissions (libpq rejects private keys
# that are readable by group/others).
client_configure() {
	sudo chmod 600 $PQSSLCERTTEST_PATH/postgresql.key
}
# Add the PostgreSQL Global Development Group apt repository for the
# requested $PGVERSION and refresh only that source list.
pgdg_repository() {
	local sourcelist='sources.list.d/postgresql.list'

	curl -sS 'https://www.postgresql.org/media/keys/ACCC4CF8.asc' | sudo apt-key add -
	echo deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main $PGVERSION | sudo tee "/etc/apt/$sourcelist"
	sudo apt-get -o Dir::Etc::sourcelist="$sourcelist" -o Dir::Etc::sourceparts='-' -o APT::Get::List-Cleanup='0' update
}
# Configure the server for the SSL test matrix:
#  - pg_hba.conf forces the pqgossltest/pqgosslcert roles through SSL
#    (non-SSL connections rejected; pqgosslcert additionally requires a
#    client certificate), everything else is trusted locally
#  - install the test CA/server certificates into the data directory
#  - append the ssl_*_file settings only when $PGVERSION version-sorts at
#    or above 9.2 (the `sort -VCu || tee` pairing skips the append for
#    older versions, where those settings do not exist)
#  - alias "postgres" to 127.0.0.1 and restart the server.
postgresql_configure() {
	sudo tee /etc/postgresql/$PGVERSION/main/pg_hba.conf > /dev/null <<-config
		local all all trust
		hostnossl all pqgossltest 127.0.0.1/32 reject
		hostnossl all pqgosslcert 127.0.0.1/32 reject
		hostssl all pqgossltest 127.0.0.1/32 trust
		hostssl all pqgosslcert 127.0.0.1/32 cert
		host all all 127.0.0.1/32 trust
		hostnossl all pqgossltest ::1/128 reject
		hostnossl all pqgosslcert ::1/128 reject
		hostssl all pqgossltest ::1/128 trust
		hostssl all pqgosslcert ::1/128 cert
		host all all ::1/128 trust
	config

	xargs sudo install -o postgres -g postgres -m 600 -t /var/lib/postgresql/$PGVERSION/main/ <<-certificates
		certs/root.crt
		certs/server.crt
		certs/server.key
	certificates

	sort -VCu <<-versions ||
		$PGVERSION
		9.2
	versions
	sudo tee -a /etc/postgresql/$PGVERSION/main/postgresql.conf > /dev/null <<-config
		ssl_ca_file = 'root.crt'
		ssl_cert_file = 'server.crt'
		ssl_key_file = 'server.key'
	config

	echo 127.0.0.1 postgres | sudo tee -a /etc/hosts > /dev/null

	sudo service postgresql restart
}
# Install the server, dev headers and contrib modules for $PGVERSION,
# keeping package-shipped config files where dpkg would otherwise prompt.
postgresql_install() {
	xargs sudo apt-get -y -o Dpkg::Options::='--force-confdef' -o Dpkg::Options::='--force-confnew' install <<-packages
		postgresql-$PGVERSION
		postgresql-server-dev-$PGVERSION
		postgresql-contrib-$PGVERSION
	packages
}
# Stop and purge any pre-installed PostgreSQL so the PGDG packages can be
# installed cleanly, then remove leftover data directories.
postgresql_uninstall() {
	sudo service postgresql stop
	xargs sudo apt-get -y --purge remove <<-packages
		libpq-dev
		libpq5
		postgresql
		postgresql-client-common
		postgresql-common
	packages

	sudo rm -rf /var/lib/postgresql
}
# Dispatch: run the function named by the script's first argument
# (e.g. ./.travis.sh postgresql_install).
$1

44
vendor/github.com/lib/pq/.travis.yml generated vendored
View File

@@ -1,44 +0,0 @@
# Travis CI configuration: runs the lib/pq test suite against several
# PostgreSQL versions with the SSL client-certificate setup provided by
# .travis.sh, plus goimports/vet/staticcheck/golint linting.
language: go

go:
  - 1.13.x
  - 1.14.x
  - master

sudo: true

env:
  global:
    - PGUSER=postgres
    - PQGOSSLTESTS=1
    - PQSSLCERTTEST_PATH=$PWD/certs
    - PGHOST=127.0.0.1
  matrix:
    - PGVERSION=10
    - PGVERSION=9.6
    - PGVERSION=9.5
    - PGVERSION=9.4

before_install:
  - ./.travis.sh postgresql_uninstall
  - ./.travis.sh pgdg_repository
  - ./.travis.sh postgresql_install
  - ./.travis.sh postgresql_configure
  - ./.travis.sh client_configure
  - go get golang.org/x/tools/cmd/goimports
  - go get golang.org/x/lint/golint
  - GO111MODULE=on go get honnef.co/go/tools/cmd/staticcheck@2020.1.3

before_script:
  - createdb pqgotest
  - createuser -DRS pqgossltest
  - createuser -DRS pqgosslcert

script:
  # Fail when goimports would change any file.
  - >
    goimports -d -e $(find -name '*.go') | awk '{ print } END { exit NR == 0 ? 0 : 1 }'
  - go vet ./...
  - staticcheck -go 1.13 ./...
  - golint ./...
  # Run the tests with and without binary parameters, under the race detector.
  - PQTEST_BINARY_PARAMETERS=no go test -race -v ./...
  - PQTEST_BINARY_PARAMETERS=yes go test -race -v ./...

View File

@@ -1,8 +0,0 @@
Copyright (c) 2011-2013, 'pq' Contributors
Portions Copyright (C) 2011 Blake Mizerany
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

30
vendor/github.com/lib/pq/README.md generated vendored
View File

@@ -1,30 +0,0 @@
# pq - A pure Go postgres driver for Go's database/sql package
[![GoDoc](https://godoc.org/github.com/lib/pq?status.svg)](https://pkg.go.dev/github.com/lib/pq?tab=doc)
## Install
go get github.com/lib/pq
## Features
* SSL
* Handles bad connections for `database/sql`
* Scan `time.Time` correctly (i.e. `timestamp[tz]`, `time[tz]`, `date`)
* Scan binary blobs correctly (i.e. `bytea`)
* Package for `hstore` support
* COPY FROM support
* pq.ParseURL for converting urls to connection strings for sql.Open.
* Many libpq compatible environment variables
* Unix socket support
* Notifications: `LISTEN`/`NOTIFY`
* pgpass support
* GSS (Kerberos) auth
## Tests
`go test` is used for testing. See [TESTS.md](TESTS.md) for more details.
## Status
This package is effectively in maintenance mode and is not actively developed. Small patches and features are only rarely reviewed and merged. We recommend using [pgx](https://github.com/jackc/pgx) which is actively maintained.

33
vendor/github.com/lib/pq/TESTS.md generated vendored
View File

@@ -1,33 +0,0 @@
# Tests
## Running Tests
`go test` is used for testing. A running PostgreSQL
server is required, with the ability to log in. The
database to connect to test with is "pqgotest," on
"localhost" but these can be overridden using [environment
variables](https://www.postgresql.org/docs/9.3/static/libpq-envars.html).
Example:
PGHOST=/run/postgresql go test
## Benchmarks
A benchmark suite can be run as part of the tests:
go test -bench .
## Example setup (Docker)
Run a postgres container:
```
docker run -p 5432:5432 postgres
```
Run tests:
```
PGHOST=localhost PGPORT=5432 PGUSER=postgres PGSSLMODE=disable PGDATABASE=postgres go test
```

756
vendor/github.com/lib/pq/array.go generated vendored
View File

@@ -1,756 +0,0 @@
package pq
import (
"bytes"
"database/sql"
"database/sql/driver"
"encoding/hex"
"fmt"
"reflect"
"strconv"
"strings"
)
// Reflection handles used to detect how array elements should be encoded
// and decoded.
var typeByteSlice = reflect.TypeOf([]byte{})
var typeDriverValuer = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
var typeSQLScanner = reflect.TypeOf((*sql.Scanner)(nil)).Elem()
// Array returns the optimal driver.Valuer and sql.Scanner for an array or
// slice of any dimension.
//
// For example:
//  db.Query(`SELECT * FROM t WHERE id = ANY($1)`, pq.Array([]int{235, 401}))
//
//  var x []sql.NullInt64
//  db.QueryRow('SELECT ARRAY[235, 401]').Scan(pq.Array(&x))
//
// Scanning multi-dimensional arrays is not supported.  Arrays where the lower
// bound is not one (such as `[0:0]={1}') are not supported.
func Array(a interface{}) interface {
	driver.Valuer
	sql.Scanner
} {
	// The common element types get dedicated, allocation-free wrappers;
	// everything else falls through to the reflection-based GenericArray.
	switch a := a.(type) {
	case []bool:
		return (*BoolArray)(&a)
	case []float64:
		return (*Float64Array)(&a)
	case []int64:
		return (*Int64Array)(&a)
	case []string:
		return (*StringArray)(&a)

	case *[]bool:
		return (*BoolArray)(a)
	case *[]float64:
		return (*Float64Array)(a)
	case *[]int64:
		return (*Int64Array)(a)
	case *[]string:
		return (*StringArray)(a)
	}

	return GenericArray{a}
}
// ArrayDelimiter may be optionally implemented by driver.Valuer or sql.Scanner
// to override the array delimiter used by GenericArray (the default is ",").
type ArrayDelimiter interface {
	// ArrayDelimiter returns the delimiter character(s) for this element's type.
	ArrayDelimiter() string
}
// BoolArray represents a one-dimensional array of the PostgreSQL boolean type.
type BoolArray []bool

// Scan implements the sql.Scanner interface.
func (a *BoolArray) Scan(src interface{}) error {
	switch src := src.(type) {
	case []byte:
		return a.scanBytes(src)
	case string:
		return a.scanBytes([]byte(src))
	case nil:
		*a = nil
		return nil
	}

	return fmt.Errorf("pq: cannot convert %T to BoolArray", src)
}

// scanBytes parses a textual Postgres boolean array (e.g. {t,f,t}) into *a.
func (a *BoolArray) scanBytes(src []byte) error {
	elems, err := scanLinearArray(src, []byte{','}, "BoolArray")
	if err != nil {
		return err
	}
	// Keep the destination non-nil (but empty) when the input is an empty array.
	if *a != nil && len(elems) == 0 {
		*a = (*a)[:0]
	} else {
		b := make(BoolArray, len(elems))
		for i, v := range elems {
			// Postgres renders booleans as the single bytes 't' or 'f'.
			if len(v) != 1 {
				return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v)
			}
			switch v[0] {
			case 't':
				b[i] = true
			case 'f':
				b[i] = false
			default:
				return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v)
			}
		}
		*a = b
	}

	return nil
}

// Value implements the driver.Valuer interface.
func (a BoolArray) Value() (driver.Value, error) {
	if a == nil {
		return nil, nil
	}

	if n := len(a); n > 0 {
		// There will be exactly two curly brackets, N bytes of values,
		// and N-1 bytes of delimiters.
		b := make([]byte, 1+2*n)

		for i := 0; i < n; i++ {
			// Slot 2*i holds the delimiter (overwritten by '{' at index 0),
			// slot 2*i+1 holds the value byte.
			b[2*i] = ','
			if a[i] {
				b[1+2*i] = 't'
			} else {
				b[1+2*i] = 'f'
			}
		}

		b[0] = '{'
		b[2*n] = '}'

		return string(b), nil
	}

	return "{}", nil
}
// ByteaArray represents a one-dimensional array of the PostgreSQL bytea type.
type ByteaArray [][]byte

// Scan implements the sql.Scanner interface.
func (a *ByteaArray) Scan(src interface{}) error {
	switch src := src.(type) {
	case []byte:
		return a.scanBytes(src)
	case string:
		return a.scanBytes([]byte(src))
	case nil:
		*a = nil
		return nil
	}

	return fmt.Errorf("pq: cannot convert %T to ByteaArray", src)
}

// scanBytes parses a textual bytea array, decoding each element with
// parseBytea (which handles both hex and escape formats).
func (a *ByteaArray) scanBytes(src []byte) error {
	elems, err := scanLinearArray(src, []byte{','}, "ByteaArray")
	if err != nil {
		return err
	}
	// Keep the destination non-nil (but empty) when the input is an empty array.
	if *a != nil && len(elems) == 0 {
		*a = (*a)[:0]
	} else {
		b := make(ByteaArray, len(elems))
		for i, v := range elems {
			b[i], err = parseBytea(v)
			if err != nil {
				return fmt.Errorf("could not parse bytea array index %d: %s", i, err.Error())
			}
		}
		*a = b
	}

	return nil
}

// Value implements the driver.Valuer interface. It uses the "hex" format which
// is only supported on PostgreSQL 9.0 or newer.
func (a ByteaArray) Value() (driver.Value, error) {
	if a == nil {
		return nil, nil
	}

	if n := len(a); n > 0 {
		// There will be at least two curly brackets, 2*N bytes of quotes,
		// 3*N bytes of hex formatting, and N-1 bytes of delimiters.
		size := 1 + 6*n
		for _, x := range a {
			size += hex.EncodedLen(len(x))
		}

		b := make([]byte, size)

		// Write each element as ,"\\x<hex>" into a sliding window `s`;
		// the leading ',' of the first element is overwritten by '{' below.
		for i, s := 0, b; i < n; i++ {
			o := copy(s, `,"\\x`)
			o += hex.Encode(s[o:], a[i])
			s[o] = '"'
			s = s[o+1:]
		}

		b[0] = '{'
		b[size-1] = '}'

		return string(b), nil
	}

	return "{}", nil
}
// Float64Array represents a one-dimensional array of the PostgreSQL double
// precision type.
type Float64Array []float64

// Scan implements the sql.Scanner interface.
func (a *Float64Array) Scan(src interface{}) error {
	switch src := src.(type) {
	case []byte:
		return a.scanBytes(src)
	case string:
		return a.scanBytes([]byte(src))
	case nil:
		*a = nil
		return nil
	}

	return fmt.Errorf("pq: cannot convert %T to Float64Array", src)
}

// scanBytes parses a textual float8 array into *a.
func (a *Float64Array) scanBytes(src []byte) error {
	elems, err := scanLinearArray(src, []byte{','}, "Float64Array")
	if err != nil {
		return err
	}
	// Keep the destination non-nil (but empty) when the input is an empty array.
	if *a != nil && len(elems) == 0 {
		*a = (*a)[:0]
	} else {
		b := make(Float64Array, len(elems))
		for i, v := range elems {
			if b[i], err = strconv.ParseFloat(string(v), 64); err != nil {
				return fmt.Errorf("pq: parsing array element index %d: %v", i, err)
			}
		}
		*a = b
	}

	return nil
}

// Value implements the driver.Valuer interface.
func (a Float64Array) Value() (driver.Value, error) {
	if a == nil {
		return nil, nil
	}

	if n := len(a); n > 0 {
		// There will be at least two curly brackets, N bytes of values,
		// and N-1 bytes of delimiters.
		b := make([]byte, 1, 1+2*n)
		b[0] = '{'

		b = strconv.AppendFloat(b, a[0], 'f', -1, 64)
		for i := 1; i < n; i++ {
			b = append(b, ',')
			b = strconv.AppendFloat(b, a[i], 'f', -1, 64)
		}

		return string(append(b, '}')), nil
	}

	return "{}", nil
}
// GenericArray implements the driver.Valuer and sql.Scanner interfaces for
// an array or slice of any dimension.
type GenericArray struct{ A interface{} }
func (GenericArray) evaluateDestination(rt reflect.Type) (reflect.Type, func([]byte, reflect.Value) error, string) {
var assign func([]byte, reflect.Value) error
var del = ","
// TODO calculate the assign function for other types
// TODO repeat this section on the element type of arrays or slices (multidimensional)
{
if reflect.PtrTo(rt).Implements(typeSQLScanner) {
// dest is always addressable because it is an element of a slice.
assign = func(src []byte, dest reflect.Value) (err error) {
ss := dest.Addr().Interface().(sql.Scanner)
if src == nil {
err = ss.Scan(nil)
} else {
err = ss.Scan(src)
}
return
}
goto FoundType
}
assign = func([]byte, reflect.Value) error {
return fmt.Errorf("pq: scanning to %s is not implemented; only sql.Scanner", rt)
}
}
FoundType:
if ad, ok := reflect.Zero(rt).Interface().(ArrayDelimiter); ok {
del = ad.ArrayDelimiter()
}
return rt, assign, del
}
// Scan implements the sql.Scanner interface.
//
// A must be a non-nil pointer to a slice or array. A nil source is accepted
// only for slice destinations, which are set to nil.
func (a GenericArray) Scan(src interface{}) error {
	dpv := reflect.ValueOf(a.A)
	switch {
	case dpv.Kind() != reflect.Ptr:
		return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A)
	case dpv.IsNil():
		return fmt.Errorf("pq: destination %T is nil", a.A)
	}

	dv := dpv.Elem()
	switch dv.Kind() {
	case reflect.Slice:
	case reflect.Array:
	default:
		return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A)
	}

	switch src := src.(type) {
	case []byte:
		return a.scanBytes(src, dv)
	case string:
		return a.scanBytes([]byte(src), dv)
	case nil:
		if dv.Kind() == reflect.Slice {
			dv.Set(reflect.Zero(dv.Type()))
			return nil
		}
	}

	return fmt.Errorf("pq: cannot convert %T to %s", src, dv.Type())
}
// scanBytes parses the array literal src into the destination value dv
// (a slice or array already validated by Scan). Only one-dimensional
// arrays are currently supported.
func (a GenericArray) scanBytes(src []byte, dv reflect.Value) error {
	dtype, assign, del := a.evaluateDestination(dv.Type().Elem())
	dims, elems, err := parseArray(src, []byte(del))
	if err != nil {
		return err
	}

	// TODO allow multidimensional

	if len(dims) > 1 {
		return fmt.Errorf("pq: scanning from multidimensional ARRAY%s is not implemented",
			strings.Replace(fmt.Sprint(dims), " ", "][", -1))
	}

	// Treat a zero-dimensional array like an array with a single dimension of zero.
	if len(dims) == 0 {
		dims = append(dims, 0)
	}

	// Fixed-size array destinations must match the parsed dimensions exactly.
	for i, rt := 0, dv.Type(); i < len(dims); i, rt = i+1, rt.Elem() {
		switch rt.Kind() {
		case reflect.Slice:
		case reflect.Array:
			if rt.Len() != dims[i] {
				return fmt.Errorf("pq: cannot convert ARRAY%s to %s",
					strings.Replace(fmt.Sprint(dims), " ", "][", -1), dv.Type())
			}
		default:
			// TODO handle multidimensional
		}
	}

	// Scan every raw element into a scratch slice, then copy into dv.
	values := reflect.MakeSlice(reflect.SliceOf(dtype), len(elems), len(elems))
	for i, e := range elems {
		if err := assign(e, values.Index(i)); err != nil {
			return fmt.Errorf("pq: parsing array element index %d: %v", i, err)
		}
	}

	// TODO handle multidimensional

	switch dv.Kind() {
	case reflect.Slice:
		dv.Set(values.Slice(0, dims[0]))
	case reflect.Array:
		for i := 0; i < dims[0]; i++ {
			dv.Index(i).Set(values.Index(i))
		}
	}

	return nil
}
// Value implements the driver.Valuer interface.
//
// A nil A, or a nil slice, maps to SQL NULL; otherwise A is rendered as a
// PostgreSQL array literal via appendArray.
func (a GenericArray) Value() (driver.Value, error) {
	if a.A == nil {
		return nil, nil
	}

	rv := reflect.ValueOf(a.A)

	switch rv.Kind() {
	case reflect.Slice:
		if rv.IsNil() {
			return nil, nil
		}
	case reflect.Array:
	default:
		return nil, fmt.Errorf("pq: Unable to convert %T to array", a.A)
	}

	if n := rv.Len(); n > 0 {
		// There will be at least two curly brackets, N bytes of values,
		// and N-1 bytes of delimiters.
		b := make([]byte, 0, 1+2*n)

		b, _, err := appendArray(b, rv, n)
		return string(b), err
	}

	return "{}", nil
}
// Int64Array represents a one-dimensional array of the PostgreSQL integer types.
// It maps between Go []int64 and the backend's text array representation.
type Int64Array []int64
// Scan implements the sql.Scanner interface.
func (a *Int64Array) Scan(src interface{}) error {
	switch v := src.(type) {
	case []byte:
		return a.scanBytes(v)
	case string:
		return a.scanBytes([]byte(v))
	case nil:
		*a = nil
		return nil
	default:
		return fmt.Errorf("pq: cannot convert %T to Int64Array", v)
	}
}
// scanBytes parses a PostgreSQL array literal into a. An existing non-nil
// slice is reused (truncated) to represent an empty result.
func (a *Int64Array) scanBytes(src []byte) error {
	elems, err := scanLinearArray(src, []byte{','}, "Int64Array")
	if err != nil {
		return err
	}
	switch {
	case len(elems) == 0 && *a != nil:
		// Preserve the caller's distinction between nil and empty.
		*a = (*a)[:0]
	default:
		parsed := make(Int64Array, len(elems))
		for i, raw := range elems {
			v, perr := strconv.ParseInt(string(raw), 10, 64)
			if perr != nil {
				return fmt.Errorf("pq: parsing array element index %d: %v", i, perr)
			}
			parsed[i] = v
		}
		*a = parsed
	}
	return nil
}
// Value implements the driver.Valuer interface. A nil array maps to SQL
// NULL; otherwise the result is a PostgreSQL array literal string.
func (a Int64Array) Value() (driver.Value, error) {
	if a == nil {
		return nil, nil
	}
	n := len(a)
	if n == 0 {
		return "{}", nil
	}
	// At minimum: two curly brackets, N formatted values, and N-1 commas.
	out := make([]byte, 0, 1+2*n)
	out = append(out, '{')
	for i, v := range a {
		if i > 0 {
			out = append(out, ',')
		}
		out = strconv.AppendInt(out, v, 10)
	}
	out = append(out, '}')
	return string(out), nil
}
// StringArray represents a one-dimensional array of the PostgreSQL character types.
// NULL elements are rejected during Scan since Go strings cannot be nil.
type StringArray []string
// Scan implements the sql.Scanner interface.
func (a *StringArray) Scan(src interface{}) error {
	switch v := src.(type) {
	case []byte:
		return a.scanBytes(v)
	case string:
		return a.scanBytes([]byte(v))
	case nil:
		*a = nil
		return nil
	default:
		return fmt.Errorf("pq: cannot convert %T to StringArray", v)
	}
}
// scanBytes parses a PostgreSQL array literal into a, reusing an existing
// non-nil slice to represent an empty result. A NULL element (nil raw
// bytes) is an error, since a Go string cannot represent it.
func (a *StringArray) scanBytes(src []byte) error {
	elems, err := scanLinearArray(src, []byte{','}, "StringArray")
	if err != nil {
		return err
	}
	if *a != nil && len(elems) == 0 {
		*a = (*a)[:0]
	} else {
		b := make(StringArray, len(elems))
		for i, v := range elems {
			// Assign first, then check: a nil v (SQL NULL) aborts the scan.
			if b[i] = string(v); v == nil {
				return fmt.Errorf("pq: parsing array element index %d: cannot convert nil to string", i)
			}
		}
		*a = b
	}
	return nil
}
// Value implements the driver.Valuer interface. A nil array maps to SQL
// NULL; every element is double-quoted with escaping as needed.
func (a StringArray) Value() (driver.Value, error) {
	if a == nil {
		return nil, nil
	}
	n := len(a)
	if n == 0 {
		return "{}", nil
	}
	// At minimum: two curly brackets, 2*N quote characters, and N-1 commas.
	out := make([]byte, 0, 1+3*n)
	out = append(out, '{')
	for i, s := range a {
		if i > 0 {
			out = append(out, ',')
		}
		out = appendArrayQuotedBytes(out, []byte(s))
	}
	return string(append(out, '}')), nil
}
// appendArray appends rv to the buffer, returning the extended buffer and
// the delimiter used between elements.
//
// It panics when n <= 0 or rv's Kind is not reflect.Array nor reflect.Slice.
func appendArray(b []byte, rv reflect.Value, n int) ([]byte, string, error) {
	var del string
	var err error

	b = append(b, '{')

	// The delimiter is determined by the element type (see appendArrayElement).
	if b, del, err = appendArrayElement(b, rv.Index(0)); err != nil {
		return b, del, err
	}

	for i := 1; i < n; i++ {
		b = append(b, del...)
		if b, del, err = appendArrayElement(b, rv.Index(i)); err != nil {
			return b, del, err
		}
	}

	return append(b, '}'), del, nil
}
// appendArrayElement appends rv to the buffer, returning the extended buffer
// and the delimiter to use before the next element.
//
// When rv's Kind is neither reflect.Array nor reflect.Slice, it is converted
// using driver.DefaultParameterConverter and the resulting []byte or string
// is double-quoted.
//
// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO
func appendArrayElement(b []byte, rv reflect.Value) ([]byte, string, error) {
	// Nested slices/arrays (other than []byte or driver.Valuer types) recurse
	// to produce a sub-array literal.
	if k := rv.Kind(); k == reflect.Array || k == reflect.Slice {
		if t := rv.Type(); t != typeByteSlice && !t.Implements(typeDriverValuer) {
			if n := rv.Len(); n > 0 {
				return appendArray(b, rv, n)
			}

			return b, "", nil
		}
	}

	var del = ","
	var err error
	var iv interface{} = rv.Interface()

	if ad, ok := iv.(ArrayDelimiter); ok {
		del = ad.ArrayDelimiter()
	}

	if iv, err = driver.DefaultParameterConverter.ConvertValue(iv); err != nil {
		return b, del, err
	}

	switch v := iv.(type) {
	case nil:
		return append(b, "NULL"...), del, nil
	case []byte:
		return appendArrayQuotedBytes(b, v), del, nil
	case string:
		return appendArrayQuotedBytes(b, []byte(v)), del, nil
	}

	b, err = appendValue(b, iv)
	return b, del, err
}
// appendArrayQuotedBytes appends v to b as a double-quoted array element,
// backslash-escaping any '"' or '\' characters it contains.
func appendArrayQuotedBytes(b, v []byte) []byte {
	b = append(b, '"')
	for len(v) > 0 {
		i := bytes.IndexAny(v, `"\`)
		if i < 0 {
			b = append(b, v...)
			break
		}
		b = append(b, v[:i]...)
		b = append(b, '\\', v[i])
		v = v[i+1:]
	}
	return append(b, '"')
}
// appendValue appends the text encoding of the driver value v to b.
// encode never fails for the types reaching this point, hence the nil error.
func appendValue(b []byte, v driver.Value) ([]byte, error) {
	return append(b, encode(nil, v, 0)...), nil
}
// parseArray extracts the dimensions and elements of an array represented in
// text format. Only representations emitted by the backend are supported.
// Notably, whitespace around brackets and delimiters is significant, and NULL
// is case-sensitive.
//
// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO
func parseArray(src, del []byte) (dims []int, elems [][]byte, err error) {
	var depth, i int

	if len(src) < 1 || src[0] != '{' {
		return nil, nil, fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '{', 0)
	}

	// Consume the leading run of '{' to establish the nesting depth;
	// an immediate '}' means the array is empty.
Open:
	for i < len(src) {
		switch src[i] {
		case '{':
			depth++
			i++
		case '}':
			elems = make([][]byte, 0)
			goto Close
		default:
			break Open
		}
	}
	dims = make([]int, i)

	// Parse one element per pass: a nested '{', a quoted element, or a
	// bare (unquoted) element.
Element:
	for i < len(src) {
		switch src[i] {
		case '{':
			if depth == len(dims) {
				break Element
			}
			depth++
			dims[depth-1] = 0
			i++
		case '"':
			// Quoted element: backslash escapes the next character.
			var elem = []byte{}
			var escape bool
			for i++; i < len(src); i++ {
				if escape {
					elem = append(elem, src[i])
					escape = false
				} else {
					switch src[i] {
					default:
						elem = append(elem, src[i])
					case '\\':
						escape = true
					case '"':
						elems = append(elems, elem)
						i++
						break Element
					}
				}
			}
		default:
			// Bare element: runs until the delimiter or closing brace;
			// a literal NULL becomes a nil element.
			for start := i; i < len(src); i++ {
				if bytes.HasPrefix(src[i:], del) || src[i] == '}' {
					elem := src[start:i]
					if len(elem) == 0 {
						return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i)
					}
					if bytes.Equal(elem, []byte("NULL")) {
						elem = nil
					}
					elems = append(elems, elem)
					break Element
				}
			}
		}
	}

	// After each element: a delimiter continues the current dimension,
	// a closing brace completes it.
	for i < len(src) {
		if bytes.HasPrefix(src[i:], del) && depth > 0 {
			dims[depth-1]++
			i += len(del)
			goto Element
		} else if src[i] == '}' && depth > 0 {
			dims[depth-1]++
			depth--
			i++
		} else {
			return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i)
		}
	}

Close:
	for i < len(src) {
		if src[i] == '}' && depth > 0 {
			depth--
			i++
		} else {
			return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i)
		}
	}
	if depth > 0 {
		err = fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '}', i)
	}
	if err == nil {
		// Every dimension must evenly divide the element count.
		for _, d := range dims {
			if (len(elems) % d) != 0 {
				err = fmt.Errorf("pq: multidimensional arrays must have elements with matching dimensions")
			}
		}
	}
	return
}
// scanLinearArray parses src and ensures it is at most one-dimensional,
// returning the raw elements. typ names the destination type in errors.
func scanLinearArray(src, del []byte, typ string) (elems [][]byte, err error) {
	dims, elems, err := parseArray(src, del)
	if err != nil {
		return nil, err
	}
	if len(dims) > 1 {
		return nil, fmt.Errorf("pq: cannot convert ARRAY%s to %s", strings.Replace(fmt.Sprint(dims), " ", "][", -1), typ)
	}
	return elems, err
}

91
vendor/github.com/lib/pq/buf.go generated vendored
View File

@@ -1,91 +0,0 @@
package pq
import (
"bytes"
"encoding/binary"
"github.com/lib/pq/oid"
)
// readBuf is a cursor over the raw bytes of a backend message; each read
// method consumes the bytes it decodes from the front of the slice.
type readBuf []byte

// int32 consumes and returns a big-endian signed 32-bit integer.
func (b *readBuf) int32() int {
	v := int32(binary.BigEndian.Uint32(*b))
	*b = (*b)[4:]
	return int(v)
}
// oid consumes and returns a big-endian 32-bit object identifier.
func (b *readBuf) oid() (n oid.Oid) {
	n = oid.Oid(binary.BigEndian.Uint32(*b))
	*b = (*b)[4:]
	return
}
// int16 consumes and returns a 16-bit integer. N.B: unlike int32, the wire
// value is decoded as unsigned.
func (b *readBuf) int16() int {
	v := binary.BigEndian.Uint16(*b)
	*b = (*b)[2:]
	return int(v)
}
// string consumes and returns a NUL-terminated string; a missing terminator
// is reported via errorf.
func (b *readBuf) string() string {
	i := bytes.IndexByte(*b, 0)
	if i < 0 {
		errorf("invalid message format; expected string terminator")
	}
	s := (*b)[:i]
	// Skip the string and its terminator.
	*b = (*b)[i+1:]
	return string(s)
}
// next consumes and returns the next n bytes without copying them; the
// returned slice aliases the underlying buffer.
func (b *readBuf) next(n int) []byte {
	head := (*b)[:n]
	*b = (*b)[n:]
	return head
}
// byte consumes and returns a single byte.
func (b *readBuf) byte() byte {
	return b.next(1)[0]
}
// writeBuf accumulates one or more frontend messages. The 4-byte length
// word of the in-progress message starts at pos and is back-filled by
// wrap or next once the message body is complete.
type writeBuf struct {
	buf []byte
	pos int
}

// int32 appends n as a big-endian 32-bit integer.
func (b *writeBuf) int32(n int) {
	b.buf = append(b.buf, byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
}

// int16 appends n as a big-endian 16-bit integer.
func (b *writeBuf) int16(n int) {
	b.buf = append(b.buf, byte(n>>8), byte(n))
}

// string appends s followed by a NUL terminator.
func (b *writeBuf) string(s string) {
	b.buf = append(append(b.buf, s...), '\000')
}

// byte appends the single byte c.
func (b *writeBuf) byte(c byte) {
	b.buf = append(b.buf, c)
}

// bytes appends v verbatim.
func (b *writeBuf) bytes(v []byte) {
	b.buf = append(b.buf, v...)
}

// wrap back-fills the pending message's length word and returns the
// complete buffer, ready to be written to the wire.
func (b *writeBuf) wrap() []byte {
	pending := b.buf[b.pos:]
	binary.BigEndian.PutUint32(pending, uint32(len(pending)))
	return b.buf
}

// next finalizes the current message's length word and starts a new
// message of type c, reserving space for its own length word.
func (b *writeBuf) next(c byte) {
	pending := b.buf[b.pos:]
	binary.BigEndian.PutUint32(pending, uint32(len(pending)))
	b.pos = len(b.buf) + 1
	b.buf = append(b.buf, c, 0, 0, 0, 0)
}

1996
vendor/github.com/lib/pq/conn.go generated vendored

File diff suppressed because it is too large Load Diff

149
vendor/github.com/lib/pq/conn_go18.go generated vendored
View File

@@ -1,149 +0,0 @@
package pq
import (
"context"
"database/sql"
"database/sql/driver"
"fmt"
"io"
"io/ioutil"
"time"
)
// QueryContext implements the "QueryerContext" interface: it unwraps the
// named arguments and runs the query under ctx cancellation.
func (cn *conn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
	list := make([]driver.Value, len(args))
	for i, nv := range args {
		list[i] = nv.Value
	}
	finish := cn.watchCancel(ctx)
	r, err := cn.query(query, list)
	if err != nil {
		if finish != nil {
			finish()
		}
		return nil, err
	}
	// The cancellation watcher stays attached to the rows until they are
	// fully consumed or closed.
	r.finish = finish
	return r, nil
}
// ExecContext implements the "ExecerContext" interface: it unwraps the
// named arguments and runs the statement under ctx cancellation.
func (cn *conn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
	list := make([]driver.Value, len(args))
	for i, nv := range args {
		list[i] = nv.Value
	}

	// Exec completes before returning, so the watcher can be stopped here.
	if finish := cn.watchCancel(ctx); finish != nil {
		defer finish()
	}

	return cn.Exec(query, list)
}
// BeginTx implements the "ConnBeginTx" interface, mapping database/sql
// isolation levels to the corresponding BEGIN transaction modes.
func (cn *conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
	var mode string

	switch sql.IsolationLevel(opts.Isolation) {
	case sql.LevelDefault:
		// Don't touch mode: use the server's default
	case sql.LevelReadUncommitted:
		mode = " ISOLATION LEVEL READ UNCOMMITTED"
	case sql.LevelReadCommitted:
		mode = " ISOLATION LEVEL READ COMMITTED"
	case sql.LevelRepeatableRead:
		mode = " ISOLATION LEVEL REPEATABLE READ"
	case sql.LevelSerializable:
		mode = " ISOLATION LEVEL SERIALIZABLE"
	default:
		return nil, fmt.Errorf("pq: isolation level not supported: %d", opts.Isolation)
	}

	if opts.ReadOnly {
		mode += " READ ONLY"
	} else {
		mode += " READ WRITE"
	}

	tx, err := cn.begin(mode)
	if err != nil {
		return nil, err
	}
	// Keep watching ctx for the lifetime of the transaction, not just BEGIN.
	cn.txnFinish = cn.watchCancel(ctx)
	return tx, nil
}
// Ping verifies the connection by issuing an empty query; any failure is
// reported as driver.ErrBadConn so database/sql retires the connection.
func (cn *conn) Ping(ctx context.Context) error {
	if finish := cn.watchCancel(ctx); finish != nil {
		defer finish()
	}
	rows, err := cn.simpleQuery(";")
	if err != nil {
		return driver.ErrBadConn // https://golang.org/pkg/database/sql/driver/#Pinger
	}
	rows.Close()
	return nil
}
// watchCancel starts a goroutine that issues a backend cancel request when
// ctx is canceled. It returns a finish function the caller invokes when the
// operation completes; the function and the watcher rendezvous on the
// finished channel so exactly one side proceeds. It returns nil when ctx
// can never be canceled.
func (cn *conn) watchCancel(ctx context.Context) func() {
	if done := ctx.Done(); done != nil {
		finished := make(chan struct{})
		go func() {
			select {
			case <-done:
				// At this point the function level context is canceled,
				// so it must not be used for the additional network
				// request to cancel the query.
				// Create a new context to pass into the dial.
				ctxCancel, cancel := context.WithTimeout(context.Background(), time.Second*10)
				defer cancel()

				_ = cn.cancel(ctxCancel)
				finished <- struct{}{}
			case <-finished:
			}
		}()
		return func() {
			select {
			case <-finished:
			case finished <- struct{}{}:
			}
		}
	}
	return nil
}
// cancel opens a separate connection to the server and sends a
// CancelRequest message for cn's in-flight query, identified by the
// process ID and secret key captured at startup.
func (cn *conn) cancel(ctx context.Context) error {
	c, err := dial(ctx, cn.dialer, cn.opts)
	if err != nil {
		return err
	}
	defer c.Close()

	{
		can := conn{
			c: c,
		}
		err = can.ssl(cn.opts)
		if err != nil {
			return err
		}

		w := can.writeBuf(0)
		w.int32(80877102) // cancel request code
		w.int32(cn.processID)
		w.int32(cn.secretKey)

		if err := can.sendStartupPacket(w); err != nil {
			return err
		}
	}

	// Read until EOF to ensure that the server received the cancel.
	{
		_, err := io.Copy(ioutil.Discard, c)
		return err
	}
}

115
vendor/github.com/lib/pq/connector.go generated vendored
View File

@@ -1,115 +0,0 @@
package pq
import (
"context"
"database/sql/driver"
"errors"
"fmt"
"os"
"strings"
)
// Connector represents a fixed configuration for the pq driver with a given
// name. Connector satisfies the database/sql/driver Connector interface and
// can be used to create any number of DB Conn's via the database/sql OpenDB
// function.
//
// See https://golang.org/pkg/database/sql/driver/#Connector.
// See https://golang.org/pkg/database/sql/#OpenDB.
type Connector struct {
	opts   values // resolved connection parameters
	dialer Dialer // used to reach the server
}
// Connect returns a connection to the database using the fixed configuration
// of this Connector. Context is not used.
func (c *Connector) Connect(ctx context.Context) (driver.Conn, error) {
	return c.open(ctx)
}
// Driver returns the underlying driver of this Connector.
func (c *Connector) Driver() driver.Driver {
	return &Driver{}
}
// NewConnector returns a connector for the pq driver in a fixed configuration
// with the given dsn. The returned connector can be used to create any number
// of equivalent Conn's. The returned connector is intended to be used with
// database/sql.OpenDB.
//
// See https://golang.org/pkg/database/sql/driver/#Connector.
// See https://golang.org/pkg/database/sql/#OpenDB.
func NewConnector(dsn string) (*Connector, error) {
	var err error
	o := make(values)

	// A number of defaults are applied here, in this order:
	//
	// * Very low precedence defaults applied in every situation
	// * Environment variables
	// * Explicitly passed connection information
	o["host"] = "localhost"
	o["port"] = "5432"
	// N.B.: Extra float digits should be set to 3, but that breaks
	// Postgres 8.4 and older, where the max is 2.
	o["extra_float_digits"] = "2"
	for k, v := range parseEnviron(os.Environ()) {
		o[k] = v
	}

	// URL-style DSNs are first rewritten into key=value form.
	if strings.HasPrefix(dsn, "postgres://") || strings.HasPrefix(dsn, "postgresql://") {
		dsn, err = ParseURL(dsn)
		if err != nil {
			return nil, err
		}
	}

	if err := parseOpts(dsn, o); err != nil {
		return nil, err
	}

	// Use the "fallback" application name if necessary
	if fallback, ok := o["fallback_application_name"]; ok {
		if _, ok := o["application_name"]; !ok {
			o["application_name"] = fallback
		}
	}

	// We can't work with any client_encoding other than UTF-8 currently.
	// However, we have historically allowed the user to set it to UTF-8
	// explicitly, and there's no reason to break such programs, so allow that.
	// Note that the "options" setting could also set client_encoding, but
	// parsing its value is not worth it. Instead, we always explicitly send
	// client_encoding as a separate run-time parameter, which should override
	// anything set in options.
	if enc, ok := o["client_encoding"]; ok && !isUTF8(enc) {
		return nil, errors.New("client_encoding must be absent or 'UTF8'")
	}
	o["client_encoding"] = "UTF8"
	// DateStyle needs a similar treatment.
	if datestyle, ok := o["datestyle"]; ok {
		if datestyle != "ISO, MDY" {
			return nil, fmt.Errorf("setting datestyle must be absent or %v; got %v", "ISO, MDY", datestyle)
		}
	} else {
		o["datestyle"] = "ISO, MDY"
	}

	// If a user is not provided by any other means, the last
	// resort is to use the current operating system provided user
	// name.
	if _, ok := o["user"]; !ok {
		u, err := userCurrent()
		if err != nil {
			return nil, err
		}
		o["user"] = u
	}

	// SSL is not necessary or supported over UNIX domain sockets
	if network, _ := network(o); network == "unix" {
		o["sslmode"] = "disable"
	}

	return &Connector{opts: o, dialer: defaultDialer{}}, nil
}

307
vendor/github.com/lib/pq/copy.go generated vendored
View File

@@ -1,307 +0,0 @@
package pq
import (
"database/sql/driver"
"encoding/binary"
"errors"
"fmt"
"sync"
)
// Errors returned by the COPY FROM (copy-in) protocol implementation.
var (
	errCopyInClosed               = errors.New("pq: copyin statement has already been closed")
	errBinaryCopyNotSupported     = errors.New("pq: only text format supported for COPY")
	errCopyToNotSupported         = errors.New("pq: COPY TO is not supported")
	errCopyNotSupportedOutsideTxn = errors.New("pq: COPY is only allowed inside a transaction")
	errCopyInProgress             = errors.New("pq: COPY in progress")
)
// CopyIn creates a COPY FROM statement which can be prepared with
// Tx.Prepare(). The target table should be visible in search_path.
func CopyIn(table string, columns ...string) string {
	stmt := "COPY " + QuoteIdentifier(table) + " ("
	for i, col := range columns {
		if i > 0 {
			stmt += ", "
		}
		stmt += QuoteIdentifier(col)
	}
	return stmt + ") FROM STDIN"
}
// CopyInSchema creates a COPY FROM statement, qualified with the given
// schema, which can be prepared with Tx.Prepare().
func CopyInSchema(schema, table string, columns ...string) string {
	stmt := "COPY " + QuoteIdentifier(schema) + "." + QuoteIdentifier(table) + " ("
	for i, col := range columns {
		if i > 0 {
			stmt += ", "
		}
		stmt += QuoteIdentifier(col)
	}
	return stmt + ") FROM STDIN"
}
// copyin is the driver.Stmt returned for a COPY FROM statement. Rows are
// accumulated in buffer and streamed to the backend, while resploop
// consumes backend responses concurrently.
type copyin struct {
	cn      *conn
	buffer  []byte      // pending CopyData message, including its 5-byte header
	rowData chan []byte
	done    chan bool   // signaled by resploop when the COPY terminates

	driver.Result

	closed bool

	sync.Mutex // guards err
	err        error
}
// initial capacity of the CopyData buffer
const ciBufferSize = 64 * 1024

// flush buffer before the buffer is filled up and needs reallocation
const ciBufferFlushSize = 63 * 1024
// prepareCopyIn issues the COPY query q and negotiates the copy-in
// protocol with the backend, returning a copyin statement on success.
// On any negotiation failure it sends CopyFail and drains the connection
// back to a ready state before returning the error.
func (cn *conn) prepareCopyIn(q string) (_ driver.Stmt, err error) {
	if !cn.isInTransaction() {
		return nil, errCopyNotSupportedOutsideTxn
	}

	ci := &copyin{
		cn:      cn,
		buffer:  make([]byte, 0, ciBufferSize),
		rowData: make(chan []byte),
		done:    make(chan bool, 1),
	}
	// add CopyData identifier + 4 bytes for message length
	ci.buffer = append(ci.buffer, 'd', 0, 0, 0, 0)

	b := cn.writeBuf('Q')
	b.string(q)
	cn.send(b)

awaitCopyInResponse:
	for {
		t, r := cn.recv1()
		switch t {
		case 'G': // CopyInResponse
			if r.byte() != 0 {
				err = errBinaryCopyNotSupported
				break awaitCopyInResponse
			}
			go ci.resploop()
			return ci, nil
		case 'H': // CopyOutResponse
			err = errCopyToNotSupported
			break awaitCopyInResponse
		case 'E':
			err = parseError(r)
		case 'Z': // ReadyForQuery
			if err == nil {
				ci.setBad()
				errorf("unexpected ReadyForQuery in response to COPY")
			}
			cn.processReadyForQuery(r)
			return nil, err
		default:
			ci.setBad()
			errorf("unknown response for copy query: %q", t)
		}
	}

	// something went wrong, abort COPY before we return
	b = cn.writeBuf('f')
	b.string(err.Error())
	cn.send(b)

	for {
		t, r := cn.recv1()
		switch t {
		case 'c', 'C', 'E':
		case 'Z':
			// correctly aborted, we're done
			cn.processReadyForQuery(r)
			return nil, err
		default:
			ci.setBad()
			errorf("unknown response for CopyFail: %q", t)
		}
	}
}
// flush back-fills the CopyData message length into buf and writes it to
// the wire. Write errors panic and are recovered by the callers'
// deferred errRecover.
func (ci *copyin) flush(buf []byte) {
	// set message length (without message identifier)
	binary.BigEndian.PutUint32(buf[1:], uint32(len(buf)-1))

	_, err := ci.cn.c.Write(buf)
	if err != nil {
		panic(err)
	}
}
// resploop runs in its own goroutine for the duration of the COPY,
// consuming backend messages, recording the final command result or any
// error, and signaling done when the COPY terminates.
func (ci *copyin) resploop() {
	for {
		var r readBuf
		t, err := ci.cn.recvMessage(&r)
		if err != nil {
			ci.setBad()
			ci.setError(err)
			ci.done <- true
			return
		}
		switch t {
		case 'C':
			// complete
			res, _ := ci.cn.parseComplete(r.string())
			ci.setResult(res)
		case 'N': // NoticeResponse
			if n := ci.cn.noticeHandler; n != nil {
				n(parseError(&r))
			}
		case 'Z': // ReadyForQuery: the COPY is over
			ci.cn.processReadyForQuery(&r)
			ci.done <- true
			return
		case 'E':
			err := parseError(&r)
			ci.setError(err)
		default:
			ci.setBad()
			ci.setError(fmt.Errorf("unknown response during CopyIn: %q", t))
			ci.done <- true
			return
		}
	}
}
// setBad marks the underlying connection as unusable.
func (ci *copyin) setBad() {
	ci.Lock()
	defer ci.Unlock()
	ci.cn.bad = true
}
// isBad reports whether the underlying connection has been marked unusable.
func (ci *copyin) isBad() bool {
	ci.Lock()
	defer ci.Unlock()
	return ci.cn.bad
}
// isErrorSet reports whether a sticky error has been recorded.
func (ci *copyin) isErrorSet() bool {
	ci.Lock()
	defer ci.Unlock()
	return ci.err != nil
}
// setError records err unless an earlier error is already sticky. Caller
// must not be holding ci.Mutex.
func (ci *copyin) setError(err error) {
	ci.Lock()
	defer ci.Unlock()
	if ci.err == nil {
		ci.err = err
	}
}
// setResult records the command result reported by the backend.
func (ci *copyin) setResult(result driver.Result) {
	ci.Lock()
	defer ci.Unlock()
	ci.Result = result
}
// getResult returns the command result recorded by resploop, or a zero
// RowsAffected result when none has been reported yet.
//
// The unlock is deferred: the original early return for the nil case
// exited while still holding ci.Mutex, deadlocking any subsequent Lock
// on this copyin.
func (ci *copyin) getResult() driver.Result {
	ci.Lock()
	defer ci.Unlock()
	result := ci.Result
	if result == nil {
		return driver.RowsAffected(0)
	}
	return result
}
// NumInput implements driver.Stmt; -1 tells database/sql the driver does
// not know the number of placeholders.
func (ci *copyin) NumInput() int {
	return -1
}
// Query implements driver.Stmt; a COPY statement cannot return rows.
func (ci *copyin) Query(v []driver.Value) (r driver.Rows, err error) {
	return nil, ErrNotSupported
}
// Exec inserts values into the COPY stream. The insert is asynchronous
// and Exec can return errors from previous Exec calls to the same
// COPY stmt.
//
// You need to call Exec(nil) to sync the COPY stream and to get any
// errors from pending data, since Stmt.Close() doesn't return errors
// to the user.
func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) {
	if ci.closed {
		return nil, errCopyInClosed
	}

	if ci.isBad() {
		return nil, driver.ErrBadConn
	}
	defer ci.cn.errRecover(&err)

	if ci.isErrorSet() {
		return nil, ci.err
	}

	// An empty argument list signals end-of-data: close the stream and
	// surface the final result.
	if len(v) == 0 {
		if err := ci.Close(); err != nil {
			return driver.RowsAffected(0), err
		}
		return ci.getResult(), nil
	}

	// Append one tab-separated, newline-terminated row to the buffer.
	numValues := len(v)
	for i, value := range v {
		ci.buffer = appendEncodedText(&ci.cn.parameterStatus, ci.buffer, value)
		if i < numValues-1 {
			ci.buffer = append(ci.buffer, '\t')
		}
	}
	ci.buffer = append(ci.buffer, '\n')

	if len(ci.buffer) > ciBufferFlushSize {
		ci.flush(ci.buffer)
		// reset buffer, keep bytes for message identifier and length
		ci.buffer = ci.buffer[:5]
	}

	return driver.RowsAffected(0), nil
}
// Close terminates the COPY stream: it flushes any buffered rows, sends
// CopyDone, and waits for resploop to observe the command's completion.
func (ci *copyin) Close() (err error) {
	if ci.closed { // Don't do anything, we're already closed
		return nil
	}
	ci.closed = true

	if ci.isBad() {
		return driver.ErrBadConn
	}
	defer ci.cn.errRecover(&err)

	if len(ci.buffer) > 0 {
		ci.flush(ci.buffer)
	}
	// Avoid touching the scratch buffer as resploop could be using it.
	err = ci.cn.sendSimpleMessage('c')
	if err != nil {
		return err
	}

	// Wait for resploop to see ReadyForQuery (or an error).
	<-ci.done
	ci.cn.inCopy = false

	if ci.isErrorSet() {
		err = ci.err
		return err
	}
	return nil
}

268
vendor/github.com/lib/pq/doc.go generated vendored
View File

@@ -1,268 +0,0 @@
/*
Package pq is a pure Go Postgres driver for the database/sql package.
In most cases clients will use the database/sql package instead of
using this package directly. For example:
import (
"database/sql"
_ "github.com/lib/pq"
)
func main() {
connStr := "user=pqgotest dbname=pqgotest sslmode=verify-full"
db, err := sql.Open("postgres", connStr)
if err != nil {
log.Fatal(err)
}
age := 21
rows, err := db.Query("SELECT name FROM users WHERE age = $1", age)
}
You can also connect to a database using a URL. For example:
connStr := "postgres://pqgotest:password@localhost/pqgotest?sslmode=verify-full"
db, err := sql.Open("postgres", connStr)
Connection String Parameters
Similarly to libpq, when establishing a connection using pq you are expected to
supply a connection string containing zero or more parameters.
A subset of the connection parameters supported by libpq are also supported by pq.
Additionally, pq also lets you specify run-time parameters (such as search_path or work_mem)
directly in the connection string. This is different from libpq, which does not allow
run-time parameters in the connection string, instead requiring you to supply
them in the options parameter.
For compatibility with libpq, the following special connection parameters are
supported:
* dbname - The name of the database to connect to
* user - The user to sign in as
* password - The user's password
* host - The host to connect to. Values that start with / are for unix
domain sockets. (default is localhost)
* port - The port to bind to. (default is 5432)
* sslmode - Whether or not to use SSL (default is require, this is not
the default for libpq)
* fallback_application_name - An application_name to fall back to if one isn't provided.
* connect_timeout - Maximum wait for connection, in seconds. Zero or
not specified means wait indefinitely.
* sslcert - Cert file location. The file must contain PEM encoded data.
* sslkey - Key file location. The file must contain PEM encoded data.
* sslrootcert - The location of the root certificate file. The file
must contain PEM encoded data.
Valid values for sslmode are:
* disable - No SSL
* require - Always SSL (skip verification)
* verify-ca - Always SSL (verify that the certificate presented by the
server was signed by a trusted CA)
* verify-full - Always SSL (verify that the certification presented by
the server was signed by a trusted CA and the server host name
matches the one in the certificate)
See http://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING
for more information about connection string parameters.
Use single quotes for values that contain whitespace:
"user=pqgotest password='with spaces'"
A backslash will escape the next character in values:
"user=space\ man password='it\'s valid'"
Note that the connection parameter client_encoding (which sets the
text encoding for the connection) may be set but must be "UTF8",
matching with the same rules as Postgres. It is an error to provide
any other value.
In addition to the parameters listed above, any run-time parameter that can be
set at backend start time can be set in the connection string. For more
information, see
http://www.postgresql.org/docs/current/static/runtime-config.html.
Most environment variables as specified at http://www.postgresql.org/docs/current/static/libpq-envars.html
supported by libpq are also supported by pq. If any of the environment
variables not supported by pq are set, pq will panic during connection
establishment. Environment variables have a lower precedence than explicitly
provided connection parameters.
The pgpass mechanism as described in http://www.postgresql.org/docs/current/static/libpq-pgpass.html
is supported, but on Windows PGPASSFILE must be specified explicitly.
Queries
database/sql does not dictate any specific format for parameter
markers in query strings, and pq uses the Postgres-native ordinal markers,
as shown above. The same marker can be reused for the same parameter:
rows, err := db.Query(`SELECT name FROM users WHERE favorite_fruit = $1
OR age BETWEEN $2 AND $2 + 3`, "orange", 64)
pq does not support the LastInsertId() method of the Result type in database/sql.
To return the identifier of an INSERT (or UPDATE or DELETE), use the Postgres
RETURNING clause with a standard Query or QueryRow call:
var userid int
err := db.QueryRow(`INSERT INTO users(name, favorite_fruit, age)
VALUES('beatrice', 'starfruit', 93) RETURNING id`).Scan(&userid)
For more details on RETURNING, see the Postgres documentation:
http://www.postgresql.org/docs/current/static/sql-insert.html
http://www.postgresql.org/docs/current/static/sql-update.html
http://www.postgresql.org/docs/current/static/sql-delete.html
For additional instructions on querying see the documentation for the database/sql package.
Data Types
Parameters pass through driver.DefaultParameterConverter before they are handled
by this package. When the binary_parameters connection option is enabled,
[]byte values are sent directly to the backend as data in binary format.
This package returns the following types for values from the PostgreSQL backend:
- integer types smallint, integer, and bigint are returned as int64
- floating-point types real and double precision are returned as float64
- character types char, varchar, and text are returned as string
- temporal types date, time, timetz, timestamp, and timestamptz are
returned as time.Time
- the boolean type is returned as bool
- the bytea type is returned as []byte
All other types are returned directly from the backend as []byte values in text format.
Errors
pq may return errors of type *pq.Error which can be interrogated for error details:
if err, ok := err.(*pq.Error); ok {
fmt.Println("pq error:", err.Code.Name())
}
See the pq.Error type for details.
Bulk imports
You can perform bulk imports by preparing a statement returned by pq.CopyIn (or
pq.CopyInSchema) in an explicit transaction (sql.Tx). The returned statement
handle can then be repeatedly "executed" to copy data into the target table.
After all data has been processed you should call Exec() once with no arguments
to flush all buffered data. Any call to Exec() might return an error which
should be handled appropriately, but because of the internal buffering an error
returned by Exec() might not be related to the data passed in the call that
failed.
CopyIn uses COPY FROM internally. It is not possible to COPY outside of an
explicit transaction in pq.
Usage example:
txn, err := db.Begin()
if err != nil {
log.Fatal(err)
}
stmt, err := txn.Prepare(pq.CopyIn("users", "name", "age"))
if err != nil {
log.Fatal(err)
}
for _, user := range users {
_, err = stmt.Exec(user.Name, int64(user.Age))
if err != nil {
log.Fatal(err)
}
}
_, err = stmt.Exec()
if err != nil {
log.Fatal(err)
}
err = stmt.Close()
if err != nil {
log.Fatal(err)
}
err = txn.Commit()
if err != nil {
log.Fatal(err)
}
Notifications
PostgreSQL supports a simple publish/subscribe model over database
connections. See http://www.postgresql.org/docs/current/static/sql-notify.html
for more information about the general mechanism.
To start listening for notifications, you first have to open a new connection
to the database by calling NewListener. This connection can not be used for
anything other than LISTEN / NOTIFY. Calling Listen will open a "notification
channel"; once a notification channel is open, a notification generated on that
channel will effect a send on the Listener.Notify channel. A notification
channel will remain open until Unlisten is called, though connection loss might
result in some notifications being lost. To solve this problem, Listener sends
a nil pointer over the Notify channel any time the connection is re-established
following a connection loss. The application can get information about the
state of the underlying connection by setting an event callback in the call to
NewListener.
A single Listener can safely be used from concurrent goroutines, which means
that there is often no need to create more than one Listener in your
application. However, a Listener is always connected to a single database, so
you will need to create a new Listener instance for every database you want to
receive notifications in.
The channel name in both Listen and Unlisten is case sensitive, and can contain
any characters legal in an identifier (see
http://www.postgresql.org/docs/current/static/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS
for more information). Note that the channel name will be truncated to 63
bytes by the PostgreSQL server.
You can find a complete, working example of Listener usage at
https://godoc.org/github.com/lib/pq/example/listen.
Kerberos Support
If you need support for Kerberos authentication, add the following to your main
package:
import "github.com/lib/pq/auth/kerberos"
func init() {
pq.RegisterGSSProvider(func() (pq.Gss, error) { return kerberos.NewGSS() })
}
This package is in a separate module so that users who don't need Kerberos
don't have to download unnecessary dependencies.
When imported, additional connection string parameters are supported:
* krbsrvname - GSS (Kerberos) service name when constructing the
SPN (default is `postgres`). This will be combined with the host
to form the full SPN: `krbsrvname/host`.
* krbspn - GSS (Kerberos) SPN. This takes priority over
`krbsrvname` if present.
*/
package pq

622
vendor/github.com/lib/pq/encode.go generated vendored
View File

@@ -1,622 +0,0 @@
package pq
import (
"bytes"
"database/sql/driver"
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"math"
"regexp"
"strconv"
"strings"
"sync"
"time"
"github.com/lib/pq/oid"
)
// time2400Regex matches the special "24:00" clock value (with optional
// seconds, fractional seconds, and a trailing zone), which Go's time parser
// rejects; mustParse rewrites it to 00:00 and adds a day afterwards.
var time2400Regex = regexp.MustCompile(`^(24:00(?::00(?:\.0+)?)?)(?:[Z+-].*)?$`)
// binaryEncode encodes a parameter whose destination OID is unknown.
// Raw []byte values pass through untouched; everything else is routed
// through the generic text encoder with the "unknown" type OID.
func binaryEncode(parameterStatus *parameterStatus, x interface{}) []byte {
switch v := x.(type) {
case []byte:
return v
default:
return encode(parameterStatus, x, oid.T_unknown)
}
}
// encode renders x in Postgres' text wire format for the given type OID.
// Only the driver's supported parameter types are handled; any other type
// panics via errorf (recovered at the connection layer).
func encode(parameterStatus *parameterStatus, x interface{}, pgtypOid oid.Oid) []byte {
switch v := x.(type) {
case int64:
return strconv.AppendInt(nil, v, 10)
case float64:
return strconv.AppendFloat(nil, v, 'f', -1, 64)
case []byte:
// bytea gets its own hex/escape encoding; other byte parameters are
// assumed to already be in wire text form.
if pgtypOid == oid.T_bytea {
return encodeBytea(parameterStatus.serverVersion, v)
}
return v
case string:
if pgtypOid == oid.T_bytea {
return encodeBytea(parameterStatus.serverVersion, []byte(v))
}
return []byte(v)
case bool:
return strconv.AppendBool(nil, v)
case time.Time:
return formatTs(v)
default:
errorf("encode: unknown type for %T", v)
}
panic("not reached")
}
// decode converts a wire-format value into a Go value, dispatching on the
// result format (binary or text) reported by the server.
func decode(parameterStatus *parameterStatus, s []byte, typ oid.Oid, f format) interface{} {
switch f {
case formatBinary:
return binaryDecode(parameterStatus, s, typ)
case formatText:
return textDecode(parameterStatus, s, typ)
default:
panic("not reached")
}
}
// binaryDecode decodes a binary-format value. Only a small set of types is
// supported in binary form; anything else panics via errorf (recovered at
// the connection layer).
func binaryDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} {
switch typ {
case oid.T_bytea:
return s
case oid.T_int8:
return int64(binary.BigEndian.Uint64(s))
case oid.T_int4:
// Sign-extend via int32 before widening to int64.
return int64(int32(binary.BigEndian.Uint32(s)))
case oid.T_int2:
return int64(int16(binary.BigEndian.Uint16(s)))
case oid.T_uuid:
b, err := decodeUUIDBinary(s)
if err != nil {
panic(err)
}
return b
default:
errorf("don't know how to decode binary parameter of type %d", uint32(typ))
}
panic("not reached")
}
// textDecode decodes a text-format value into the Go type conventionally
// returned for the given OID. Unrecognized OIDs are returned as raw bytes.
// Malformed values panic via errorf (recovered at the connection layer).
func textDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} {
switch typ {
case oid.T_char, oid.T_varchar, oid.T_text:
return string(s)
case oid.T_bytea:
b, err := parseBytea(s)
if err != nil {
errorf("%s", err)
}
return b
case oid.T_timestamptz:
// Interpreted relative to the session's TimeZone setting where it
// agrees with the offset sent on the wire; see ParseTimestamp.
return parseTs(parameterStatus.currentLocation, string(s))
case oid.T_timestamp, oid.T_date:
return parseTs(nil, string(s))
case oid.T_time:
return mustParse("15:04:05", typ, s)
case oid.T_timetz:
return mustParse("15:04:05-07", typ, s)
case oid.T_bool:
// The server sends 't' or 'f'.
return s[0] == 't'
case oid.T_int8, oid.T_int4, oid.T_int2:
i, err := strconv.ParseInt(string(s), 10, 64)
if err != nil {
errorf("%s", err)
}
return i
case oid.T_float4, oid.T_float8:
// We always use 64 bit parsing, regardless of whether the input text is for
// a float4 or float8, because clients expect float64s for all float datatypes
// and returning a 32-bit parsed float64 produces lossy results.
f, err := strconv.ParseFloat(string(s), 64)
if err != nil {
errorf("%s", err)
}
return f
}
return s
}
// appendEncodedText encodes item in text format as required by COPY
// and appends to buf. nil becomes the COPY null marker "\N"; unsupported
// types panic via errorf (recovered at the connection layer).
func appendEncodedText(parameterStatus *parameterStatus, buf []byte, x interface{}) []byte {
switch v := x.(type) {
case int64:
return strconv.AppendInt(buf, v, 10)
case float64:
return strconv.AppendFloat(buf, v, 'f', -1, 64)
case []byte:
encodedBytea := encodeBytea(parameterStatus.serverVersion, v)
return appendEscapedText(buf, string(encodedBytea))
case string:
return appendEscapedText(buf, v)
case bool:
return strconv.AppendBool(buf, v)
case time.Time:
return append(buf, formatTs(v)...)
case nil:
return append(buf, "\\N"...)
default:
errorf("encode: unknown type for %T", v)
}
panic("not reached")
}
// appendEscapedText appends text to buf in the escaped form required by the
// COPY text format: backslash, newline, carriage return and tab become their
// two-character escape sequences; all other bytes are copied verbatim.
func appendEscapedText(buf []byte, text string) []byte {
	// Fast path: if nothing needs escaping, append the string in one go.
	first := strings.IndexAny(text, "\\\n\r\t")
	if first < 0 {
		return append(buf, text...)
	}
	// Copy the clean prefix, then translate the remainder byte by byte.
	out := append(buf, text[:first]...)
	for i := first; i < len(text); i++ {
		switch b := text[i]; b {
		case '\\':
			out = append(out, '\\', '\\')
		case '\n':
			out = append(out, '\\', 'n')
		case '\r':
			out = append(out, '\\', 'r')
		case '\t':
			out = append(out, '\\', 't')
		default:
			out = append(out, b)
		}
	}
	return out
}
// mustParse parses a time-of-day or zoned time value with layout f,
// panicking via errorf on failure. It first patches two Postgres quirks:
// half-hour zone offsets (a trailing ":MM" that Go's "-07" layout cannot
// express) and the "24:00" clock value, which is parsed as 00:00 plus a day.
func mustParse(f string, typ oid.Oid, s []byte) time.Time {
str := string(s)
// check for a 30-minute-offset timezone
if (typ == oid.T_timestamptz || typ == oid.T_timetz) &&
str[len(str)-3] == ':' {
f += ":00"
}
// Special case for 24:00 time.
// Unfortunately, golang does not parse 24:00 as a proper time.
// In this case, we want to try "round to the next day", to differentiate.
// As such, we find if the 24:00 time matches at the beginning; if so,
// we default it back to 00:00 but add a day later.
var is2400Time bool
switch typ {
case oid.T_timetz, oid.T_time:
if matches := time2400Regex.FindStringSubmatch(str); matches != nil {
// Concatenate timezone information at the back.
str = "00:00:00" + str[len(matches[1]):]
is2400Time = true
}
}
t, err := time.Parse(f, str)
if err != nil {
errorf("decode: %s", err)
}
if is2400Time {
t = t.Add(24 * time.Hour)
}
return t
}
// errInvalidTimestamp reports a timestamp string that does not match the
// expected Postgres text layout.
var errInvalidTimestamp = errors.New("invalid timestamp")

// timestampParser records the first error hit while picking a timestamp
// string apart; once err is set, every later call becomes a no-op, which
// lets ParseTimestamp run straight through without per-step checks.
type timestampParser struct {
	err error
}

// expect records an error unless str[pos] exists and equals char.
func (p *timestampParser) expect(str string, char byte, pos int) {
	switch {
	case p.err != nil:
		// Sticky error: ignore further input.
	case pos >= len(str):
		p.err = errInvalidTimestamp
	case str[pos] != char:
		p.err = fmt.Errorf("expected '%v' at position %v; got '%v'", char, pos, str[pos])
	}
}

// mustAtoi parses str[begin:end] as a decimal integer, recording an error
// (and returning 0) when the range is out of bounds or not numeric.
func (p *timestampParser) mustAtoi(str string, begin int, end int) int {
	if p.err != nil {
		return 0
	}
	if begin < 0 || end < 0 || begin > end || end > len(str) {
		p.err = errInvalidTimestamp
		return 0
	}
	n, convErr := strconv.Atoi(str[begin:end])
	if convErr != nil {
		p.err = fmt.Errorf("expected number; got '%v'", str)
		return 0
	}
	return n
}
// The location cache caches the time zones typically used by the client.
type locationCache struct {
cache map[int]*time.Location
lock sync.Mutex
}
// All connections share the same list of timezones. Benchmarking shows that
// about 5% speed could be gained by putting the cache in the connection and
// losing the mutex, at the cost of a small amount of memory and a somewhat
// significant increase in code complexity.
var globalLocationCache = newLocationCache()
// newLocationCache returns an empty, ready-to-use cache.
func newLocationCache() *locationCache {
return &locationCache{cache: make(map[int]*time.Location)}
}
// Returns the cached timezone for the specified offset, creating and caching
// it if necessary. The offset is in seconds east of UTC, matching
// time.FixedZone.
func (c *locationCache) getLocation(offset int) *time.Location {
c.lock.Lock()
defer c.lock.Unlock()
location, ok := c.cache[offset]
if !ok {
location = time.FixedZone("", offset)
c.cache[offset] = location
}
return location
}
// Package-level state for EnableInfinityTs; see that function for semantics.
var infinityTsEnabled = false
var infinityTsNegative time.Time
var infinityTsPositive time.Time
// Panic messages used by EnableInfinityTs.
const (
infinityTsEnabledAlready = "pq: infinity timestamp enabled already"
infinityTsNegativeMustBeSmaller = "pq: infinity timestamp: negative value must be smaller (before) than positive"
)
// EnableInfinityTs controls the handling of Postgres' "-infinity" and
// "infinity" "timestamp"s.
//
// If EnableInfinityTs is not called, "-infinity" and "infinity" will return
// []byte("-infinity") and []byte("infinity") respectively, and potentially
// cause error "sql: Scan error on column index 0: unsupported driver -> Scan
// pair: []uint8 -> *time.Time", when scanning into a time.Time value.
//
// Once EnableInfinityTs has been called, all connections created using this
// driver will decode Postgres' "-infinity" and "infinity" for "timestamp",
// "timestamp with time zone" and "date" types to the predefined minimum and
// maximum times, respectively. When encoding time.Time values, any time which
// equals or precedes the predefined minimum time will be encoded to
// "-infinity". Any values at or past the maximum time will similarly be
// encoded to "infinity".
//
// If EnableInfinityTs is called with negative >= positive, it will panic.
// Calling EnableInfinityTs after a connection has been established results in
// undefined behavior. If EnableInfinityTs is called more than once, it will
// panic.
func EnableInfinityTs(negative time.Time, positive time.Time) {
if infinityTsEnabled {
panic(infinityTsEnabledAlready)
}
if !negative.Before(positive) {
panic(infinityTsNegativeMustBeSmaller)
}
infinityTsEnabled = true
infinityTsNegative = negative
infinityTsPositive = positive
}
/*
 * Testing might want to toggle infinityTsEnabled
 */
func disableInfinityTs() {
infinityTsEnabled = false
}
// This is a time function specific to the Postgres default DateStyle
// setting ("ISO, MDY"), the only one we currently support. This
// accounts for the discrepancies between the parsing available with
// time.Parse and the Postgres date formatting quirks.
//
// parseTs returns either a time.Time, or — for the infinity literals when
// EnableInfinityTs has not been called — the raw []byte. It panics on
// malformed input (recovered at the connection layer).
func parseTs(currentLocation *time.Location, str string) interface{} {
switch str {
case "-infinity":
if infinityTsEnabled {
return infinityTsNegative
}
return []byte(str)
case "infinity":
if infinityTsEnabled {
return infinityTsPositive
}
return []byte(str)
}
t, err := ParseTimestamp(currentLocation, str)
if err != nil {
panic(err)
}
return t
}
// ParseTimestamp parses Postgres' text format. It returns a time.Time in
// currentLocation iff that time's offset agrees with the offset sent from the
// Postgres server. Otherwise, ParseTimestamp returns a time.Time with the
// fixed offset offset provided by the Postgres server.
//
// The expected layout is "YYYY-MM-DD[ HH:MM:SS[.frac]][±TZ[:MM[:SS]]][ BC]".
// Errors are accumulated in a timestampParser, so the index arithmetic below
// runs straight through and the first failure is reported at the end.
func ParseTimestamp(currentLocation *time.Location, str string) (time.Time, error) {
p := timestampParser{}
monSep := strings.IndexRune(str, '-')
// this is Gregorian year, not ISO Year
// In Gregorian system, the year 1 BC is followed by AD 1
year := p.mustAtoi(str, 0, monSep)
daySep := monSep + 3
month := p.mustAtoi(str, monSep+1, daySep)
p.expect(str, '-', daySep)
timeSep := daySep + 3
day := p.mustAtoi(str, daySep+1, timeSep)
// minLen is the length of a date-only value; anything longer carries a
// time-of-day component.
minLen := monSep + len("01-01") + 1
isBC := strings.HasSuffix(str, " BC")
if isBC {
minLen += 3
}
var hour, minute, second int
if len(str) > minLen {
p.expect(str, ' ', timeSep)
minSep := timeSep + 3
p.expect(str, ':', minSep)
hour = p.mustAtoi(str, timeSep+1, minSep)
secSep := minSep + 3
p.expect(str, ':', secSep)
minute = p.mustAtoi(str, minSep+1, secSep)
secEnd := secSep + 3
second = p.mustAtoi(str, secSep+1, secEnd)
}
remainderIdx := monSep + len("01-01 00:00:00") + 1
// Three optional (but ordered) sections follow: the
// fractional seconds, the time zone offset, and the BC
// designation. We set them up here and adjust the other
// offsets if the preceding sections exist.
nanoSec := 0
tzOff := 0
if remainderIdx < len(str) && str[remainderIdx] == '.' {
fracStart := remainderIdx + 1
fracOff := strings.IndexAny(str[fracStart:], "-+ ")
if fracOff < 0 {
fracOff = len(str) - fracStart
}
// Scale the digits to nanoseconds: e.g. ".5" is 500000000ns.
fracSec := p.mustAtoi(str, fracStart, fracStart+fracOff)
nanoSec = fracSec * (1000000000 / int(math.Pow(10, float64(fracOff))))
remainderIdx += fracOff + 1
}
if tzStart := remainderIdx; tzStart < len(str) && (str[tzStart] == '-' || str[tzStart] == '+') {
// time zone separator is always '-' or '+' (UTC is +00)
var tzSign int
switch c := str[tzStart]; c {
case '-':
tzSign = -1
case '+':
tzSign = +1
default:
return time.Time{}, fmt.Errorf("expected '-' or '+' at position %v; got %v", tzStart, c)
}
tzHours := p.mustAtoi(str, tzStart+1, tzStart+3)
remainderIdx += 3
var tzMin, tzSec int
if remainderIdx < len(str) && str[remainderIdx] == ':' {
tzMin = p.mustAtoi(str, remainderIdx+1, remainderIdx+3)
remainderIdx += 3
}
if remainderIdx < len(str) && str[remainderIdx] == ':' {
tzSec = p.mustAtoi(str, remainderIdx+1, remainderIdx+3)
remainderIdx += 3
}
tzOff = tzSign * ((tzHours * 60 * 60) + (tzMin * 60) + tzSec)
}
var isoYear int
if isBC {
// Map Gregorian "N BC" to ISO year 1-N, and consume the " BC" suffix.
isoYear = 1 - year
remainderIdx += 3
} else {
isoYear = year
}
if remainderIdx < len(str) {
return time.Time{}, fmt.Errorf("expected end of input, got %v", str[remainderIdx:])
}
t := time.Date(isoYear, time.Month(month), day,
hour, minute, second, nanoSec,
globalLocationCache.getLocation(tzOff))
if currentLocation != nil {
// Set the location of the returned Time based on the session's
// TimeZone value, but only if the local time zone database agrees with
// the remote database on the offset.
lt := t.In(currentLocation)
_, newOff := lt.Zone()
if newOff == tzOff {
t = lt
}
}
return t, p.err
}
// formatTs formats t into a format postgres understands.
// When EnableInfinityTs is active, times at or beyond the configured bounds
// collapse to the literal "-infinity"/"infinity" strings.
func formatTs(t time.Time) []byte {
if infinityTsEnabled {
// t <= -infinity : ! (t > -infinity)
if !t.After(infinityTsNegative) {
return []byte("-infinity")
}
// t >= infinity : ! (!t < infinity)
if !t.Before(infinityTsPositive) {
return []byte("infinity")
}
}
return FormatTimestamp(t)
}
// FormatTimestamp formats t into Postgres' text format for timestamps.
func FormatTimestamp(t time.Time) []byte {
// Need to send dates before 0001 A.D. with " BC" suffix, instead of the
// minus sign preferred by Go.
// Beware, "0000" in ISO is "1 BC", "-0001" is "2 BC" and so on
bc := false
if t.Year() <= 0 {
// flip year sign, and add 1, e.g: "0" will be "1", and "-10" will be "11"
t = t.AddDate((-t.Year())*2+1, 0, 0)
bc = true
}
b := []byte(t.Format("2006-01-02 15:04:05.999999999Z07:00"))
_, offset := t.Zone()
offset %= 60
if offset != 0 {
// RFC3339Nano already printed the minus sign
if offset < 0 {
offset = -offset
}
b = append(b, ':')
if offset < 10 {
b = append(b, '0')
}
b = strconv.AppendInt(b, int64(offset), 10)
}
if bc {
b = append(b, " BC"...)
}
return b
}
// parseBytea decodes a bytea value received from the server. Both the "hex"
// format (a "\x" prefix followed by hex digits) and the legacy "escape"
// format (doubled backslashes and three-digit octal codes) are supported.
func parseBytea(s []byte) (result []byte, err error) {
	if len(s) >= 2 && bytes.Equal(s[:2], []byte("\\x")) {
		// bytea_output = hex: everything after "\x" is plain hex digits.
		payload := s[2:]
		result = make([]byte, hex.DecodedLen(len(payload)))
		if _, err := hex.Decode(result, payload); err != nil {
			return nil, err
		}
		return result, nil
	}
	// bytea_output = escape
	for len(s) > 0 {
		if s[0] != '\\' {
			// A run of raw bytes: copy everything up to the next
			// backslash (or the end) in a single append.
			next := bytes.IndexByte(s, '\\')
			if next == -1 {
				result = append(result, s...)
				break
			}
			result = append(result, s[:next]...)
			s = s[next:]
			continue
		}
		// "\\" encodes one literal backslash.
		if len(s) >= 2 && s[1] == '\\' {
			result = append(result, '\\')
			s = s[2:]
			continue
		}
		// Otherwise a backslash must introduce a three-digit octal code.
		if len(s) < 4 {
			return nil, fmt.Errorf("invalid bytea sequence %v", s)
		}
		code, perr := strconv.ParseInt(string(s[1:4]), 8, 9)
		if perr != nil {
			return nil, fmt.Errorf("could not parse bytea value: %s", perr.Error())
		}
		result = append(result, byte(code))
		s = s[4:]
	}
	return result, nil
}
// encodeBytea encodes v for transmission as a bytea parameter. Servers from
// 9.0 on understand the compact hex format; older servers get the legacy
// "escape" encoding.
func encodeBytea(serverVersion int, v []byte) (result []byte) {
	if serverVersion >= 90000 {
		// Hex format: "\x" followed by two hex digits per input byte.
		result = make([]byte, 2+hex.EncodedLen(len(v)))
		copy(result, "\\x")
		hex.Encode(result[2:], v)
		return result
	}
	// Escape format: printable ASCII passes through, a backslash is
	// doubled, and everything else becomes a three-digit octal escape.
	for _, b := range v {
		switch {
		case b == '\\':
			result = append(result, '\\', '\\')
		case b < 0x20 || b > 0x7e:
			result = append(result, []byte(fmt.Sprintf("\\%03o", b))...)
		default:
			result = append(result, b)
		}
	}
	return result
}
// NullTime represents a time.Time that may be null. NullTime implements the
// sql.Scanner interface so it can be used as a scan destination, similar to
// sql.NullString.
type NullTime struct {
Time time.Time
Valid bool // Valid is true if Time is not NULL
}
// Scan implements the Scanner interface.
func (nt *NullTime) Scan(value interface{}) error {
nt.Time, nt.Valid = value.(time.Time)
return nil
}
// Value implements the driver Valuer interface.
func (nt NullTime) Value() (driver.Value, error) {
if !nt.Valid {
return nil, nil
}
return nt.Time, nil
}

515
vendor/github.com/lib/pq/error.go generated vendored
View File

@@ -1,515 +0,0 @@
package pq
import (
"database/sql/driver"
"fmt"
"io"
"net"
"runtime"
)
// Error severities
const (
Efatal = "FATAL"
Epanic = "PANIC"
Ewarning = "WARNING"
Enotice = "NOTICE"
Edebug = "DEBUG"
Einfo = "INFO"
Elog = "LOG"
)
// Error represents an error communicating with the server.
//
// See http://www.postgresql.org/docs/current/static/protocol-error-fields.html for details of the fields
//
// Error implements the error interface; parseError fills it from the
// single-letter field codes of an ErrorResponse message.
type Error struct {
Severity string
Code ErrorCode
Message string
Detail string
Hint string
Position string
InternalPosition string
InternalQuery string
Where string
Schema string
Table string
Column string
DataTypeName string
Constraint string
File string
Line string
Routine string
}
// ErrorCode is a five-character error code.
type ErrorCode string
// Name returns a more human friendly rendering of the error code, namely the
// "condition name".
//
// See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for
// details.
func (ec ErrorCode) Name() string {
return errorCodeNames[ec]
}
// ErrorClass is only the class part of an error code.
type ErrorClass string
// Name returns the condition name of an error class. It is equivalent to the
// condition name of the "standard" error code (i.e. the one having the last
// three characters "000").
func (ec ErrorClass) Name() string {
return errorCodeNames[ErrorCode(ec+"000")]
}
// Class returns the error class, e.g. "28".
//
// See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for
// details.
func (ec ErrorCode) Class() ErrorClass {
return ErrorClass(ec[0:2])
}
// errorCodeNames is a mapping between the five-character error codes and the
// human readable "condition names". It is derived from the list at
// http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html
var errorCodeNames = map[ErrorCode]string{
// Class 00 - Successful Completion
"00000": "successful_completion",
// Class 01 - Warning
"01000": "warning",
"0100C": "dynamic_result_sets_returned",
"01008": "implicit_zero_bit_padding",
"01003": "null_value_eliminated_in_set_function",
"01007": "privilege_not_granted",
"01006": "privilege_not_revoked",
"01004": "string_data_right_truncation",
"01P01": "deprecated_feature",
// Class 02 - No Data (this is also a warning class per the SQL standard)
"02000": "no_data",
"02001": "no_additional_dynamic_result_sets_returned",
// Class 03 - SQL Statement Not Yet Complete
"03000": "sql_statement_not_yet_complete",
// Class 08 - Connection Exception
"08000": "connection_exception",
"08003": "connection_does_not_exist",
"08006": "connection_failure",
"08001": "sqlclient_unable_to_establish_sqlconnection",
"08004": "sqlserver_rejected_establishment_of_sqlconnection",
"08007": "transaction_resolution_unknown",
"08P01": "protocol_violation",
// Class 09 - Triggered Action Exception
"09000": "triggered_action_exception",
// Class 0A - Feature Not Supported
"0A000": "feature_not_supported",
// Class 0B - Invalid Transaction Initiation
"0B000": "invalid_transaction_initiation",
// Class 0F - Locator Exception
"0F000": "locator_exception",
"0F001": "invalid_locator_specification",
// Class 0L - Invalid Grantor
"0L000": "invalid_grantor",
"0LP01": "invalid_grant_operation",
// Class 0P - Invalid Role Specification
"0P000": "invalid_role_specification",
// Class 0Z - Diagnostics Exception
"0Z000": "diagnostics_exception",
"0Z002": "stacked_diagnostics_accessed_without_active_handler",
// Class 20 - Case Not Found
"20000": "case_not_found",
// Class 21 - Cardinality Violation
"21000": "cardinality_violation",
// Class 22 - Data Exception
"22000": "data_exception",
"2202E": "array_subscript_error",
"22021": "character_not_in_repertoire",
"22008": "datetime_field_overflow",
"22012": "division_by_zero",
"22005": "error_in_assignment",
"2200B": "escape_character_conflict",
"22022": "indicator_overflow",
"22015": "interval_field_overflow",
"2201E": "invalid_argument_for_logarithm",
"22014": "invalid_argument_for_ntile_function",
"22016": "invalid_argument_for_nth_value_function",
"2201F": "invalid_argument_for_power_function",
"2201G": "invalid_argument_for_width_bucket_function",
"22018": "invalid_character_value_for_cast",
"22007": "invalid_datetime_format",
"22019": "invalid_escape_character",
"2200D": "invalid_escape_octet",
"22025": "invalid_escape_sequence",
"22P06": "nonstandard_use_of_escape_character",
"22010": "invalid_indicator_parameter_value",
"22023": "invalid_parameter_value",
"2201B": "invalid_regular_expression",
"2201W": "invalid_row_count_in_limit_clause",
"2201X": "invalid_row_count_in_result_offset_clause",
"22009": "invalid_time_zone_displacement_value",
"2200C": "invalid_use_of_escape_character",
"2200G": "most_specific_type_mismatch",
"22004": "null_value_not_allowed",
"22002": "null_value_no_indicator_parameter",
"22003": "numeric_value_out_of_range",
"2200H": "sequence_generator_limit_exceeded",
"22026": "string_data_length_mismatch",
"22001": "string_data_right_truncation",
"22011": "substring_error",
"22027": "trim_error",
"22024": "unterminated_c_string",
"2200F": "zero_length_character_string",
"22P01": "floating_point_exception",
"22P02": "invalid_text_representation",
"22P03": "invalid_binary_representation",
"22P04": "bad_copy_file_format",
"22P05": "untranslatable_character",
"2200L": "not_an_xml_document",
"2200M": "invalid_xml_document",
"2200N": "invalid_xml_content",
"2200S": "invalid_xml_comment",
"2200T": "invalid_xml_processing_instruction",
// Class 23 - Integrity Constraint Violation
"23000": "integrity_constraint_violation",
"23001": "restrict_violation",
"23502": "not_null_violation",
"23503": "foreign_key_violation",
"23505": "unique_violation",
"23514": "check_violation",
"23P01": "exclusion_violation",
// Class 24 - Invalid Cursor State
"24000": "invalid_cursor_state",
// Class 25 - Invalid Transaction State
"25000": "invalid_transaction_state",
"25001": "active_sql_transaction",
"25002": "branch_transaction_already_active",
"25008": "held_cursor_requires_same_isolation_level",
"25003": "inappropriate_access_mode_for_branch_transaction",
"25004": "inappropriate_isolation_level_for_branch_transaction",
"25005": "no_active_sql_transaction_for_branch_transaction",
"25006": "read_only_sql_transaction",
"25007": "schema_and_data_statement_mixing_not_supported",
"25P01": "no_active_sql_transaction",
"25P02": "in_failed_sql_transaction",
// Class 26 - Invalid SQL Statement Name
"26000": "invalid_sql_statement_name",
// Class 27 - Triggered Data Change Violation
"27000": "triggered_data_change_violation",
// Class 28 - Invalid Authorization Specification
"28000": "invalid_authorization_specification",
"28P01": "invalid_password",
// Class 2B - Dependent Privilege Descriptors Still Exist
"2B000": "dependent_privilege_descriptors_still_exist",
"2BP01": "dependent_objects_still_exist",
// Class 2D - Invalid Transaction Termination
"2D000": "invalid_transaction_termination",
// Class 2F - SQL Routine Exception
"2F000": "sql_routine_exception",
"2F005": "function_executed_no_return_statement",
"2F002": "modifying_sql_data_not_permitted",
"2F003": "prohibited_sql_statement_attempted",
"2F004": "reading_sql_data_not_permitted",
// Class 34 - Invalid Cursor Name
"34000": "invalid_cursor_name",
// Class 38 - External Routine Exception
"38000": "external_routine_exception",
"38001": "containing_sql_not_permitted",
"38002": "modifying_sql_data_not_permitted",
"38003": "prohibited_sql_statement_attempted",
"38004": "reading_sql_data_not_permitted",
// Class 39 - External Routine Invocation Exception
"39000": "external_routine_invocation_exception",
"39001": "invalid_sqlstate_returned",
"39004": "null_value_not_allowed",
"39P01": "trigger_protocol_violated",
"39P02": "srf_protocol_violated",
// Class 3B - Savepoint Exception
"3B000": "savepoint_exception",
"3B001": "invalid_savepoint_specification",
// Class 3D - Invalid Catalog Name
"3D000": "invalid_catalog_name",
// Class 3F - Invalid Schema Name
"3F000": "invalid_schema_name",
// Class 40 - Transaction Rollback
"40000": "transaction_rollback",
"40002": "transaction_integrity_constraint_violation",
"40001": "serialization_failure",
"40003": "statement_completion_unknown",
"40P01": "deadlock_detected",
// Class 42 - Syntax Error or Access Rule Violation
"42000": "syntax_error_or_access_rule_violation",
"42601": "syntax_error",
"42501": "insufficient_privilege",
"42846": "cannot_coerce",
"42803": "grouping_error",
"42P20": "windowing_error",
"42P19": "invalid_recursion",
"42830": "invalid_foreign_key",
"42602": "invalid_name",
"42622": "name_too_long",
"42939": "reserved_name",
"42804": "datatype_mismatch",
"42P18": "indeterminate_datatype",
"42P21": "collation_mismatch",
"42P22": "indeterminate_collation",
"42809": "wrong_object_type",
"42703": "undefined_column",
"42883": "undefined_function",
"42P01": "undefined_table",
"42P02": "undefined_parameter",
"42704": "undefined_object",
"42701": "duplicate_column",
"42P03": "duplicate_cursor",
"42P04": "duplicate_database",
"42723": "duplicate_function",
"42P05": "duplicate_prepared_statement",
"42P06": "duplicate_schema",
"42P07": "duplicate_table",
"42712": "duplicate_alias",
"42710": "duplicate_object",
"42702": "ambiguous_column",
"42725": "ambiguous_function",
"42P08": "ambiguous_parameter",
"42P09": "ambiguous_alias",
"42P10": "invalid_column_reference",
"42611": "invalid_column_definition",
"42P11": "invalid_cursor_definition",
"42P12": "invalid_database_definition",
"42P13": "invalid_function_definition",
"42P14": "invalid_prepared_statement_definition",
"42P15": "invalid_schema_definition",
"42P16": "invalid_table_definition",
"42P17": "invalid_object_definition",
// Class 44 - WITH CHECK OPTION Violation
"44000": "with_check_option_violation",
// Class 53 - Insufficient Resources
"53000": "insufficient_resources",
"53100": "disk_full",
"53200": "out_of_memory",
"53300": "too_many_connections",
"53400": "configuration_limit_exceeded",
// Class 54 - Program Limit Exceeded
"54000": "program_limit_exceeded",
"54001": "statement_too_complex",
"54011": "too_many_columns",
"54023": "too_many_arguments",
// Class 55 - Object Not In Prerequisite State
"55000": "object_not_in_prerequisite_state",
"55006": "object_in_use",
"55P02": "cant_change_runtime_param",
"55P03": "lock_not_available",
// Class 57 - Operator Intervention
"57000": "operator_intervention",
"57014": "query_canceled",
"57P01": "admin_shutdown",
"57P02": "crash_shutdown",
"57P03": "cannot_connect_now",
"57P04": "database_dropped",
// Class 58 - System Error (errors external to PostgreSQL itself)
"58000": "system_error",
"58030": "io_error",
"58P01": "undefined_file",
"58P02": "duplicate_file",
// Class F0 - Configuration File Error
"F0000": "config_file_error",
"F0001": "lock_file_exists",
// Class HV - Foreign Data Wrapper Error (SQL/MED)
"HV000": "fdw_error",
"HV005": "fdw_column_name_not_found",
"HV002": "fdw_dynamic_parameter_value_needed",
"HV010": "fdw_function_sequence_error",
"HV021": "fdw_inconsistent_descriptor_information",
"HV024": "fdw_invalid_attribute_value",
"HV007": "fdw_invalid_column_name",
"HV008": "fdw_invalid_column_number",
"HV004": "fdw_invalid_data_type",
"HV006": "fdw_invalid_data_type_descriptors",
"HV091": "fdw_invalid_descriptor_field_identifier",
"HV00B": "fdw_invalid_handle",
"HV00C": "fdw_invalid_option_index",
"HV00D": "fdw_invalid_option_name",
"HV090": "fdw_invalid_string_length_or_buffer_length",
"HV00A": "fdw_invalid_string_format",
"HV009": "fdw_invalid_use_of_null_pointer",
"HV014": "fdw_too_many_handles",
"HV001": "fdw_out_of_memory",
"HV00P": "fdw_no_schemas",
"HV00J": "fdw_option_name_not_found",
"HV00K": "fdw_reply_handle",
"HV00Q": "fdw_schema_not_found",
"HV00R": "fdw_table_not_found",
"HV00L": "fdw_unable_to_create_execution",
"HV00M": "fdw_unable_to_create_reply",
"HV00N": "fdw_unable_to_establish_connection",
// Class P0 - PL/pgSQL Error
"P0000": "plpgsql_error",
"P0001": "raise_exception",
"P0002": "no_data_found",
"P0003": "too_many_rows",
// Class XX - Internal Error
"XX000": "internal_error",
"XX001": "data_corrupted",
"XX002": "index_corrupted",
}
// parseError decodes an ErrorResponse (or NoticeResponse) message body into
// an *Error. The wire format is a sequence of single-byte field codes, each
// followed by a NUL-terminated string, ending with a zero byte. Unknown
// field codes are silently ignored.
func parseError(r *readBuf) *Error {
err := new(Error)
for t := r.byte(); t != 0; t = r.byte() {
msg := r.string()
switch t {
case 'S':
err.Severity = msg
case 'C':
err.Code = ErrorCode(msg)
case 'M':
err.Message = msg
case 'D':
err.Detail = msg
case 'H':
err.Hint = msg
case 'P':
err.Position = msg
case 'p':
err.InternalPosition = msg
case 'q':
err.InternalQuery = msg
case 'W':
err.Where = msg
case 's':
err.Schema = msg
case 't':
err.Table = msg
case 'c':
err.Column = msg
case 'd':
err.DataTypeName = msg
case 'n':
err.Constraint = msg
case 'F':
err.File = msg
case 'L':
err.Line = msg
case 'R':
err.Routine = msg
}
}
return err
}
// Fatal returns true if the Error Severity is fatal.
func (err *Error) Fatal() bool {
return err.Severity == Efatal
}
// Get implements the legacy PGError interface. New code should use the fields
// of the Error struct directly. The field codes mirror the wire protocol's
// single-letter codes (see parseError); unknown codes return "".
func (err *Error) Get(k byte) (v string) {
switch k {
case 'S':
return err.Severity
case 'C':
return string(err.Code)
case 'M':
return err.Message
case 'D':
return err.Detail
case 'H':
return err.Hint
case 'P':
return err.Position
case 'p':
return err.InternalPosition
case 'q':
return err.InternalQuery
case 'W':
return err.Where
case 's':
return err.Schema
case 't':
return err.Table
case 'c':
return err.Column
case 'd':
return err.DataTypeName
case 'n':
return err.Constraint
case 'F':
return err.File
case 'L':
return err.Line
case 'R':
return err.Routine
}
return ""
}
// Error implements the error interface, prefixing the server message with
// "pq: ".
func (err Error) Error() string {
return "pq: " + err.Message
}
// PGError is an interface used by previous versions of pq. It is provided
// only to support legacy code. New code should use the Error type.
type PGError interface {
Error() string
Fatal() bool
Get(k byte) (v string)
}
// errorf panics with a pq-prefixed formatted error. It is used internally
// for protocol-level failures that are recovered higher up the stack.
func errorf(s string, args ...interface{}) {
	panic(fmt.Errorf("pq: %s", fmt.Sprintf(s, args...)))
}

// TODO(ainar-g) Rename to errorf after removing panics.
// fmterrorf returns a pq-prefixed formatted error without panicking.
func fmterrorf(s string, args ...interface{}) error {
	return fmt.Errorf("pq: %s", fmt.Sprintf(s, args...))
}

// errRecoverNoErrBadConn converts a recovered panic into *err. Unlike
// conn.errRecover it never reports driver.ErrBadConn, so callers keep the
// original failure; a non-error panic value is wrapped in a generic error.
func errRecoverNoErrBadConn(err *error) {
	e := recover()
	if e == nil {
		// Do nothing
		return
	}
	if recovered, ok := e.(error); ok {
		*err = recovered
		return
	}
	*err = fmt.Errorf("pq: unexpected error: %#v", e)
}
// errRecover converts a recovered panic into *err and updates the
// connection's health flag. Runtime errors are re-panicked; fatal server
// errors and network errors mark the connection bad so database/sql
// discards it (via driver.ErrBadConn where appropriate).
func (cn *conn) errRecover(err *error) {
e := recover()
switch v := e.(type) {
case nil:
// Do nothing
case runtime.Error:
// Programmer error: poison the connection and keep panicking.
cn.bad = true
panic(v)
case *Error:
// Server-reported error: only fatal severities kill the connection.
if v.Fatal() {
*err = driver.ErrBadConn
} else {
*err = v
}
case *net.OpError:
cn.bad = true
*err = v
case error:
// io.EOF and TLS handshake failures mean the connection is unusable.
if v == io.EOF || v.(error).Error() == "remote error: handshake failure" {
*err = driver.ErrBadConn
} else {
*err = v
}
default:
cn.bad = true
panic(fmt.Sprintf("unknown error: %#v", e))
}
// Any time we return ErrBadConn, we need to remember it since *Tx doesn't
// mark the connection bad in database/sql.
if *err == driver.ErrBadConn {
cn.bad = true
}
}

27
vendor/github.com/lib/pq/krb.go generated vendored
View File

@@ -1,27 +0,0 @@
package pq
// GSS provides GSSAPI authentication (e.g., Kerberos).
type GSS interface {
	GetInitToken(host string, service string) ([]byte, error)
	GetInitTokenFromSpn(spn string) ([]byte, error)
	Continue(inToken []byte) (done bool, outToken []byte, err error)
}

// NewGSSFunc creates a GSS authentication provider, for use with
// RegisterGSSProvider.
type NewGSSFunc func() (GSS, error)

// newGss holds the registered provider factory; it stays nil until
// RegisterGSSProvider is called.
var newGss NewGSSFunc

// RegisterGSSProvider registers a GSS authentication provider. For example, if
// you need to use Kerberos to authenticate with your server, add this to your
// main package:
//
//	import "github.com/lib/pq/auth/kerberos"
//
//	func init() {
//		pq.RegisterGSSProvider(func() (pq.GSS, error) { return kerberos.NewGSS() })
//	}
func RegisterGSSProvider(factory NewGSSFunc) {
	newGss = factory
}

71
vendor/github.com/lib/pq/notice.go generated vendored
View File

@@ -1,71 +0,0 @@
// +build go1.10
package pq
import (
"context"
"database/sql/driver"
)
// NoticeHandler returns the notice handler on the given connection, if any. A
// runtime panic occurs if c is not a pq connection. This is rarely used
// directly, use ConnectorNoticeHandler and ConnectorWithNoticeHandler instead.
func NoticeHandler(c driver.Conn) func(*Error) {
	return c.(*conn).noticeHandler
}

// SetNoticeHandler sets the given notice handler on the given connection. A
// runtime panic occurs if c is not a pq connection. A nil handler may be used
// to unset it. This is rarely used directly, use ConnectorNoticeHandler and
// ConnectorWithNoticeHandler instead.
//
// Note: Notice handlers are executed synchronously by pq meaning commands
// won't continue to be processed until the handler returns.
func SetNoticeHandler(c driver.Conn, handler func(*Error)) {
	c.(*conn).noticeHandler = handler
}
// NoticeHandlerConnector wraps a regular connector and sets a notice handler
// on every connection it produces.
type NoticeHandlerConnector struct {
	driver.Connector
	noticeHandler func(*Error)
}

// Connect delegates to the wrapped connector and, on success, installs the
// notice handler on the new connection.
func (n *NoticeHandlerConnector) Connect(ctx context.Context) (driver.Conn, error) {
	conn, err := n.Connector.Connect(ctx)
	if err == nil {
		SetNoticeHandler(conn, n.noticeHandler)
	}
	return conn, err
}

// ConnectorNoticeHandler returns the currently set notice handler, if any. If
// the given connector is not a result of ConnectorWithNoticeHandler, nil is
// returned.
func ConnectorNoticeHandler(c driver.Connector) func(*Error) {
	nc, ok := c.(*NoticeHandlerConnector)
	if !ok {
		return nil
	}
	return nc.noticeHandler
}

// ConnectorWithNoticeHandler creates or sets the given handler for the given
// connector. If the given connector already is a NoticeHandlerConnector, the
// handler is simply replaced on it and the connector is returned. Otherwise a
// new wrapper around c is returned. A nil notice handler may be used to unset
// it.
//
// The returned connector is intended to be used with database/sql.OpenDB.
//
// Note: Notice handlers are executed synchronously by pq meaning commands
// won't continue to be processed until the handler returns.
func ConnectorWithNoticeHandler(c driver.Connector, handler func(*Error)) *NoticeHandlerConnector {
	nc, ok := c.(*NoticeHandlerConnector)
	if !ok {
		return &NoticeHandlerConnector{Connector: c, noticeHandler: handler}
	}
	nc.noticeHandler = handler
	return nc
}

858
vendor/github.com/lib/pq/notify.go generated vendored
View File

@@ -1,858 +0,0 @@
package pq
// Package pq is a pure Go Postgres driver for the database/sql package.
// This module contains support for Postgres LISTEN/NOTIFY.
import (
"context"
"database/sql/driver"
"errors"
"fmt"
"sync"
"sync/atomic"
"time"
)
// Notification represents a single notification from the database.
type Notification struct {
	// Process ID (PID) of the notifying postgres backend.
	BePid int
	// Name of the channel the notification was sent on.
	Channel string
	// Payload, or the empty string if unspecified.
	Extra string
}
// recvNotification decodes a single notification from the wire buffer:
// backend PID, then channel name, then payload, in that order.
func recvNotification(r *readBuf) *Notification {
	n := &Notification{
		BePid:   r.int32(),
		Channel: r.string(),
		Extra:   r.string(),
	}
	return n
}
// SetNotificationHandler sets the given notification handler on the given
// connection. A runtime panic occurs if c is not a pq connection. A nil handler
// may be used to unset it.
//
// Note: Notification handlers are executed synchronously by pq meaning commands
// won't continue to be processed until the handler returns.
func SetNotificationHandler(c driver.Conn, handler func(*Notification)) {
	c.(*conn).notificationHandler = handler
}

// NotificationHandlerConnector wraps a regular connector and sets a notification handler
// on it.
type NotificationHandlerConnector struct {
	driver.Connector
	notificationHandler func(*Notification)
}
// Connect delegates to the wrapped connector and, on success, installs the
// notification handler on the new connection.
func (n *NotificationHandlerConnector) Connect(ctx context.Context) (driver.Conn, error) {
	conn, err := n.Connector.Connect(ctx)
	if err == nil {
		SetNotificationHandler(conn, n.notificationHandler)
	}
	return conn, err
}

// ConnectorNotificationHandler returns the currently set notification handler, if any. If
// the given connector is not a result of ConnectorWithNotificationHandler, nil is
// returned.
func ConnectorNotificationHandler(c driver.Connector) func(*Notification) {
	nc, ok := c.(*NotificationHandlerConnector)
	if !ok {
		return nil
	}
	return nc.notificationHandler
}

// ConnectorWithNotificationHandler creates or sets the given handler for the
// given connector. If the given connector already is a
// NotificationHandlerConnector, the handler is simply replaced on it and the
// connector is returned. Otherwise a new wrapper around c is returned. A nil
// notification handler may be used to unset it.
//
// The returned connector is intended to be used with database/sql.OpenDB.
//
// Note: Notification handlers are executed synchronously by pq meaning commands
// won't continue to be processed until the handler returns.
func ConnectorWithNotificationHandler(c driver.Connector, handler func(*Notification)) *NotificationHandlerConnector {
	nc, ok := c.(*NotificationHandlerConnector)
	if !ok {
		return &NotificationHandlerConnector{Connector: c, notificationHandler: handler}
	}
	nc.notificationHandler = handler
	return nc
}
// Protocol states for a ListenerConn. At most one simple query is in flight
// at a time; the state tracks where we are in the query/response cycle
// (see setState for the allowed transitions).
const (
	connStateIdle int32 = iota
	connStateExpectResponse
	connStateExpectReadyForQuery
)

// message is a reply forwarded from the receiving goroutine to
// ExecSimpleQuery: the backend message type byte and, for error responses,
// the parsed error.
type message struct {
	typ byte
	err error
}

var errListenerConnClosed = errors.New("pq: ListenerConn has been closed")

// ListenerConn is a low-level interface for waiting for notifications. You
// should use Listener instead.
type ListenerConn struct {
	// guards cn and err
	connectionLock sync.Mutex
	cn             *conn
	err            error

	connState int32

	// the sending goroutine will be holding this lock
	senderLock sync.Mutex

	// notifications received from the server are delivered on this channel
	notificationChan chan<- *Notification

	// replies to the in-flight simple query (see listenerConnLoop)
	replyChan chan message
}
// NewListenerConn creates a new ListenerConn. Use NewListener instead.
func NewListenerConn(name string, notificationChan chan<- *Notification) (*ListenerConn, error) {
	return newDialListenerConn(defaultDialer{}, name, notificationChan)
}

// newDialListenerConn opens a dedicated connection via d and starts the
// goroutine that receives messages from the server.
func newDialListenerConn(d Dialer, name string, c chan<- *Notification) (*ListenerConn, error) {
	cn, err := DialOpen(d, name)
	if err != nil {
		return nil, err
	}

	lc := &ListenerConn{
		cn:               cn.(*conn),
		notificationChan: c,
		connState:        connStateIdle,
		replyChan:        make(chan message, 2),
	}

	go lc.listenerConnMain()

	return lc, nil
}
// We can only allow one goroutine at a time to be running a query on the
// connection for various reasons, so the goroutine sending on the connection
// must be holding senderLock.
//
// acquireSenderLock returns an error if an unrecoverable error has occurred
// and the ListenerConn should be abandoned.
func (l *ListenerConn) acquireSenderLock() error {
	// we must acquire senderLock first to avoid deadlocks; see ExecSimpleQuery
	l.senderLock.Lock()

	l.connectionLock.Lock()
	err := l.err
	l.connectionLock.Unlock()

	if err == nil {
		return nil
	}

	// the connection has failed; don't hand out the sender lock
	l.senderLock.Unlock()
	return err
}

// releaseSenderLock releases the lock taken by acquireSenderLock.
func (l *ListenerConn) releaseSenderLock() {
	l.senderLock.Unlock()
}
// setState advances the protocol state machine to newState. It returns false
// if the transition from the current state is not allowed.
//
// The only legal cycle is Idle -> ExpectResponse -> ExpectReadyForQuery -> Idle.
func (l *ListenerConn) setState(newState int32) bool {
	var from int32
	switch newState {
	case connStateIdle:
		from = connStateExpectReadyForQuery
	case connStateExpectResponse:
		from = connStateIdle
	case connStateExpectReadyForQuery:
		from = connStateExpectResponse
	default:
		panic(fmt.Sprintf("unexpected listenerConnState %d", newState))
	}

	return atomic.CompareAndSwapInt32(&l.connState, from, newState)
}
// Main logic is here: receive messages from the postgres backend, forward
// notifications and query replies and keep the internal state in sync with the
// protocol state. Returns when the connection has been lost, is about to go
// away or should be discarded because we couldn't agree on the state with the
// server backend.
func (l *ListenerConn) listenerConnLoop() (err error) {
	defer errRecoverNoErrBadConn(&err)

	r := &readBuf{}
	for {
		t, err := l.cn.recvMessage(r)
		if err != nil {
			return err
		}

		// t is the backend message type byte.
		switch t {
		case 'A':
			// recvNotification copies all the data so we don't need to worry
			// about the scratch buffer being overwritten.
			l.notificationChan <- recvNotification(r)

		case 'T', 'D':
			// only used by tests; ignore

		case 'E':
			// We might receive an ErrorResponse even when not in a query; it
			// is expected that the server will close the connection after
			// that, but we should make sure that the error we display is the
			// one from the stray ErrorResponse, not io.ErrUnexpectedEOF.
			if !l.setState(connStateExpectReadyForQuery) {
				return parseError(r)
			}
			l.replyChan <- message{t, parseError(r)}

		case 'C', 'I':
			if !l.setState(connStateExpectReadyForQuery) {
				// protocol out of sync
				return fmt.Errorf("unexpected CommandComplete")
			}
			// ExecSimpleQuery doesn't need to know about this message

		case 'Z':
			if !l.setState(connStateIdle) {
				// protocol out of sync
				return fmt.Errorf("unexpected ReadyForQuery")
			}
			l.replyChan <- message{t, nil}

		case 'S':
			// ignore

		case 'N':
			// notice from the server; forward to the notice handler, if set
			if n := l.cn.noticeHandler; n != nil {
				n(parseError(r))
			}

		default:
			return fmt.Errorf("unexpected message %q from server in listenerConnLoop", t)
		}
	}
}

// This is the main routine for the goroutine receiving on the database
// connection. Most of the main logic is in listenerConnLoop.
func (l *ListenerConn) listenerConnMain() {
	err := l.listenerConnLoop()

	// listenerConnLoop terminated; we're done, but we still have to clean up.
	// Make sure nobody tries to start any new queries by making sure the err
	// pointer is set. It is important that we do not overwrite its value; a
	// connection could be closed by either this goroutine or one sending on
	// the connection -- whoever closes the connection is assumed to have the
	// more meaningful error message (as the other one will probably get
	// net.errClosed), so that goroutine sets the error we expose while the
	// other error is discarded. If the connection is lost while two
	// goroutines are operating on the socket, it probably doesn't matter which
	// error we expose so we don't try to do anything more complex.
	l.connectionLock.Lock()
	if l.err == nil {
		l.err = err
	}
	l.cn.Close()
	l.connectionLock.Unlock()

	// There might be a query in-flight; make sure nobody's waiting for a
	// response to it, since there's not going to be one.
	close(l.replyChan)

	// let the listener know we're done
	close(l.notificationChan)

	// this ListenerConn is done
}
// Listen sends a LISTEN query to the server. See ExecSimpleQuery.
func (l *ListenerConn) Listen(channel string) (bool, error) {
	return l.ExecSimpleQuery("LISTEN " + QuoteIdentifier(channel))
}

// Unlisten sends an UNLISTEN query to the server. See ExecSimpleQuery.
func (l *ListenerConn) Unlisten(channel string) (bool, error) {
	return l.ExecSimpleQuery("UNLISTEN " + QuoteIdentifier(channel))
}

// UnlistenAll sends an `UNLISTEN *` query to the server. See ExecSimpleQuery.
func (l *ListenerConn) UnlistenAll() (bool, error) {
	return l.ExecSimpleQuery("UNLISTEN *")
}

// Ping the remote server to make sure it's alive. Non-nil error means the
// connection has failed and should be abandoned.
func (l *ListenerConn) Ping() error {
	// an empty simple query acts as a no-op round trip to the server
	sent, err := l.ExecSimpleQuery("")
	if !sent {
		return err
	}
	if err != nil {
		// shouldn't happen: an empty query should never produce a server error
		panic(err)
	}
	return nil
}
// sendSimpleQuery attempts to send a query on the connection. Returns an
// error if sending the query failed, and the caller should initiate closure
// of this connection. The caller must be holding senderLock (see
// acquireSenderLock and releaseSenderLock).
func (l *ListenerConn) sendSimpleQuery(q string) (err error) {
	defer errRecoverNoErrBadConn(&err)

	// must set connection state before sending the query
	if !l.setState(connStateExpectResponse) {
		panic("two queries running at the same time")
	}

	// Can't use l.cn.writeBuf here because it uses the scratch buffer which
	// might get overwritten by listenerConnLoop.
	//
	// 'Q' (simple query) tag followed by four zero bytes; pos 1 marks where
	// the length field begins — presumably fixed up by cn.send/writeBuf, TODO
	// confirm against writeBuf's implementation.
	b := &writeBuf{
		buf: []byte("Q\x00\x00\x00\x00"),
		pos: 1,
	}
	b.string(q)
	l.cn.send(b)

	return nil
}

// ExecSimpleQuery executes a "simple query" (i.e. one with no bindable
// parameters) on the connection. The possible return values are:
//  1. "executed" is true; the query was executed to completion on the
//     database server. If the query failed, err will be set to the error
//     returned by the database, otherwise err will be nil.
//  2. If "executed" is false, the query could not be executed on the remote
//     server. err will be non-nil.
//
// After a call to ExecSimpleQuery has returned an executed=false value, the
// connection has either been closed or will be closed shortly thereafter, and
// all subsequently executed queries will return an error.
func (l *ListenerConn) ExecSimpleQuery(q string) (executed bool, err error) {
	if err = l.acquireSenderLock(); err != nil {
		return false, err
	}
	defer l.releaseSenderLock()

	err = l.sendSimpleQuery(q)
	if err != nil {
		// We can't know what state the protocol is in, so we need to abandon
		// this connection.
		l.connectionLock.Lock()
		// Set the error pointer if it hasn't been set already; see
		// listenerConnMain.
		if l.err == nil {
			l.err = err
		}
		l.connectionLock.Unlock()
		l.cn.c.Close()
		return false, err
	}

	// now we just wait for a reply..
	for {
		m, ok := <-l.replyChan
		if !ok {
			// We lost the connection to the server; don't bother waiting for
			// a response. err should have been set already.
			l.connectionLock.Lock()
			err := l.err
			l.connectionLock.Unlock()
			return false, err
		}
		switch m.typ {
		case 'Z':
			// sanity check: ReadyForQuery never carries an error
			if m.err != nil {
				panic("m.err != nil")
			}
			// done; err might or might not be set
			return true, err

		case 'E':
			// sanity check: ErrorResponse always carries an error
			if m.err == nil {
				panic("m.err == nil")
			}
			// server responded with an error; ReadyForQuery to follow
			err = m.err

		default:
			return false, fmt.Errorf("unknown response for simple query: %q", m.typ)
		}
	}
}
// Close closes the connection. It is an error to call Close more than once;
// subsequent calls return errListenerConnClosed.
func (l *ListenerConn) Close() error {
	l.connectionLock.Lock()
	alreadyClosed := l.err != nil
	if !alreadyClosed {
		l.err = errListenerConnClosed
	}
	l.connectionLock.Unlock()

	if alreadyClosed {
		return errListenerConnClosed
	}

	// We can't send anything on the connection without holding senderLock.
	// Simply close the net.Conn to wake up everyone operating on it.
	return l.cn.c.Close()
}
// Err returns the reason the connection was closed. It is not safe to call
// this function until l.Notify has been closed.
func (l *ListenerConn) Err() error {
	return l.err
}

var errListenerClosed = errors.New("pq: Listener has been closed")

// ErrChannelAlreadyOpen is returned from Listen when a channel is already
// open.
var ErrChannelAlreadyOpen = errors.New("pq: channel is already open")

// ErrChannelNotOpen is returned from Unlisten when a channel is not open.
var ErrChannelNotOpen = errors.New("pq: channel is not open")

// ListenerEventType is an enumeration of listener event types.
type ListenerEventType int

const (
	// ListenerEventConnected is emitted only when the database connection
	// has been initially initialized. The err argument of the callback
	// will always be nil.
	ListenerEventConnected ListenerEventType = iota

	// ListenerEventDisconnected is emitted after a database connection has
	// been lost, either because of an error or because Close has been
	// called. The err argument will be set to the reason the database
	// connection was lost.
	ListenerEventDisconnected

	// ListenerEventReconnected is emitted after a database connection has
	// been re-established after connection loss. The err argument of the
	// callback will always be nil. After this event has been emitted, a
	// nil pq.Notification is sent on the Listener.Notify channel.
	ListenerEventReconnected

	// ListenerEventConnectionAttemptFailed is emitted after a connection
	// to the database was attempted, but failed. The err argument will be
	// set to an error describing why the connection attempt did not
	// succeed.
	ListenerEventConnectionAttemptFailed
)

// EventCallbackType is the event callback type. See also ListenerEventType
// constants' documentation.
type EventCallbackType func(event ListenerEventType, err error)
// Listener provides an interface for listening to notifications from a
// PostgreSQL database. For general usage information, see section
// "Notifications".
//
// Listener can safely be used from concurrently running goroutines.
type Listener struct {
	// Channel for receiving notifications from the database. In some cases a
	// nil value will be sent. See section "Notifications" above.
	Notify chan *Notification

	name                 string        // connection string used for (re)dialing
	minReconnectInterval time.Duration // initial reconnect backoff
	maxReconnectInterval time.Duration // backoff cap
	dialer               Dialer
	eventCallback        EventCallbackType

	// lock guards the mutable fields below
	lock     sync.Mutex
	isClosed bool
	// signaled when a new connection is installed (see connect) or the
	// Listener is closed; Listen waits on it
	reconnectCond *sync.Cond
	cn            *ListenerConn
	// delivers raw notifications from the current ListenerConn
	connNotificationChan <-chan *Notification
	// set of channel names we want to be listening on
	channels map[string]struct{}
}
// NewListener creates a new database connection dedicated to LISTEN / NOTIFY.
//
// name should be set to a connection string to be used to establish the
// database connection (see section "Connection String Parameters" above).
//
// minReconnectInterval controls the duration to wait before trying to
// re-establish the database connection after connection loss. After each
// consecutive failure this interval is doubled, until maxReconnectInterval is
// reached. Successfully completing the connection establishment procedure
// resets the interval back to minReconnectInterval.
//
// The last parameter eventCallback can be set to a function which will be
// called by the Listener when the state of the underlying database connection
// changes. This callback will be called by the goroutine which dispatches the
// notifications over the Notify channel, so you should try to avoid doing
// potentially time-consuming operations from the callback.
func NewListener(name string,
	minReconnectInterval time.Duration,
	maxReconnectInterval time.Duration,
	eventCallback EventCallbackType) *Listener {
	return NewDialListener(defaultDialer{}, name, minReconnectInterval, maxReconnectInterval, eventCallback)
}

// NewDialListener is like NewListener but it takes a Dialer.
func NewDialListener(d Dialer,
	name string,
	minReconnectInterval time.Duration,
	maxReconnectInterval time.Duration,
	eventCallback EventCallbackType) *Listener {

	listener := &Listener{
		Notify: make(chan *Notification, 32),

		name:                 name,
		minReconnectInterval: minReconnectInterval,
		maxReconnectInterval: maxReconnectInterval,
		dialer:               d,
		eventCallback:        eventCallback,
		channels:             make(map[string]struct{}),
	}
	listener.reconnectCond = sync.NewCond(&listener.lock)

	// the maintenance goroutine owns the connection lifecycle from here on
	go listener.listenerMain()

	return listener
}
// NotificationChannel returns the notification channel for this listener.
// This is the same channel as Notify, and will not be recreated during the
// life time of the Listener.
func (l *Listener) NotificationChannel() <-chan *Notification {
	return l.Notify
}
// Listen starts listening for notifications on a channel. Calls to this
// function will block until an acknowledgement has been received from the
// server. Note that Listener automatically re-establishes the connection
// after connection loss, so this function may block indefinitely if the
// connection can not be re-established.
//
// Listen will only fail in three conditions:
//  1. The channel is already open. The returned error will be
//     ErrChannelAlreadyOpen.
//  2. The query was executed on the remote server, but PostgreSQL returned an
//     error message in response to the query. The returned error will be a
//     pq.Error containing the information the server supplied.
//  3. Close is called on the Listener before the request could be completed.
//
// The channel name is case-sensitive.
func (l *Listener) Listen(channel string) error {
	l.lock.Lock()
	defer l.lock.Unlock()

	if l.isClosed {
		return errListenerClosed
	}

	// The server allows you to issue a LISTEN on a channel which is already
	// open, but it seems useful to be able to detect this case to spot for
	// mistakes in application logic. If the application genuinely doesn't
	// care, it can check the exported error and ignore it.
	_, exists := l.channels[channel]
	if exists {
		return ErrChannelAlreadyOpen
	}

	if l.cn != nil {
		// If gotResponse is true but error is set, the query was executed on
		// the remote server, but resulted in an error. This should be
		// relatively rare, so it's fine if we just pass the error to our
		// caller. However, if gotResponse is false, we could not complete the
		// query on the remote server and our underlying connection is about
		// to go away, so we only add relname to l.channels, and wait for
		// resync() to take care of the rest.
		gotResponse, err := l.cn.Listen(channel)
		if gotResponse && err != nil {
			return err
		}
	}

	l.channels[channel] = struct{}{}
	// Block until a connection exists; connect() broadcasts on reconnectCond
	// after installing one (and Close broadcasts to unblock us).
	for l.cn == nil {
		l.reconnectCond.Wait()
		// we let go of the mutex for a while
		if l.isClosed {
			return errListenerClosed
		}
	}

	return nil
}
// Unlisten removes a channel from the Listener's channel list. Returns
// ErrChannelNotOpen if the Listener is not listening on the specified channel.
// Returns immediately with no error if there is no connection. Note that you
// might still get notifications for this channel even after Unlisten has
// returned.
//
// The channel name is case-sensitive.
func (l *Listener) Unlisten(channel string) error {
	l.lock.Lock()
	defer l.lock.Unlock()

	if l.isClosed {
		return errListenerClosed
	}

	// As with LISTEN, issuing UNLISTEN on a channel that isn't open is not a
	// server-side error, but distinguishing the case helps catch application
	// bugs.
	if _, open := l.channels[channel]; !open {
		return ErrChannelNotOpen
	}

	if cn := l.cn; cn != nil {
		// Only surface errors the backend actually reported; if the query
		// never reached the server the reconnect logic will resync state.
		if gotResponse, err := cn.Unlisten(channel); gotResponse && err != nil {
			return err
		}
	}

	// Don't bother waiting for resync if there's no connection.
	delete(l.channels, channel)
	return nil
}
// UnlistenAll removes all channels from the Listener's channel list. Returns
// immediately with no error if there is no connection. Note that you might
// still get notifications for any of the deleted channels even after
// UnlistenAll has returned.
func (l *Listener) UnlistenAll() error {
	l.lock.Lock()
	defer l.lock.Unlock()

	if l.isClosed {
		return errListenerClosed
	}

	if cn := l.cn; cn != nil {
		// Only surface errors the backend actually reported; if the query
		// never reached the server the reconnect logic will resync state.
		if gotResponse, err := cn.UnlistenAll(); gotResponse && err != nil {
			return err
		}
	}

	// Forget every channel; no need to wait for resync without a connection.
	l.channels = make(map[string]struct{})
	return nil
}
// Ping the remote server to make sure it's alive. A non-nil return value
// means that there is no active connection.
func (l *Listener) Ping() error {
	l.lock.Lock()
	defer l.lock.Unlock()

	switch {
	case l.isClosed:
		return errListenerClosed
	case l.cn == nil:
		return errors.New("no connection")
	}

	return l.cn.Ping()
}
// disconnectCleanup cleans up after losing the server connection. Returns
// l.cn.Err(), which should have the reason the connection was lost.
func (l *Listener) disconnectCleanup() error {
	l.lock.Lock()
	defer l.lock.Unlock()

	// sanity check; can't look at Err() until the channel has been closed
	select {
	case _, ok := <-l.connNotificationChan:
		if ok {
			panic("connNotificationChan not closed")
		}
	default:
		panic("connNotificationChan not closed")
	}

	err := l.cn.Err()
	l.cn.Close()
	l.cn = nil
	return err
}

// resync synchronizes the list of channels we want to be listening on with
// the server after the connection has been established.
func (l *Listener) resync(cn *ListenerConn, notificationChan <-chan *Notification) error {
	doneChan := make(chan error)
	go func(notificationChan <-chan *Notification) {
		for channel := range l.channels {
			// If we got a response, return that error to our caller as it's
			// going to be more descriptive than cn.Err().
			gotResponse, err := cn.Listen(channel)
			if gotResponse && err != nil {
				doneChan <- err
				return
			}

			// If we couldn't reach the server, wait for notificationChan to
			// close and then return the error message from the connection, as
			// per ListenerConn's interface.
			if err != nil {
				for range notificationChan {
				}
				doneChan <- cn.Err()
				return
			}
		}
		doneChan <- nil
	}(notificationChan)

	// Ignore notifications while synchronization is going on to avoid
	// deadlocks. We have to send a nil notification over Notify anyway as
	// we can't possibly know which notifications (if any) were lost while
	// the connection was down, so there's no reason to try and process
	// these messages at all.
	for {
		select {
		case _, ok := <-notificationChan:
			if !ok {
				// the connection died; stop selecting on it
				notificationChan = nil
			}

		case err := <-doneChan:
			return err
		}
	}
}
// closed reports whether Close has been called.
// The caller should NOT be holding l.lock.
func (l *Listener) closed() bool {
	l.lock.Lock()
	isClosed := l.isClosed
	l.lock.Unlock()
	return isClosed
}
// connect dials a new ListenerConn and, on success, re-establishes all
// channel subscriptions (via resync) before installing it as the active
// connection and waking any goroutines blocked in Listen.
func (l *Listener) connect() error {
	notificationChan := make(chan *Notification, 32)
	cn, err := newDialListenerConn(l.dialer, l.name, notificationChan)
	if err != nil {
		return err
	}

	l.lock.Lock()
	defer l.lock.Unlock()

	err = l.resync(cn, notificationChan)
	if err != nil {
		cn.Close()
		return err
	}

	l.cn = cn
	l.connNotificationChan = notificationChan
	// wake up goroutines waiting in Listen() for a connection
	l.reconnectCond.Broadcast()

	return nil
}
// Close disconnects the Listener from the database and shuts it down.
// Subsequent calls to its methods will return an error. Close returns an
// error if the connection has already been closed.
func (l *Listener) Close() error {
	l.lock.Lock()
	defer l.lock.Unlock()

	if l.isClosed {
		return errListenerClosed
	}

	if cn := l.cn; cn != nil {
		cn.Close()
	}
	l.isClosed = true

	// Unblock calls to Listen()
	l.reconnectCond.Broadcast()

	return nil
}
// emitEvent invokes the user-supplied event callback, if one was provided.
func (l *Listener) emitEvent(event ListenerEventType, err error) {
	if l.eventCallback != nil {
		l.eventCallback(event, err)
	}
}
// Main logic here: maintain a connection to the server when possible, wait
// for notifications and emit events.
func (l *Listener) listenerConnLoop() {
	var nextReconnect time.Time

	reconnectInterval := l.minReconnectInterval
	for {
		// Keep dialing, with exponential backoff capped at
		// maxReconnectInterval, until a connection is established.
		for {
			err := l.connect()
			if err == nil {
				break
			}

			if l.closed() {
				return
			}
			l.emitEvent(ListenerEventConnectionAttemptFailed, err)

			time.Sleep(reconnectInterval)
			reconnectInterval *= 2
			if reconnectInterval > l.maxReconnectInterval {
				reconnectInterval = l.maxReconnectInterval
			}
		}

		if nextReconnect.IsZero() {
			l.emitEvent(ListenerEventConnected, nil)
		} else {
			l.emitEvent(ListenerEventReconnected, nil)
			// the nil notification tells the consumer that notifications may
			// have been lost while disconnected (see ListenerEventReconnected)
			l.Notify <- nil
		}

		reconnectInterval = l.minReconnectInterval
		nextReconnect = time.Now().Add(reconnectInterval)

		// Forward notifications until the connection dies (channel closed).
		for {
			notification, ok := <-l.connNotificationChan
			if !ok {
				// lost connection, loop again
				break
			}
			l.Notify <- notification
		}

		err := l.disconnectCleanup()
		if l.closed() {
			return
		}
		l.emitEvent(ListenerEventDisconnected, err)

		time.Sleep(time.Until(nextReconnect))
	}
}

// listenerMain runs the connection loop and closes Notify once it returns,
// i.e. after Close has been called.
func (l *Listener) listenerMain() {
	l.listenerConnLoop()
	close(l.Notify)
}

View File

@@ -1,6 +0,0 @@
// Package oid contains OID constants
// as defined by the Postgres server.
package oid

// Oid is a Postgres Object ID.
type Oid uint32

343
vendor/github.com/lib/pq/oid/types.go generated vendored
View File

@@ -1,343 +0,0 @@
// Code generated by gen.go. DO NOT EDIT.

package oid

// OIDs of the built-in Postgres types. Names of the form T__x appear to be
// the array types corresponding to T_x (Postgres prefixes an element type's
// name with an underscore for its array type) — generated output; verify
// against gen.go if in doubt.
const (
	T_bool Oid = 16
	T_bytea Oid = 17
	T_char Oid = 18
	T_name Oid = 19
	T_int8 Oid = 20
	T_int2 Oid = 21
	T_int2vector Oid = 22
	T_int4 Oid = 23
	T_regproc Oid = 24
	T_text Oid = 25
	T_oid Oid = 26
	T_tid Oid = 27
	T_xid Oid = 28
	T_cid Oid = 29
	T_oidvector Oid = 30
	T_pg_ddl_command Oid = 32
	T_pg_type Oid = 71
	T_pg_attribute Oid = 75
	T_pg_proc Oid = 81
	T_pg_class Oid = 83
	T_json Oid = 114
	T_xml Oid = 142
	T__xml Oid = 143
	T_pg_node_tree Oid = 194
	T__json Oid = 199
	T_smgr Oid = 210
	T_index_am_handler Oid = 325
	T_point Oid = 600
	T_lseg Oid = 601
	T_path Oid = 602
	T_box Oid = 603
	T_polygon Oid = 604
	T_line Oid = 628
	T__line Oid = 629
	T_cidr Oid = 650
	T__cidr Oid = 651
	T_float4 Oid = 700
	T_float8 Oid = 701
	T_abstime Oid = 702
	T_reltime Oid = 703
	T_tinterval Oid = 704
	T_unknown Oid = 705
	T_circle Oid = 718
	T__circle Oid = 719
	T_money Oid = 790
	T__money Oid = 791
	T_macaddr Oid = 829
	T_inet Oid = 869
	T__bool Oid = 1000
	T__bytea Oid = 1001
	T__char Oid = 1002
	T__name Oid = 1003
	T__int2 Oid = 1005
	T__int2vector Oid = 1006
	T__int4 Oid = 1007
	T__regproc Oid = 1008
	T__text Oid = 1009
	T__tid Oid = 1010
	T__xid Oid = 1011
	T__cid Oid = 1012
	T__oidvector Oid = 1013
	T__bpchar Oid = 1014
	T__varchar Oid = 1015
	T__int8 Oid = 1016
	T__point Oid = 1017
	T__lseg Oid = 1018
	T__path Oid = 1019
	T__box Oid = 1020
	T__float4 Oid = 1021
	T__float8 Oid = 1022
	T__abstime Oid = 1023
	T__reltime Oid = 1024
	T__tinterval Oid = 1025
	T__polygon Oid = 1027
	T__oid Oid = 1028
	T_aclitem Oid = 1033
	T__aclitem Oid = 1034
	T__macaddr Oid = 1040
	T__inet Oid = 1041
	T_bpchar Oid = 1042
	T_varchar Oid = 1043
	T_date Oid = 1082
	T_time Oid = 1083
	T_timestamp Oid = 1114
	T__timestamp Oid = 1115
	T__date Oid = 1182
	T__time Oid = 1183
	T_timestamptz Oid = 1184
	T__timestamptz Oid = 1185
	T_interval Oid = 1186
	T__interval Oid = 1187
	T__numeric Oid = 1231
	T_pg_database Oid = 1248
	T__cstring Oid = 1263
	T_timetz Oid = 1266
	T__timetz Oid = 1270
	T_bit Oid = 1560
	T__bit Oid = 1561
	T_varbit Oid = 1562
	T__varbit Oid = 1563
	T_numeric Oid = 1700
	T_refcursor Oid = 1790
	T__refcursor Oid = 2201
	T_regprocedure Oid = 2202
	T_regoper Oid = 2203
	T_regoperator Oid = 2204
	T_regclass Oid = 2205
	T_regtype Oid = 2206
	T__regprocedure Oid = 2207
	T__regoper Oid = 2208
	T__regoperator Oid = 2209
	T__regclass Oid = 2210
	T__regtype Oid = 2211
	T_record Oid = 2249
	T_cstring Oid = 2275
	T_any Oid = 2276
	T_anyarray Oid = 2277
	T_void Oid = 2278
	T_trigger Oid = 2279
	T_language_handler Oid = 2280
	T_internal Oid = 2281
	T_opaque Oid = 2282
	T_anyelement Oid = 2283
	T__record Oid = 2287
	T_anynonarray Oid = 2776
	T_pg_authid Oid = 2842
	T_pg_auth_members Oid = 2843
	T__txid_snapshot Oid = 2949
	T_uuid Oid = 2950
	T__uuid Oid = 2951
	T_txid_snapshot Oid = 2970
	T_fdw_handler Oid = 3115
	T_pg_lsn Oid = 3220
	T__pg_lsn Oid = 3221
	T_tsm_handler Oid = 3310
	T_anyenum Oid = 3500
	T_tsvector Oid = 3614
	T_tsquery Oid = 3615
	T_gtsvector Oid = 3642
	T__tsvector Oid = 3643
	T__gtsvector Oid = 3644
	T__tsquery Oid = 3645
	T_regconfig Oid = 3734
	T__regconfig Oid = 3735
	T_regdictionary Oid = 3769
	T__regdictionary Oid = 3770
	T_jsonb Oid = 3802
	T__jsonb Oid = 3807
	T_anyrange Oid = 3831
	T_event_trigger Oid = 3838
	T_int4range Oid = 3904
	T__int4range Oid = 3905
	T_numrange Oid = 3906
	T__numrange Oid = 3907
	T_tsrange Oid = 3908
	T__tsrange Oid = 3909
	T_tstzrange Oid = 3910
	T__tstzrange Oid = 3911
	T_daterange Oid = 3912
	T__daterange Oid = 3913
	T_int8range Oid = 3926
	T__int8range Oid = 3927
	T_pg_shseclabel Oid = 4066
	T_regnamespace Oid = 4089
	T__regnamespace Oid = 4090
	T_regrole Oid = 4096
	T__regrole Oid = 4097
)
// TypeName maps a Postgres type OID to the upper-case name of that type,
// mirroring the T_* constants declared above (one entry per constant).
var TypeName = map[Oid]string{
	T_bool:             "BOOL",
	T_bytea:            "BYTEA",
	T_char:             "CHAR",
	T_name:             "NAME",
	T_int8:             "INT8",
	T_int2:             "INT2",
	T_int2vector:       "INT2VECTOR",
	T_int4:             "INT4",
	T_regproc:          "REGPROC",
	T_text:             "TEXT",
	T_oid:              "OID",
	T_tid:              "TID",
	T_xid:              "XID",
	T_cid:              "CID",
	T_oidvector:        "OIDVECTOR",
	T_pg_ddl_command:   "PG_DDL_COMMAND",
	T_pg_type:          "PG_TYPE",
	T_pg_attribute:     "PG_ATTRIBUTE",
	T_pg_proc:          "PG_PROC",
	T_pg_class:         "PG_CLASS",
	T_json:             "JSON",
	T_xml:              "XML",
	T__xml:             "_XML",
	T_pg_node_tree:     "PG_NODE_TREE",
	T__json:            "_JSON",
	T_smgr:             "SMGR",
	T_index_am_handler: "INDEX_AM_HANDLER",
	T_point:            "POINT",
	T_lseg:             "LSEG",
	T_path:             "PATH",
	T_box:              "BOX",
	T_polygon:          "POLYGON",
	T_line:             "LINE",
	T__line:            "_LINE",
	T_cidr:             "CIDR",
	T__cidr:            "_CIDR",
	T_float4:           "FLOAT4",
	T_float8:           "FLOAT8",
	T_abstime:          "ABSTIME",
	T_reltime:          "RELTIME",
	T_tinterval:        "TINTERVAL",
	T_unknown:          "UNKNOWN",
	T_circle:           "CIRCLE",
	T__circle:          "_CIRCLE",
	T_money:            "MONEY",
	T__money:           "_MONEY",
	T_macaddr:          "MACADDR",
	T_inet:             "INET",
	T__bool:            "_BOOL",
	T__bytea:           "_BYTEA",
	T__char:            "_CHAR",
	T__name:            "_NAME",
	T__int2:            "_INT2",
	T__int2vector:      "_INT2VECTOR",
	T__int4:            "_INT4",
	T__regproc:         "_REGPROC",
	T__text:            "_TEXT",
	T__tid:             "_TID",
	T__xid:             "_XID",
	T__cid:             "_CID",
	T__oidvector:       "_OIDVECTOR",
	T__bpchar:          "_BPCHAR",
	T__varchar:         "_VARCHAR",
	T__int8:            "_INT8",
	T__point:           "_POINT",
	T__lseg:            "_LSEG",
	T__path:            "_PATH",
	T__box:             "_BOX",
	T__float4:          "_FLOAT4",
	T__float8:          "_FLOAT8",
	T__abstime:         "_ABSTIME",
	T__reltime:         "_RELTIME",
	T__tinterval:       "_TINTERVAL",
	T__polygon:         "_POLYGON",
	T__oid:             "_OID",
	T_aclitem:          "ACLITEM",
	T__aclitem:         "_ACLITEM",
	T__macaddr:         "_MACADDR",
	T__inet:            "_INET",
	T_bpchar:           "BPCHAR",
	T_varchar:          "VARCHAR",
	T_date:             "DATE",
	T_time:             "TIME",
	T_timestamp:        "TIMESTAMP",
	T__timestamp:       "_TIMESTAMP",
	T__date:            "_DATE",
	T__time:            "_TIME",
	T_timestamptz:      "TIMESTAMPTZ",
	T__timestamptz:     "_TIMESTAMPTZ",
	T_interval:         "INTERVAL",
	T__interval:        "_INTERVAL",
	T__numeric:         "_NUMERIC",
	T_pg_database:      "PG_DATABASE",
	T__cstring:         "_CSTRING",
	T_timetz:           "TIMETZ",
	T__timetz:          "_TIMETZ",
	T_bit:              "BIT",
	T__bit:             "_BIT",
	T_varbit:           "VARBIT",
	T__varbit:          "_VARBIT",
	T_numeric:          "NUMERIC",
	T_refcursor:        "REFCURSOR",
	T__refcursor:       "_REFCURSOR",
	T_regprocedure:     "REGPROCEDURE",
	T_regoper:          "REGOPER",
	T_regoperator:      "REGOPERATOR",
	T_regclass:         "REGCLASS",
	T_regtype:          "REGTYPE",
	T__regprocedure:    "_REGPROCEDURE",
	T__regoper:         "_REGOPER",
	T__regoperator:     "_REGOPERATOR",
	T__regclass:        "_REGCLASS",
	T__regtype:         "_REGTYPE",
	T_record:           "RECORD",
	T_cstring:          "CSTRING",
	T_any:              "ANY",
	T_anyarray:         "ANYARRAY",
	T_void:             "VOID",
	T_trigger:          "TRIGGER",
	T_language_handler: "LANGUAGE_HANDLER",
	T_internal:         "INTERNAL",
	T_opaque:           "OPAQUE",
	T_anyelement:       "ANYELEMENT",
	T__record:          "_RECORD",
	T_anynonarray:      "ANYNONARRAY",
	T_pg_authid:        "PG_AUTHID",
	T_pg_auth_members:  "PG_AUTH_MEMBERS",
	T__txid_snapshot:   "_TXID_SNAPSHOT",
	T_uuid:             "UUID",
	T__uuid:            "_UUID",
	T_txid_snapshot:    "TXID_SNAPSHOT",
	T_fdw_handler:      "FDW_HANDLER",
	T_pg_lsn:           "PG_LSN",
	T__pg_lsn:          "_PG_LSN",
	T_tsm_handler:      "TSM_HANDLER",
	T_anyenum:          "ANYENUM",
	T_tsvector:         "TSVECTOR",
	T_tsquery:          "TSQUERY",
	T_gtsvector:        "GTSVECTOR",
	T__tsvector:        "_TSVECTOR",
	T__gtsvector:       "_GTSVECTOR",
	T__tsquery:         "_TSQUERY",
	T_regconfig:        "REGCONFIG",
	T__regconfig:       "_REGCONFIG",
	T_regdictionary:    "REGDICTIONARY",
	T__regdictionary:   "_REGDICTIONARY",
	T_jsonb:            "JSONB",
	T__jsonb:           "_JSONB",
	T_anyrange:         "ANYRANGE",
	T_event_trigger:    "EVENT_TRIGGER",
	T_int4range:        "INT4RANGE",
	T__int4range:       "_INT4RANGE",
	T_numrange:         "NUMRANGE",
	T__numrange:        "_NUMRANGE",
	T_tsrange:          "TSRANGE",
	T__tsrange:         "_TSRANGE",
	T_tstzrange:        "TSTZRANGE",
	T__tstzrange:       "_TSTZRANGE",
	T_daterange:        "DATERANGE",
	T__daterange:       "_DATERANGE",
	T_int8range:        "INT8RANGE",
	T__int8range:       "_INT8RANGE",
	T_pg_shseclabel:    "PG_SHSECLABEL",
	T_regnamespace:     "REGNAMESPACE",
	T__regnamespace:    "_REGNAMESPACE",
	T_regrole:          "REGROLE",
	T__regrole:         "_REGROLE",
}

93
vendor/github.com/lib/pq/rows.go generated vendored
View File

@@ -1,93 +0,0 @@
package pq
import (
"math"
"reflect"
"time"
"github.com/lib/pq/oid"
)
// headerSize is the fixed number of bytes subtracted from the type
// modifier (Mod) when deriving declared lengths; see Length and
// PrecisionScale below.
const headerSize = 4

// fieldDesc describes one result column as reported by the backend.
type fieldDesc struct {
	// The object ID of the data type.
	OID oid.Oid
	// The data type size (see pg_type.typlen).
	// Note that negative values denote variable-width types.
	Len int
	// The type modifier (see pg_attribute.atttypmod).
	// The meaning of the modifier is type-specific.
	Mod int
}
// Type returns the Go reflect.Type used to scan values for this column's
// OID; unrecognized OIDs fall back to interface{}.
func (fd fieldDesc) Type() reflect.Type {
	switch fd.OID {
	case oid.T_int8:
		return reflect.TypeOf(int64(0))
	case oid.T_int4:
		return reflect.TypeOf(int32(0))
	case oid.T_int2:
		return reflect.TypeOf(int16(0))
	case oid.T_varchar, oid.T_text:
		return reflect.TypeOf("")
	case oid.T_bool:
		return reflect.TypeOf(false)
	case oid.T_date, oid.T_time, oid.T_timetz, oid.T_timestamp, oid.T_timestamptz:
		return reflect.TypeOf(time.Time{})
	case oid.T_bytea:
		return reflect.TypeOf([]byte(nil))
	default:
		// Unknown type: scan into a generic interface{}.
		return reflect.TypeOf(new(interface{})).Elem()
	}
}
// Name returns the upper-case Postgres type name for the column's OID
// (the empty string for OIDs missing from oid.TypeName).
func (fd fieldDesc) Name() string {
	return oid.TypeName[fd.OID]
}
// Length reports the column's maximum length in bytes for variable-length
// types: effectively unbounded for text/bytea, the declared limit for
// varchar/bpchar, and ok=false for everything else.
func (fd fieldDesc) Length() (length int64, ok bool) {
	if fd.OID == oid.T_text || fd.OID == oid.T_bytea {
		return math.MaxInt64, true
	}
	if fd.OID == oid.T_varchar || fd.OID == oid.T_bpchar {
		// Mod carries the declared length plus a fixed header.
		return int64(fd.Mod - headerSize), true
	}
	return 0, false
}
// PrecisionScale reports the precision and scale of a numeric column,
// decoded from the packed type modifier; ok is false for all other types.
func (fd fieldDesc) PrecisionScale() (precision, scale int64, ok bool) {
	if fd.OID != oid.T_numeric && fd.OID != oid.T__numeric {
		return 0, 0, false
	}
	// After stripping the fixed header, the modifier packs precision in
	// the high 16 bits and scale in the low 16 bits.
	mod := int64(fd.Mod - headerSize)
	return (mod >> 16) & 0xffff, mod & 0xffff, true
}
// ColumnTypeScanType returns the value type that can be used to scan types into.
// It implements the optional database/sql/driver.RowsColumnTypeScanType interface.
func (rs *rows) ColumnTypeScanType(index int) reflect.Type {
	return rs.colTyps[index].Type()
}

// ColumnTypeDatabaseTypeName return the database system type name.
// It implements driver.RowsColumnTypeDatabaseTypeName.
func (rs *rows) ColumnTypeDatabaseTypeName(index int) string {
	return rs.colTyps[index].Name()
}

// ColumnTypeLength returns the length of the column type if the column is a
// variable length type. If the column is not a variable length type ok
// should return false.
func (rs *rows) ColumnTypeLength(index int) (length int64, ok bool) {
	return rs.colTyps[index].Length()
}

// ColumnTypePrecisionScale should return the precision and scale for decimal
// types. If not applicable, ok should be false.
func (rs *rows) ColumnTypePrecisionScale(index int) (precision, scale int64, ok bool) {
	return rs.colTyps[index].PrecisionScale()
}

View File

@@ -1,264 +0,0 @@
// Copyright (c) 2014 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Package scram implements a SCRAM-{SHA-1,etc} client per RFC5802.
//
// http://tools.ietf.org/html/rfc5802
//
package scram
import (
"bytes"
"crypto/hmac"
"crypto/rand"
"encoding/base64"
"fmt"
"hash"
"strconv"
"strings"
)
// Client implements a SCRAM-* client (SCRAM-SHA-1, SCRAM-SHA-256, etc).
//
// A Client may be used within a SASL conversation with logic resembling:
//
//	var in []byte
//	var client = scram.NewClient(sha1.New, user, pass)
//	for client.Step(in) {
//		out := client.Out()
//		// send out to server
//		in := serverOut
//	}
//	if client.Err() != nil {
//		// auth failed
//	}
type Client struct {
	newHash func() hash.Hash // constructor for the underlying hash (e.g. sha256.New)

	user string // authentication identity
	pass string // password

	step int          // how many of the three SCRAM messages have been produced
	out  bytes.Buffer // data to send to the server for the current step
	err  error        // first error encountered; sticky across steps

	clientNonce []byte       // base64 text nonce generated (or set) in step 1
	serverNonce []byte       // combined nonce echoed by the server in step 2
	saltedPass  []byte       // salted password computed in step 2
	authMsg     bytes.Buffer // accumulated auth message signed by both sides
}
// NewClient returns a new SCRAM-* client with the provided hash algorithm.
//
// For SCRAM-SHA-256, for example, use:
//
//	client := scram.NewClient(sha256.New, user, pass)
func NewClient(newHash func() hash.Hash, user, pass string) *Client {
	client := &Client{
		newHash: newHash,
		user:    user,
		pass:    pass,
	}
	// Pre-size the scratch buffers so the handshake does not regrow them.
	client.out.Grow(256)
	client.authMsg.Grow(256)
	return client
}
// Out returns the data to be sent to the server in the current step,
// or nil when there is nothing to send.
func (c *Client) Out() []byte {
	if c.out.Len() > 0 {
		return c.out.Bytes()
	}
	return nil
}
// Err returns the error that occurred, or nil if there were no errors.
func (c *Client) Err() error {
	return c.err
}

// SetNonce sets the client nonce to the provided value.
// If not set, the nonce is generated automatically out of crypto/rand on the first step.
func (c *Client) SetNonce(nonce []byte) {
	c.clientNonce = nonce
}

// escaper encodes "=" and "," in the username per SCRAM saslname escaping.
var escaper = strings.NewReplacer("=", "=3D", ",", "=2C")

// Step processes the incoming data from the server and makes the
// next round of data for the server available via Client.Out.
// Step returns false if there are no errors and more data is
// still expected.
func (c *Client) Step(in []byte) bool {
	c.out.Reset()
	// Once the three-message exchange finished or an error occurred,
	// the conversation is over.
	if c.step > 2 || c.err != nil {
		return false
	}
	c.step++
	switch c.step {
	case 1:
		c.err = c.step1(in)
	case 2:
		c.err = c.step2(in)
	case 3:
		c.err = c.step3(in)
	}
	return c.step > 2 || c.err != nil
}
// step1 builds the client-first-message: the "n,," GS2 header followed by
// "n=<user>,r=<client nonce>", generating a random base64 nonce if none
// was supplied via SetNonce.
func (c *Client) step1(in []byte) error {
	if len(c.clientNonce) == 0 {
		const nonceLen = 16
		buf := make([]byte, nonceLen+b64.EncodedLen(nonceLen))
		if _, err := rand.Read(buf[:nonceLen]); err != nil {
			return fmt.Errorf("cannot read random SCRAM-SHA-256 nonce from operating system: %v", err)
		}
		// The text form of the nonce lives in the tail of buf; the raw
		// random bytes at the front are discarded after encoding.
		c.clientNonce = buf[nonceLen:]
		b64.Encode(c.clientNonce, buf[:nonceLen])
	}
	c.authMsg.WriteString("n=")
	escaper.WriteString(&c.authMsg, c.user)
	c.authMsg.WriteString(",r=")
	c.authMsg.Write(c.clientNonce)
	c.out.WriteString("n,,")
	c.out.Write(c.authMsg.Bytes())
	return nil
}

// b64 is the base64 alphabet used for nonces, salts, and proofs.
var b64 = base64.StdEncoding

// step2 parses the server-first-message ("r=...,s=...,i="), derives the
// salted password, and emits the client-final-message including the proof.
func (c *Client) step2(in []byte) error {
	c.authMsg.WriteByte(',')
	c.authMsg.Write(in)
	fields := bytes.Split(in, []byte(","))
	if len(fields) != 3 {
		return fmt.Errorf("expected 3 fields in first SCRAM-SHA-256 server message, got %d: %q", len(fields), in)
	}
	if !bytes.HasPrefix(fields[0], []byte("r=")) || len(fields[0]) < 2 {
		return fmt.Errorf("server sent an invalid SCRAM-SHA-256 nonce: %q", fields[0])
	}
	if !bytes.HasPrefix(fields[1], []byte("s=")) || len(fields[1]) < 6 {
		return fmt.Errorf("server sent an invalid SCRAM-SHA-256 salt: %q", fields[1])
	}
	if !bytes.HasPrefix(fields[2], []byte("i=")) || len(fields[2]) < 6 {
		return fmt.Errorf("server sent an invalid SCRAM-SHA-256 iteration count: %q", fields[2])
	}
	c.serverNonce = fields[0][2:]
	// The server's nonce must extend the client's nonce.
	if !bytes.HasPrefix(c.serverNonce, c.clientNonce) {
		return fmt.Errorf("server SCRAM-SHA-256 nonce is not prefixed by client nonce: got %q, want %q+\"...\"", c.serverNonce, c.clientNonce)
	}
	salt := make([]byte, b64.DecodedLen(len(fields[1][2:])))
	n, err := b64.Decode(salt, fields[1][2:])
	if err != nil {
		return fmt.Errorf("cannot decode SCRAM-SHA-256 salt sent by server: %q", fields[1])
	}
	salt = salt[:n]
	iterCount, err := strconv.Atoi(string(fields[2][2:]))
	if err != nil {
		return fmt.Errorf("server sent an invalid SCRAM-SHA-256 iteration count: %q", fields[2])
	}
	c.saltPassword(salt, iterCount)
	// "c=biws" is base64("n,,"): the GS2 header echoed back as the
	// channel-binding field.
	c.authMsg.WriteString(",c=biws,r=")
	c.authMsg.Write(c.serverNonce)
	c.out.WriteString("c=biws,r=")
	c.out.Write(c.serverNonce)
	c.out.WriteString(",p=")
	c.out.Write(c.clientProof())
	return nil
}
// step3 checks the server-final-message: either an authentication error
// ("e=...") or the server signature ("v=..."), which must match our own
// computation of HMAC(ServerKey, AuthMessage).
func (c *Client) step3(in []byte) error {
	var isv, ise bool
	var fields = bytes.Split(in, []byte(","))
	if len(fields) == 1 {
		isv = bytes.HasPrefix(fields[0], []byte("v="))
		ise = bytes.HasPrefix(fields[0], []byte("e="))
	}
	if ise {
		return fmt.Errorf("SCRAM-SHA-256 authentication error: %s", fields[0][2:])
	} else if !isv {
		return fmt.Errorf("unsupported SCRAM-SHA-256 final message from server: %q", in)
	}
	if !bytes.Equal(c.serverSignature(), fields[0][2:]) {
		return fmt.Errorf("cannot authenticate SCRAM-SHA-256 server signature: %q", fields[0][2:])
	}
	return nil
}

// saltPassword computes Hi(password, salt, iterCount) — an iterated,
// XOR-folded HMAC (a single PBKDF2 output block) — into c.saltedPass.
func (c *Client) saltPassword(salt []byte, iterCount int) {
	mac := hmac.New(c.newHash, []byte(c.pass))
	mac.Write(salt)
	mac.Write([]byte{0, 0, 0, 1}) // INT(1): the first (only) block index
	ui := mac.Sum(nil)
	hi := make([]byte, len(ui))
	copy(hi, ui)
	for i := 1; i < iterCount; i++ {
		mac.Reset()
		mac.Write(ui)
		mac.Sum(ui[:0])
		// Fold each round's output into the accumulator.
		for j, b := range ui {
			hi[j] ^= b
		}
	}
	c.saltedPass = hi
}

// clientProof returns base64(ClientKey XOR HMAC(StoredKey, AuthMessage)),
// the "p=" value of the client-final-message.
func (c *Client) clientProof() []byte {
	mac := hmac.New(c.newHash, c.saltedPass)
	mac.Write([]byte("Client Key"))
	clientKey := mac.Sum(nil)
	hash := c.newHash()
	hash.Write(clientKey)
	storedKey := hash.Sum(nil)
	mac = hmac.New(c.newHash, storedKey)
	mac.Write(c.authMsg.Bytes())
	clientProof := mac.Sum(nil)
	for i, b := range clientKey {
		clientProof[i] ^= b
	}
	clientProof64 := make([]byte, b64.EncodedLen(len(clientProof)))
	b64.Encode(clientProof64, clientProof)
	return clientProof64
}

// serverSignature returns base64(HMAC(ServerKey, AuthMessage)), the value
// the server must present in its final message (checked in step3).
func (c *Client) serverSignature() []byte {
	mac := hmac.New(c.newHash, c.saltedPass)
	mac.Write([]byte("Server Key"))
	serverKey := mac.Sum(nil)
	mac = hmac.New(c.newHash, serverKey)
	mac.Write(c.authMsg.Bytes())
	serverSignature := mac.Sum(nil)
	encoded := make([]byte, b64.EncodedLen(len(serverSignature)))
	b64.Encode(encoded, serverSignature)
	return encoded
}

175
vendor/github.com/lib/pq/ssl.go generated vendored
View File

@@ -1,175 +0,0 @@
package pq
import (
"crypto/tls"
"crypto/x509"
"io/ioutil"
"net"
"os"
"os/user"
"path/filepath"
)
// ssl generates a function to upgrade a net.Conn based on the "sslmode" and
// related settings. The function is nil when no upgrade should take place.
func ssl(o values) (func(net.Conn) (net.Conn, error), error) {
	verifyCaOnly := false
	tlsConf := tls.Config{}
	switch mode := o["sslmode"]; mode {
	// "require" is the default.
	case "", "require":
		// We must skip TLS's own verification since it requires full
		// verification since Go 1.3.
		tlsConf.InsecureSkipVerify = true
		// From http://www.postgresql.org/docs/current/static/libpq-ssl.html:
		//
		// Note: For backwards compatibility with earlier versions of
		// PostgreSQL, if a root CA file exists, the behavior of
		// sslmode=require will be the same as that of verify-ca, meaning the
		// server certificate is validated against the CA. Relying on this
		// behavior is discouraged, and applications that need certificate
		// validation should always use verify-ca or verify-full.
		if sslrootcert, ok := o["sslrootcert"]; ok {
			if _, err := os.Stat(sslrootcert); err == nil {
				verifyCaOnly = true
			} else {
				// Root cert not readable: drop the setting so the later
				// CA-loading step does not fail on it.
				delete(o, "sslrootcert")
			}
		}
	case "verify-ca":
		// We must skip TLS's own verification since it requires full
		// verification since Go 1.3.
		tlsConf.InsecureSkipVerify = true
		verifyCaOnly = true
	case "verify-full":
		tlsConf.ServerName = o["host"]
	case "disable":
		return nil, nil
	default:
		return nil, fmterrorf(`unsupported sslmode %q; only "require" (default), "verify-full", "verify-ca", and "disable" supported`, mode)
	}
	err := sslClientCertificates(&tlsConf, o)
	if err != nil {
		return nil, err
	}
	err = sslCertificateAuthority(&tlsConf, o)
	if err != nil {
		return nil, err
	}
	// Accept renegotiation requests initiated by the backend.
	//
	// Renegotiation was deprecated then removed from PostgreSQL 9.5, but
	// the default configuration of older versions has it enabled. Redshift
	// also initiates renegotiations and cannot be reconfigured.
	tlsConf.Renegotiation = tls.RenegotiateFreelyAsClient
	return func(conn net.Conn) (net.Conn, error) {
		client := tls.Client(conn, &tlsConf)
		if verifyCaOnly {
			// Manual CA-only verification (full hostname checks were
			// disabled above via InsecureSkipVerify).
			err := sslVerifyCertificateAuthority(client, &tlsConf)
			if err != nil {
				return nil, err
			}
		}
		return client, nil
	}, nil
}
// sslClientCertificates adds the certificate specified in the "sslcert" and
// "sslkey" settings, or if they aren't set, from the .postgresql directory
// in the user's home directory. The configured files must exist and have
// the correct permissions.
func sslClientCertificates(tlsConf *tls.Config, o values) error {
	// user.Current() might fail when cross-compiling. We have to ignore the
	// error and continue without home directory defaults, since we wouldn't
	// know from where to load them.
	user, _ := user.Current()

	// In libpq, the client certificate is only loaded if the setting is not blank.
	//
	// https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1036-L1037
	sslcert := o["sslcert"]
	if len(sslcert) == 0 && user != nil {
		sslcert = filepath.Join(user.HomeDir, ".postgresql", "postgresql.crt")
	}
	// https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1045
	if len(sslcert) == 0 {
		return nil
	}
	// A missing default cert file is not an error (matches libpq).
	// https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1050:L1054
	if _, err := os.Stat(sslcert); os.IsNotExist(err) {
		return nil
	} else if err != nil {
		return err
	}

	// In libpq, the ssl key is only loaded if the setting is not blank.
	//
	// https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1123-L1222
	sslkey := o["sslkey"]
	if len(sslkey) == 0 && user != nil {
		sslkey = filepath.Join(user.HomeDir, ".postgresql", "postgresql.key")
	}
	if len(sslkey) > 0 {
		if err := sslKeyPermissions(sslkey); err != nil {
			return err
		}
	}
	cert, err := tls.LoadX509KeyPair(sslcert, sslkey)
	if err != nil {
		return err
	}
	tlsConf.Certificates = []tls.Certificate{cert}
	return nil
}
// sslCertificateAuthority adds the RootCA specified in the "sslrootcert" setting.
func sslCertificateAuthority(tlsConf *tls.Config, o values) error {
	// In libpq, the root certificate is only loaded if the setting is not blank.
	//
	// https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L950-L951
	if sslrootcert := o["sslrootcert"]; len(sslrootcert) > 0 {
		tlsConf.RootCAs = x509.NewCertPool()
		cert, err := ioutil.ReadFile(sslrootcert)
		if err != nil {
			return err
		}
		if !tlsConf.RootCAs.AppendCertsFromPEM(cert) {
			return fmterrorf("couldn't parse pem in sslrootcert")
		}
	}
	return nil
}

// sslVerifyCertificateAuthority carries out a TLS handshake to the server and
// verifies the presented certificate against the CA, i.e. the one specified in
// sslrootcert or the system CA if sslrootcert was not specified.
func sslVerifyCertificateAuthority(client *tls.Conn, tlsConf *tls.Config) error {
	err := client.Handshake()
	if err != nil {
		return err
	}
	certs := client.ConnectionState().PeerCertificates
	opts := x509.VerifyOptions{
		DNSName:       client.ConnectionState().ServerName,
		Intermediates: x509.NewCertPool(),
		Roots:         tlsConf.RootCAs,
	}
	// certs[0] is the leaf; every later cert is used as an intermediate
	// when building the verification chain.
	// NOTE(review): assumes the server presented at least one certificate;
	// confirm a successful client handshake guarantees a non-empty chain.
	for i, cert := range certs {
		if i == 0 {
			continue
		}
		opts.Intermediates.AddCert(cert)
	}
	_, err = certs[0].Verify(opts)
	return err
}

View File

@@ -1,20 +0,0 @@
// +build !windows
package pq
import "os"
// sslKeyPermissions checks the permissions on user-supplied ssl key files.
// The key file should have very little access.
//
// libpq does not check key file permissions on Windows.
func sslKeyPermissions(sslkey string) error {
	info, err := os.Stat(sslkey)
	if err != nil {
		return err
	}
	// Reject keys accessible by group or others (any bit in 0077).
	if info.Mode().Perm()&0077 != 0 {
		return ErrSSLKeyHasWorldPermissions
	}
	return nil
}

View File

@@ -1,9 +0,0 @@
// +build windows
package pq
// sslKeyPermissions checks the permissions on user-supplied ssl key files.
// The key file should have very little access.
//
// libpq does not check key file permissions on Windows, so this build of
// the function is a no-op.
func sslKeyPermissions(string) error { return nil }

76
vendor/github.com/lib/pq/url.go generated vendored
View File

@@ -1,76 +0,0 @@
package pq
import (
"fmt"
"net"
nurl "net/url"
"sort"
"strings"
)
// ParseURL converts a url to a connection string for driver.Open.
//
// It remains exported for backwards-compatibility; supplying a URL as a
// connection string to sql.Open() is also supported:
//
//	sql.Open("postgres", "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full")
//
// Example:
//
//	"postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full"
//
// converts to:
//
//	"user=bob password=secret host=1.2.3.4 port=5432 dbname=mydb sslmode=verify-full"
//
// A minimal URL such as "postgres://" converts to the empty string,
// causing driver.Open to use all of the defaults.
func ParseURL(url string) (string, error) {
	u, err := nurl.Parse(url)
	if err != nil {
		return "", err
	}
	if u.Scheme != "postgres" && u.Scheme != "postgresql" {
		return "", fmt.Errorf("invalid connection protocol: %s", u.Scheme)
	}

	// Backslash-escape the characters that are special in a key/value
	// connection string.
	esc := strings.NewReplacer(` `, `\ `, `'`, `\'`, `\`, `\\`)
	var pairs []string
	add := func(key, val string) {
		// Empty values are omitted entirely.
		if val == "" {
			return
		}
		pairs = append(pairs, key+"="+esc.Replace(val))
	}

	if u.User != nil {
		add("user", u.User.Username())
		pw, _ := u.User.Password()
		add("password", pw)
	}

	// A host without a port is passed through as-is.
	host, port, splitErr := net.SplitHostPort(u.Host)
	if splitErr != nil {
		add("host", u.Host)
	} else {
		add("host", host)
		add("port", port)
	}

	if u.Path != "" {
		add("dbname", u.Path[1:]) // strip the leading "/"
	}

	query := u.Query()
	for key := range query {
		add(key, query.Get(key))
	}

	// Sorted output makes the result deterministic (and testing easier).
	sort.Strings(pairs)
	return strings.Join(pairs, " "), nil
}

View File

@@ -1,24 +0,0 @@
// Package pq is a pure Go Postgres driver for the database/sql package.
// +build aix darwin dragonfly freebsd linux nacl netbsd openbsd plan9 solaris rumprun
package pq
import (
"os"
"os/user"
)
// userCurrent reports the name of the operating-system user running this
// process, preferring os/user and falling back to the USER environment
// variable; it fails with ErrCouldNotDetectUsername when neither works.
func userCurrent() (string, error) {
	if u, err := user.Current(); err == nil {
		return u.Username, nil
	}
	if name := os.Getenv("USER"); name != "" {
		return name, nil
	}
	return "", ErrCouldNotDetectUsername
}

View File

@@ -1,27 +0,0 @@
// Package pq is a pure Go Postgres driver for the database/sql package.
package pq
import (
"path/filepath"
"syscall"
)
// Perform Windows user name lookup identically to libpq.
//
// The PostgreSQL code makes use of the legacy Win32 function
// GetUserName, and that function has not been imported into stock Go.
// GetUserNameEx is available though, the difference being that a
// wider range of names are available. To get the output to be the
// same as GetUserName, only the base (or last) component of the
// result is returned.
func userCurrent() (string, error) {
	// UTF-16 output buffer; GetUserNameEx writes the written length back
	// into pwname_size.
	pw_name := make([]uint16, 128)
	pwname_size := uint32(len(pw_name)) - 1
	err := syscall.GetUserNameEx(syscall.NameSamCompatible, &pw_name[0], &pwname_size)
	if err != nil {
		return "", ErrCouldNotDetectUsername
	}
	s := syscall.UTF16ToString(pw_name)
	// Keep only the last path component (e.g. "DOMAIN\\user" -> "user").
	u := filepath.Base(s)
	return u, nil
}

23
vendor/github.com/lib/pq/uuid.go generated vendored
View File

@@ -1,23 +0,0 @@
package pq
import (
"encoding/hex"
"fmt"
)
// decodeUUIDBinary interprets the binary format of a uuid, returning it in
// the canonical 36-byte text format (8-4-4-4-12 hex groups).
func decodeUUIDBinary(src []byte) ([]byte, error) {
	if len(src) != 16 {
		return nil, fmt.Errorf("pq: unable to decode uuid; bad length: %d", len(src))
	}
	// Hex-encode the five groups (4-2-2-2-6 source bytes), dash-separated.
	groups := [5][2]int{{0, 4}, {4, 6}, {6, 8}, {8, 10}, {10, 16}}
	out := make([]byte, 0, 36)
	for i, g := range groups {
		if i > 0 {
			out = append(out, '-')
		}
		seg := make([]byte, hex.EncodedLen(g[1]-g[0]))
		hex.Encode(seg, src[g[0]:g[1]])
		out = append(out, seg...)
	}
	return out, nil
}

View File

@@ -1,8 +0,0 @@
language: go
go:
- 1.9
before_install:
- sudo apt-get update -qq
- sudo apt-get install -qq libunbound-dev
script:
- go test -race -v -bench=. ./...

View File

@@ -1,14 +0,0 @@
# Unbound
A wrapper for Unbound in Go.
Unbound's `ub_result` has been extended with a slice of dns.RRs, which alleviates
the need to parse `ub_result.data` yourself.
The website for Unbound is https://unbound.net/, where you can find further documentation.
Tested/compiled to work for versions: 1.4.22 and 1.6.0-3+deb9u1 (Debian Stretch).
Note: using cgo means the executables will use shared libraries (OpenSSL, ldns and libunbound).
The tutorials found here are the original ones, adapted to Go.

View File

@@ -1,87 +0,0 @@
package unbound
import (
"math/rand"
"sort"
"github.com/miekg/dns"
)
// AddTaRR calls AddTa, but allows to directly use a dns.RR.
// This method is not found in Unbound.
func (u *Unbound) AddTaRR(ta dns.RR) error { return u.AddTa(ta.String()) }

// DataAddRR calls DataAdd, but allows to directly use a dns.RR.
// This method is not found in Unbound.
func (u *Unbound) DataAddRR(data dns.RR) error { return u.DataAdd(data.String()) }

// DataRemoveRR calls DataRemove, but allows to directly use a dns.RR.
// This method is not found in Unbound.
func (u *Unbound) DataRemoveRR(data dns.RR) error { return u.DataRemove(data.String()) }

// Copied from the standard library.
// byPriorityWeight sorts SRV records by ascending priority and weight.
type byPriorityWeight []*dns.SRV

func (s byPriorityWeight) Len() int      { return len(s) }
func (s byPriorityWeight) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s byPriorityWeight) Less(i, j int) bool {
	return s[i].Priority < s[j].Priority ||
		(s[i].Priority == s[j].Priority && s[i].Weight < s[j].Weight)
}
// shuffleByWeight shuffles SRV records by weight using the algorithm
// described in RFC 2782.
func (addrs byPriorityWeight) shuffleByWeight() {
	sum := 0
	for _, addr := range addrs {
		sum += int(addr.Weight)
	}
	for sum > 0 && len(addrs) > 1 {
		s := 0
		n := rand.Intn(sum + 1)
		for i := range addrs {
			s += int(addrs[i].Weight)
			if s >= n {
				if i > 0 {
					// Rotate the selected record to the front, shifting
					// the earlier entries right by one.
					t := addrs[i]
					copy(addrs[1:i+1], addrs[0:i])
					addrs[0] = t
				}
				break
			}
		}
		// The front record is now fixed; repeat on the remainder.
		sum -= int(addrs[0].Weight)
		addrs = addrs[1:]
	}
}

// sort reorders SRV records as specified in RFC 2782: ascending by
// priority, then weighted-shuffled within each equal-priority group.
func (addrs byPriorityWeight) sort() {
	sort.Sort(addrs)
	i := 0
	for j := 1; j < len(addrs); j++ {
		if addrs[i].Priority != addrs[j].Priority {
			addrs[i:j].shuffleByWeight()
			i = j
		}
	}
	addrs[i:].shuffleByWeight()
}
// byPref implements sort.Interface to sort MX records by preference.
type byPref []*dns.MX

func (s byPref) Len() int           { return len(s) }
func (s byPref) Less(i, j int) bool { return s[i].Preference < s[j].Preference }
func (s byPref) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

// sort reorders MX records as specified in RFC 5321.
func (s byPref) sort() {
	// Shuffle first so records with equal preference end up in a random
	// relative order after sorting.
	for i := range s {
		j := rand.Intn(i + 1)
		s[i], s[j] = s[j], s[i]
	}
	sort.Sort(s)
}

View File

@@ -1,164 +0,0 @@
package unbound
import (
"net"
"github.com/miekg/dns"
)
// These functions are re-implementations of the net.Lookup* ones.
// They are adapted to the package unbound and the package dns.

// LookupAddr performs a reverse lookup for the given address, returning a
// list of names mapping to that address.
func (u *Unbound) LookupAddr(addr string) (name []string, err error) {
	reverse, err := dns.ReverseAddr(addr)
	if err != nil {
		return nil, err
	}
	r, err := u.Resolve(reverse, dns.TypePTR, dns.ClassINET)
	if err != nil {
		return nil, err
	}
	for _, rr := range r.Rr {
		name = append(name, rr.(*dns.PTR).Ptr)
	}
	return
}

// LookupCNAME returns the canonical DNS host for the given name. Callers
// that do not care about the canonical name can call LookupHost or
// LookupIP directly; both take care of resolving the canonical name as
// part of the lookup.
func (u *Unbound) LookupCNAME(name string) (cname string, err error) {
	r, err := u.Resolve(name, dns.TypeA, dns.ClassINET)
	// TODO(mg): if nothing found try AAAA?
	// NOTE(review): r is dereferenced even when err is non-nil — confirm
	// Resolve never returns a nil Result together with an error.
	return r.CanonName, err
}

// LookupHost looks up the given host using Unbound. It returns
// an array of that host's addresses.
func (u *Unbound) LookupHost(host string) (addrs []string, err error) {
	ipaddrs, err := u.LookupIP(host)
	if err != nil {
		return nil, err
	}
	for _, ip := range ipaddrs {
		addrs = append(addrs, ip.String())
	}
	return addrs, nil
}
// LookupIP looks up host using Unbound. It returns an array of
// that host's IPv4 and IPv6 addresses.
// The A and AAAA lookups are performed in parallel.
func (u *Unbound) LookupIP(host string) (addrs []net.IP, err error) {
	c := make(chan *ResultError)
	u.ResolveAsync(host, dns.TypeA, dns.ClassINET, c)
	u.ResolveAsync(host, dns.TypeAAAA, dns.ClassINET, c)
	seen := 0
	// TODO(miek): timeout?
Wait:
	for {
		select {
		case r := <-c:
			for _, rr := range r.Rr {
				if x, ok := rr.(*dns.A); ok {
					addrs = append(addrs, x.A)
				}
				if x, ok := rr.(*dns.AAAA); ok {
					addrs = append(addrs, x.AAAA)
				}
			}
			// Stop once both the A and the AAAA response have arrived.
			seen++
			if seen == 2 {
				break Wait
			}
		}
	}
	return
}
// LookupMX returns the DNS MX records for the given domain name sorted by
// preference.
func (u *Unbound) LookupMX(name string) (mx []*dns.MX, err error) {
	r, err := u.Resolve(name, dns.TypeMX, dns.ClassINET)
	if err != nil {
		return nil, err
	}
	for _, rr := range r.Rr {
		mx = append(mx, rr.(*dns.MX))
	}
	// Randomize equal-preference records, then order by preference.
	byPref(mx).sort()
	return
}

// LookupNS returns the DNS NS records for the given domain name.
func (u *Unbound) LookupNS(name string) (ns []*dns.NS, err error) {
	r, err := u.Resolve(name, dns.TypeNS, dns.ClassINET)
	if err != nil {
		return nil, err
	}
	for _, rr := range r.Rr {
		ns = append(ns, rr.(*dns.NS))
	}
	return
}
// LookupSRV tries to resolve an SRV query of the given service, protocol,
// and domain name. The proto is "tcp" or "udp". The returned records are
// sorted by priority and randomized by weight within a priority.
//
// LookupSRV constructs the DNS name to look up following RFC 2782. That
// is, it looks up _service._proto.name. To accommodate services publishing
// SRV records under non-standard names, if both service and proto are
// empty strings, LookupSRV looks up name directly.
func (u *Unbound) LookupSRV(service, proto, name string) (cname string, srv []*dns.SRV, err error) {
	r := new(Result)
	if service == "" && proto == "" {
		r, err = u.Resolve(name, dns.TypeSRV, dns.ClassINET)
	} else {
		r, err = u.Resolve("_"+service+"._"+proto+"."+name, dns.TypeSRV, dns.ClassINET)
	}
	if err != nil {
		return "", nil, err
	}
	for _, rr := range r.Rr {
		srv = append(srv, rr.(*dns.SRV))
	}
	byPriorityWeight(srv).sort()
	// NOTE(review): the canonical name is always returned empty here —
	// confirm whether r.CanonName was intended.
	return "", srv, err
}

// LookupTXT returns the DNS TXT records for the given domain name.
func (u *Unbound) LookupTXT(name string) (txt []string, err error) {
	r, err := u.Resolve(name, dns.TypeTXT, dns.ClassINET)
	if err != nil {
		return nil, err
	}
	for _, rr := range r.Rr {
		txt = append(txt, rr.(*dns.TXT).Txt...)
	}
	return
}
// LookupTLSA returns the DNS DANE records for the given domain service, protocol
// and domainname.
//
// LookupTLSA constructs the DNS name to look up following RFC 6698. That
// is, it looks up _port._proto.name.
func (u *Unbound) LookupTLSA(service, proto, name string) (tlsa []*dns.TLSA, err error) {
	tlsaname, err := dns.TLSAName(name, service, proto)
	if err != nil {
		return nil, err
	}
	r, err := u.Resolve(tlsaname, dns.TypeTLSA, dns.ClassINET)
	if err != nil {
		return nil, err
	}
	for _, rr := range r.Rr {
		tlsa = append(tlsa, rr.(*dns.TLSA))
	}
	return tlsa, nil
}

View File

@@ -1,386 +0,0 @@
// Package unbound implements a wrapper for libunbound(3).
// Unbound is a DNSSEC aware resolver, see https://unbound.net/
// for more information. It's up to the caller to configure
// Unbound with trust anchors. With these anchors a DNSSEC
// answer can be validated.
//
// The method's documentation can be found in libunbound(3).
// The names of the methods are in sync with the
// names used in unbound, but the underscores are removed and they
// are in camel-case, e.g. ub_ctx_resolv_conf becomes u.ResolvConf.
// Except for ub_ctx_create() and ub_ctx_delete(),
// which become: New() and Destroy() to be more in line with the standard
// Go practice.
//
// Basic use pattern:
// u := unbound.New()
// defer u.Destroy()
// u.ResolvConf("/etc/resolv.conf")
// u.AddTaFile("trustanchor")
// r, e := u.Resolve("miek.nl.", dns.TypeA, dns.ClassINET)
//
// The asynchronous functions are implemented using goroutines. This
// means the following functions are not useful in Go and therefor
// not implemented: ub_fd, ub_wait, ub_poll, ub_process and ub_cancel.
//
// Unbound's ub_result (named Result in the package) has been modified.
// An extra field has been added, 'Rr', which is a []dns.RR.
//
// The Lookup* functions of the net package are re-implemented in this package.
package unbound
/*
#cgo LDFLAGS: -lunbound
#include <stdlib.h>
#include <stdio.h>
#include <unbound.h>
#ifndef offsetof
#define offsetof(type, member) __builtin_offsetof (type, member)
#endif
int array_elem_int(int *l, int i) { return l[i]; }
char * array_elem_char(char **l, int i) { if (l == NULL) return NULL; return l[i]; }
char * new_char_pointer() { char *p = NULL; return p; }
struct ub_result *new_ub_result() {
struct ub_result *r;
r = calloc(sizeof(struct ub_result), 1);
return r;
}
int ub_ttl(struct ub_result *r) {
int *p;
// Go to why_bogus add the pointer and then we will find the ttl, hopefully.
p = (int*) ((char*)r + offsetof(struct ub_result, why_bogus) + sizeof(char*));
return (int)*p;
}
*/
import "C"
import (
"encoding/binary"
"os"
"strconv"
"strings"
"time"
"unsafe"
"github.com/miekg/dns"
)
// Unbound wraps libunbound's ub_ctx resolver context. Create one with
// New and release it with Destroy.
type Unbound struct {
	ctx     *C.struct_ub_ctx // the underlying libunbound context
	version [3]int           // cached library version; see haveTtlFeature
}

// Results is Unbound's ub_result adapted for Go.
type Result struct {
	Qname        string        // Text string, original question
	Qtype        uint16        // Type code asked for
	Qclass       uint16        // Class code asked for
	Data         [][]byte      // Slice of rdata items formed from the reply
	Rr           []dns.RR      // The RR encoded from Data, Qclass, Qtype, Qname and Ttl (not in Unbound)
	CanonName    string        // Canonical name of result
	Rcode        int           // Additional error code in case of no data
	AnswerPacket *dns.Msg      // Full answer packet
	HaveData     bool          // True if there is data
	NxDomain     bool          // True if the name does not exist
	Secure       bool          // True if the result is secure
	Bogus        bool          // True if a security failure happened
	WhyBogus     string        // String with error when bogus
	Ttl          uint32        // TTL for the result in seconds (0 for unbound versions < 1.4.20)
	Rtt          time.Duration // Time the query took (not in Unbound)
}

// UnboundError is an error returned from Unbound, it wraps both the
// return code and the error string as returned by ub_strerror.
type UnboundError struct {
	Err  string
	code int
}

// ResultError encapsulates a *Result and an error. This is used to
// communicate with unbound over a channel.
type ResultError struct {
	*Result
	Error error
}

// Error implements the error interface.
func (e *UnboundError) Error() string {
	return e.Err
}

// newError converts a libunbound return code into an error; a code of
// zero means success and yields nil.
func newError(i int) error {
	if i == 0 {
		return nil
	}
	e := new(UnboundError)
	e.Err = errorString(i)
	e.code = i
	return e
}

// errorString returns the human-readable message for a libunbound
// return code, via ub_strerror.
func errorString(i int) string {
	return C.GoString(C.ub_strerror(C.int(i)))
}

// unbound versions from 1.4.20 (inclusive) and above fill in the Ttl in the result;
// haveTtlFeature reports whether the linked library is such a version.
func (u *Unbound) haveTtlFeature() bool {
	if u.version[0] < 1 {
		return false
	} else if u.version[0] == 1 && u.version[1] < 4 {
		return false
	} else if u.version[0] == 1 && u.version[1] == 4 && u.version[2] <= 20 {
		return false
	} else {
		return true
	}
}
// New wraps Unbound's ub_ctx_create. The caller is responsible for
// calling Destroy on the returned context.
func New() *Unbound {
	u := new(Unbound)
	u.ctx = C.ub_ctx_create()
	// Cache the library version once; Resolve consults it to decide
	// whether the Ttl field is available (>= 1.4.20).
	u.version = u.Version()
	return u
}

// Destroy wraps Unbound's ub_ctx_delete. The context must not be used
// after calling Destroy.
func (u *Unbound) Destroy() {
	C.ub_ctx_delete(u.ctx)
}

// ResolvConf wraps Unbound's ub_ctx_resolvconf.
func (u *Unbound) ResolvConf(fname string) error {
	cfname := C.CString(fname)
	defer C.free(unsafe.Pointer(cfname))
	i := C.ub_ctx_resolvconf(u.ctx, cfname)
	return newError(int(i))
}

// SetOption wraps Unbound's ub_ctx_set_option.
func (u *Unbound) SetOption(opt, val string) error {
	copt := C.CString(opt)
	defer C.free(unsafe.Pointer(copt))
	cval := C.CString(val)
	defer C.free(unsafe.Pointer(cval))
	i := C.ub_ctx_set_option(u.ctx, copt, cval)
	return newError(int(i))
}
// GetOption wraps Unbound's ub_ctx_get_option.
//
// Two leaks fixed here: the option name is converted to a C string once
// (copt) and freed, instead of allocating a second, never-freed copy in
// the ub_ctx_get_option call; and the value buffer written by libunbound
// is freed after the call. (A `defer C.free(unsafe.Pointer(cval))`
// placed before the call would free the initial NULL pointer, because
// defer evaluates its argument immediately, leaking the real result.)
func (u *Unbound) GetOption(opt string) (string, error) {
	copt := C.CString(opt)
	defer C.free(unsafe.Pointer(copt))
	cval := C.new_char_pointer()
	i := C.ub_ctx_get_option(u.ctx, copt, &cval)
	// Copy the C string into Go memory, then release the C allocation.
	// free(NULL) is a no-op, so this is safe even if the call failed.
	val := C.GoString(cval)
	C.free(unsafe.Pointer(cval))
	return val, newError(int(i))
}
// Config wraps Unbound's ub_ctx_config: read settings from the given
// unbound configuration file.
func (u *Unbound) Config(fname string) error {
	cfname := C.CString(fname)
	defer C.free(unsafe.Pointer(cfname))
	i := C.ub_ctx_config(u.ctx, cfname)
	return newError(int(i))
}

// SetFwd wraps Unbound's ub_ctx_set_fwd: forward all queries to the
// nameserver at addr instead of resolving iteratively.
func (u *Unbound) SetFwd(addr string) error {
	caddr := C.CString(addr)
	defer C.free(unsafe.Pointer(caddr))
	i := C.ub_ctx_set_fwd(u.ctx, caddr)
	return newError(int(i))
}

// Hosts wraps Unbound's ub_ctx_hosts: read a hosts(5)-style file.
func (u *Unbound) Hosts(fname string) error {
	cfname := C.CString(fname)
	defer C.free(unsafe.Pointer(cfname))
	i := C.ub_ctx_hosts(u.ctx, cfname)
	return newError(int(i))
}
// Resolve wraps Unbound's ub_resolve. It performs a synchronous lookup
// of name/rrtype/rrclass, converts the C ub_result into a *Result, and
// reconstructs Go dns.RR values from the raw rdata items.
func (u *Unbound) Resolve(name string, rrtype, rrclass uint16) (*Result, error) {
	name = dns.Fqdn(name)
	cname := C.CString(name)
	defer C.free(unsafe.Pointer(cname))
	res := C.new_ub_result()
	r := new(Result)
	// Normally, we would call 'defer C.ub_resolve_free(res)' here, but
	// that does not work (in Go 1.6.1), see
	// https://github.com/miekg/unbound/issues/8
	// This is likely related to https://github.com/golang/go/issues/15921
	t := time.Now()
	i := C.ub_resolve(u.ctx, cname, C.int(rrtype), C.int(rrclass), &res)
	r.Rtt = time.Since(t)
	err := newError(int(i))
	if err != nil {
		C.ub_resolve_free(res)
		return nil, err
	}
	// Copy the scalar fields out of the C result before freeing it.
	r.Qname = C.GoString(res.qname)
	r.Qtype = uint16(res.qtype)
	r.Qclass = uint16(res.qclass)
	r.CanonName = C.GoString(res.canonname)
	r.Rcode = int(res.rcode)
	r.AnswerPacket = new(dns.Msg)
	r.AnswerPacket.Unpack(C.GoBytes(res.answer_packet, res.answer_len)) // Should always work
	r.HaveData = res.havedata == 1
	r.NxDomain = res.nxdomain == 1
	r.Secure = res.secure == 1
	r.Bogus = res.bogus == 1
	r.WhyBogus = C.GoString(res.why_bogus)
	if u.haveTtlFeature() {
		// Ttl sits past why_bogus in the C struct; ub_ttl digs it out.
		r.Ttl = uint32(C.ub_ttl(res))
	}
	// Re-create the RRs
	var h dns.RR_Header
	h.Name = r.Qname
	h.Rrtype = r.Qtype
	h.Class = r.Qclass
	h.Ttl = r.Ttl
	j := 0
	if r.HaveData {
		r.Data = make([][]byte, 0)
		r.Rr = make([]dns.RR, 0)
		b := C.GoBytes(unsafe.Pointer(C.array_elem_char(res.data, C.int(j))), C.array_elem_int(res.len, C.int(j)))
		// Create the RR; write out the header details and
		// the rdata to a buffer, and unpack it again into an
		// actual RR, for ever rr found by resolve
		hdrBuf := make([]byte, len(h.Name)+11)
		off, _ := dns.PackDomainName(h.Name, hdrBuf, 0, nil, false)
		binary.BigEndian.PutUint16(hdrBuf[off:], h.Rrtype)
		off += 2
		binary.BigEndian.PutUint16(hdrBuf[off:], h.Class)
		off += 2
		binary.BigEndian.PutUint32(hdrBuf[off:], h.Ttl)
		off += 4
		for len(b) != 0 {
			h.Rdlength = uint16(len(b))
			// Note: we are rewriting the rdata len so we do not
			// increase off anymore.
			binary.BigEndian.PutUint16(hdrBuf[off:], h.Rdlength)
			rrBuf := append(hdrBuf, b...)
			rr, _, err := dns.UnpackRR(rrBuf, 0)
			if err == nil {
				r.Rr = append(r.Rr, rr)
			}
			r.Data = append(r.Data, b)
			j++
			b = C.GoBytes(unsafe.Pointer(C.array_elem_char(res.data, C.int(j))), C.array_elem_int(res.len, C.int(j)))
		}
	}
	C.ub_resolve_free(res)
	return r, err
}
// ResolveAsync does *not* wrap the Unbound function; instead it uses a
// goroutine and a channel to provide the asynchronous behavior Unbound
// implements, so the signature differs from ub_resolve_async.
// The result (or an error) is delivered on channel c as a *ResultError.
// Consequently ub_cancel, ub_wait, ub_fd and ub_process are not implemented.
func (u *Unbound) ResolveAsync(name string, rrtype, rrclass uint16, c chan *ResultError) {
	go func() {
		res, err := u.Resolve(name, rrtype, rrclass)
		c <- &ResultError{Result: res, Error: err}
	}()
}
// AddTa wraps Unbound's ub_ctx_add_ta: add a trust anchor given as a
// string in zone-file format.
//
// Fix: the C copy of ta was never freed, leaking memory on every call;
// every sibling wrapper in this file frees its C string with defer.
func (u *Unbound) AddTa(ta string) error {
	cta := C.CString(ta)
	defer C.free(unsafe.Pointer(cta))
	i := C.ub_ctx_add_ta(u.ctx, cta)
	return newError(int(i))
}
// AddTaFile wraps Unbound's ub_ctx_add_ta_file: load trust anchors from
// a zone-file formatted file.
func (u *Unbound) AddTaFile(fname string) error {
	cfname := C.CString(fname)
	defer C.free(unsafe.Pointer(cfname))
	i := C.ub_ctx_add_ta_file(u.ctx, cfname)
	return newError(int(i))
}

// TrustedKeys wraps Unbound's ub_ctx_trustedkeys: load trust anchors
// from a BIND-style trusted-keys file.
func (u *Unbound) TrustedKeys(fname string) error {
	cfname := C.CString(fname)
	defer C.free(unsafe.Pointer(cfname))
	i := C.ub_ctx_trustedkeys(u.ctx, cfname)
	return newError(int(i))
}

// ZoneAdd wraps Unbound's ub_ctx_zone_add: configure a local zone of
// the given type (e.g. "static", "transparent").
func (u *Unbound) ZoneAdd(zone_name, zone_type string) error {
	czone_name := C.CString(zone_name)
	defer C.free(unsafe.Pointer(czone_name))
	czone_type := C.CString(zone_type)
	defer C.free(unsafe.Pointer(czone_type))
	i := C.ub_ctx_zone_add(u.ctx, czone_name, czone_type)
	return newError(int(i))
}

// ZoneRemove wraps Unbound's ub_ctx_zone_remove.
func (u *Unbound) ZoneRemove(zone_name string) error {
	czone_name := C.CString(zone_name)
	defer C.free(unsafe.Pointer(czone_name))
	i := C.ub_ctx_zone_remove(u.ctx, czone_name)
	return newError(int(i))
}

// DataAdd wraps Unbound's ub_ctx_data_add: add local zone data given as
// a resource record string.
func (u *Unbound) DataAdd(data string) error {
	cdata := C.CString(data)
	defer C.free(unsafe.Pointer(cdata))
	i := C.ub_ctx_data_add(u.ctx, cdata)
	return newError(int(i))
}

// DataRemove wraps Unbound's ub_ctx_data_remove.
func (u *Unbound) DataRemove(data string) error {
	cdata := C.CString(data)
	defer C.free(unsafe.Pointer(cdata))
	i := C.ub_ctx_data_remove(u.ctx, cdata)
	return newError(int(i))
}
// DebugOut wraps Unbound's ub_ctx_debugout: send debug output to the
// given file. The *os.File's descriptor is wrapped in a C FILE* via
// fdopen; the FILE* is intentionally not closed, as closing it would
// also close the caller's descriptor.
func (u *Unbound) DebugOut(out *os.File) error {
	cmode := C.CString("a+")
	defer C.free(unsafe.Pointer(cmode))
	file := C.fdopen(C.int(out.Fd()), cmode)
	i := C.ub_ctx_debugout(u.ctx, unsafe.Pointer(file))
	return newError(int(i))
}

// DebugLevel wraps Unbound's ub_ctx_debuglevel.
func (u *Unbound) DebugLevel(d int) error {
	i := C.ub_ctx_debuglevel(u.ctx, C.int(d))
	return newError(int(i))
}

// Version wraps Unbound's ub_version. Returns the version of the Unbound
// library as integers [major, minor, patch]. On an unexpected version
// string the zero value [0, 0, 0] is returned.
func (u *Unbound) Version() (version [3]int) {
	// split the string on the dots
	v := strings.SplitN(C.GoString(C.ub_version()), ".", 3)
	if len(v) != 3 {
		return
	}
	// Parse errors leave the respective component at zero.
	version[0], _ = strconv.Atoi(v[0])
	version[1], _ = strconv.Atoi(v[1])
	version[2], _ = strconv.Atoi(v[2])
	return
}

View File

@@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2016-2020 Simone Carletti
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -1,39 +0,0 @@
// Package publicsuffix is a drop-in replacement for the golang.org/x/net/publicsuffix
// based on the weppos/publicsuffix package.
package publicsuffix
import (
psl "github.com/weppos/publicsuffix-go/publicsuffix"
)
// PublicSuffix returns the public suffix of the domain
// using a copy of the publicsuffix.org database packaged into this library.
//
// Note. To maintain compatibility with the golang.org/x/net/publicsuffix
// this method doesn't return an error. However, in case of error,
// the returned value is empty.
func PublicSuffix(domain string) (publicSuffix string, icann bool) {
	// Find never returns nil here: with nil options the default rule
	// "*" is used as a fallback.
	rule := psl.DefaultList.Find(domain, nil)
	publicSuffix = rule.Decompose(domain)[1]
	icann = !rule.Private
	// x/net/publicsuffix sets icann to false when the default rule "*" is used
	if rule.Value == "" && rule.Type == psl.WildcardType {
		icann = false
	}
	return
}
// EffectiveTLDPlusOne returns the effective top level domain plus one more label.
// For example, the eTLD+1 for "foo.bar.golang.org" is "golang.org".
// It simply delegates to the underlying publicsuffix package.
func EffectiveTLDPlusOne(domain string) (string, error) {
	return psl.Domain(domain)
}

View File

@@ -1,544 +0,0 @@
//go:generate go run ../cmd/gen/gen.go
// Package publicsuffix provides a domain name parser
// based on data from the public suffix list http://publicsuffix.org/.
// A public suffix is one under which Internet users can directly register names.
package publicsuffix
import (
"bufio"
"fmt"
"io"
"net/http/cookiejar"
"os"
"strings"
"golang.org/x/net/idna"
)
const (
	// Version identifies the current library version.
	// This is a pro forma convention given that Go dependencies
	// tends to be fetched directly from the repo.
	Version = "0.13.0"

	// NormalType represents a normal rule such as "com"
	NormalType = 1
	// WildcardType represents a wildcard rule such as "*.com"
	WildcardType = 2
	// ExceptionType represents an exception to a wildard rule
	ExceptionType = 3

	// listTokenPrivateDomains marks the start of the private section in
	// the publicsuffix.org list file.
	listTokenPrivateDomains = "===BEGIN PRIVATE DOMAINS==="
	listTokenComment        = "//"
)

// DefaultList is the default List and it is used by Parse and Domain.
var DefaultList = NewList()

// DefaultRule is the default Rule that represents "*".
var DefaultRule = MustNewRule("*")

// DefaultParserOptions are the default options used to parse a Public Suffix list.
var DefaultParserOptions = &ParserOption{PrivateDomains: true, ASCIIEncoded: false}

// DefaultFindOptions are the default options used to perform the lookup of rules in the list.
var DefaultFindOptions = &FindOptions{IgnorePrivate: false, DefaultRule: DefaultRule}

// Rule represents a single rule in a Public Suffix List.
type Rule struct {
	Type    int    // one of NormalType, WildcardType, ExceptionType
	Value   string // the rule text without any "*." or "!" prefix
	Length  int    // number of labels the rule matches
	Private bool   // true when the rule comes from the private section
}

// ParserOption are the options you can use to customize the way a List
// is parsed from a file or a string.
type ParserOption struct {
	// Set to false to skip the private domains when parsing.
	// Default to true, which means the private domains are included.
	PrivateDomains bool

	// Set to false if the input is encoded in U-labels (Unicode)
	// as opposite to A-labels.
	// Default to false, which means the list is containing Unicode domains.
	// This is the default because the original PSL currently contains Unicode.
	ASCIIEncoded bool
}

// FindOptions are the options you can use to customize the way a Rule
// is searched within the list.
type FindOptions struct {
	// Set to true to ignore the rules within the "Private" section of the Public Suffix List.
	IgnorePrivate bool

	// The default rule to use when no rule matches the input.
	// The format Public Suffix algorithm states that the rule "*" should be used when no other rule matches,
	// but some consumers may have different needs.
	DefaultRule *Rule
}
// List represents a Public Suffix List.
type List struct {
	// rules is indexed by rule value; it is kept private because rules
	// must not be accessed directly — use the List methods instead.
	rules map[string]*Rule
}

// NewList creates a new empty list.
func NewList() *List {
	l := &List{}
	l.rules = make(map[string]*Rule)
	return l
}
// NewListFromString parses a string that represents a Public Suffix source
// and returns a List initialized with the rules in the source.
func NewListFromString(src string, options *ParserOption) (*List, error) {
	list := NewList()
	if _, err := list.LoadString(src, options); err != nil {
		return list, err
	}
	return list, nil
}

// NewListFromFile parses a file that contains a Public Suffix source
// and returns a List initialized with the rules in the source.
func NewListFromFile(path string, options *ParserOption) (*List, error) {
	list := NewList()
	if _, err := list.LoadFile(path, options); err != nil {
		return list, err
	}
	return list, nil
}

// Load parses and loads a set of rules from an io.Reader into the current list.
func (l *List) Load(r io.Reader, options *ParserOption) ([]Rule, error) {
	return l.parse(r, options)
}

// LoadString parses and loads a set of rules from a String into the current list.
func (l *List) LoadString(src string, options *ParserOption) ([]Rule, error) {
	return l.parse(strings.NewReader(src), options)
}

// LoadFile parses and loads a set of rules from a File into the current list.
func (l *List) LoadFile(path string, options *ParserOption) ([]Rule, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	return l.parse(f, options)
}
// AddRule adds a new rule to the list.
//
// The exact position of the rule into the list is unpredictable.
// The list may be optimized internally for lookups, therefore the algorithm
// will decide the best position for the new rule.
// A rule with the same Value as an existing one replaces it.
func (l *List) AddRule(r *Rule) error {
	l.rules[r.Value] = r
	return nil
}

// Size returns the size of the list, which is the number of rules.
func (l *List) Size() int {
	return len(l.rules)
}
// parse reads a Public Suffix source line by line, adding each rule to
// the list and returning the parsed rules in order. The section marker
// listTokenPrivateDomains switches rules to Private; if private domains
// are disabled in options, scanning stops there instead.
func (l *List) parse(r io.Reader, options *ParserOption) ([]Rule, error) {
	if options == nil {
		options = DefaultParserOptions
	}
	var rules []Rule

	scanner := bufio.NewScanner(r)
	var section int // 1 == ICANN, 2 == PRIVATE

Scanning:
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		switch {

		// skip blank lines
		case line == "":
			break

		// include private domains or stop scanner
		case strings.Contains(line, listTokenPrivateDomains):
			if !options.PrivateDomains {
				break Scanning
			}
			section = 2

		// skip comments
		case strings.HasPrefix(line, listTokenComment):
			break

		default:
			var rule *Rule
			var err error

			// Pick the parser matching the source encoding.
			if options.ASCIIEncoded {
				rule, err = NewRule(line)
			} else {
				rule, err = NewRuleUnicode(line)
			}
			if err != nil {
				return []Rule{}, err
			}

			rule.Private = (section == 2)
			l.AddRule(rule)
			rules = append(rules, *rule)
		}

	}

	return rules, scanner.Err()
}
// Find returns the most appropriate rule for the domain name, walking
// the name label by label from the left until a stored rule matches.
// When nothing matches, the DefaultRule from options is returned.
func (l *List) Find(name string, options *FindOptions) *Rule {
	if options == nil {
		options = DefaultFindOptions
	}

	for part := name; ; {
		if rule, ok := l.rules[part]; ok && rule.Match(name) {
			if !(options.IgnorePrivate && rule.Private) {
				return rule
			}
		}
		dot := strings.IndexRune(part, '.')
		if dot < 0 {
			return options.DefaultRule
		}
		part = part[dot+1:]
	}
}
// NewRule parses the rule content, creates and returns a Rule.
//
// The content of the rule MUST be encoded in ASCII (A-labels).
//
// Empty content and malformed wildcards (anything starting with "*"
// other than "*" itself or "*.suffix") now return an error instead of
// panicking on an out-of-range slice (content[0:1] on "" and
// content[2:] on e.g. "*x" both panicked before).
func NewRule(content string) (*Rule, error) {
	if content == "" {
		return nil, fmt.Errorf("rule content is empty")
	}

	var rule *Rule
	var value string

	switch content[0:1] {
	case "*": // wildcard
		if content == "*" {
			value = ""
		} else {
			if !strings.HasPrefix(content, "*.") {
				return nil, fmt.Errorf("invalid wildcard rule %q", content)
			}
			value = content[2:]
		}
		rule = &Rule{Type: WildcardType, Value: value, Length: len(Labels(value)) + 1}
	case "!": // exception
		value = content[1:]
		rule = &Rule{Type: ExceptionType, Value: value, Length: len(Labels(value))}
	default: // normal
		value = content
		rule = &Rule{Type: NormalType, Value: value, Length: len(Labels(value))}
	}
	return rule, nil
}
// NewRuleUnicode is like NewRule, but expects the content to be encoded in Unicode (U-labels).
// The content is converted to ASCII (A-labels) first and then parsed.
func NewRuleUnicode(content string) (*Rule, error) {
	ascii, err := ToASCII(content)
	if err != nil {
		return nil, err
	}
	return NewRule(ascii)
}

// MustNewRule is like NewRule, but panics if the content cannot be parsed.
func MustNewRule(content string) *Rule {
	r, err := NewRule(content)
	if err != nil {
		panic(err)
	}
	return r
}
// Match checks if the rule matches the name.
//
// A domain name is said to match a rule if and only if all of the following conditions are met:
// - When the domain and rule are split into corresponding labels,
//   that the domain contains as many or more labels than the rule.
// - Beginning with the right-most labels of both the domain and the rule,
//   and continuing for all labels in the rule, one finds that for every pair,
//   either they are identical, or that the label from the rule is "*".
//
// See https://publicsuffix.org/list/
func (r *Rule) Match(name string) bool {
	remainder := strings.TrimSuffix(name, r.Value)
	switch {
	case remainder == "":
		// The name has exactly as many labels as the rule: a match,
		// unless the rule is a wildcard, which requires one more label.
		return r.Type != WildcardType
	case strings.HasSuffix(remainder, "."):
		// At least one extra label precedes the rule value, so either
		// the rule is shorter than the domain or it is a wildcard with
		// its extra label present.
		return true
	default:
		return false
	}
}
// Decompose takes a name as input and decomposes it into a tuple of <TRD+SLD, TLD>,
// according to the rule definition and type. On failure (the name is
// the suffix itself, or has too few labels) both elements are empty.
func (r *Rule) Decompose(name string) (result [2]string) {
	// The "*" default rule simply splits at the last dot.
	if r == DefaultRule {
		i := strings.LastIndex(name, ".")
		if i < 0 {
			return
		}
		result[0], result[1] = name[:i], name[i+1:]
		return
	}
	switch r.Type {
	case NormalType:
		name = strings.TrimSuffix(name, r.Value)
		if len(name) == 0 {
			return
		}
		// Drop the trailing dot left by the trim.
		result[0], result[1] = name[:len(name)-1], r.Value
	case WildcardType:
		name := strings.TrimSuffix(name, r.Value)
		if len(name) == 0 {
			return
		}
		name = name[:len(name)-1]
		// The wildcard consumes one extra label, which becomes part of
		// the returned suffix.
		i := strings.LastIndex(name, ".")
		if i < 0 {
			return
		}
		result[0], result[1] = name[:i], name[i+1:]+"."+r.Value
	case ExceptionType:
		// Exceptions match one label less than their value.
		i := strings.IndexRune(r.Value, '.')
		if i < 0 {
			return
		}
		suffix := r.Value[i+1:]
		name = strings.TrimSuffix(name, suffix)
		if len(name) == 0 {
			return
		}
		result[0], result[1] = name[:len(name)-1], suffix
	}
	return
}
// Labels decomposes the given domain name into its labels, i.e. the
// dot-separated tokens, in left-to-right order.
func Labels(name string) []string {
	return strings.SplitN(name, ".", -1)
}
// DomainName represents a domain name split into its three main parts.
type DomainName struct {
	TLD  string // effective top-level domain ("com", "co.uk", ...)
	SLD  string // second-level domain
	TRD  string // everything to the left of the SLD ("www", ...)
	Rule *Rule  // the rule used for the decomposition
}

// String joins the components of the domain name into a single string.
// Empty labels are skipped.
//
// Examples:
//
// 	DomainName{"com", "example"}.String()
// 	// example.com
// 	DomainName{"com", "example", "www"}.String()
// 	// www.example.com
//
func (d *DomainName) String() string {
	if d.TLD == "" {
		return ""
	}
	if d.SLD == "" {
		return d.TLD
	}
	if d.TRD == "" {
		return d.SLD + "." + d.TLD
	}
	return d.TRD + "." + d.SLD + "." + d.TLD
}
// Domain extract and return the domain name from the input
// using the default (Public Suffix) List.
//
// Examples:
//
// 	publicsuffix.Domain("example.com")
// 	// example.com
// 	publicsuffix.Domain("www.example.com")
// 	// example.com
// 	publicsuffix.Domain("www.example.co.uk")
// 	// example.co.uk
//
func Domain(name string) (string, error) {
	return DomainFromListWithOptions(DefaultList, name, DefaultFindOptions)
}

// Parse decomposes the name into TLD, SLD, TRD
// using the default (Public Suffix) List,
// and returns the result as a DomainName
//
// Examples:
//
// 	publicsuffix.Parse("example.com")
// 	// &DomainName{"com", "example"}
// 	publicsuffix.Parse("www.example.com")
// 	// &DomainName{"com", "example", "www"}
// 	publicsuffix.Parse("www.example.co.uk")
// 	// &DomainName{"co.uk", "example"}
//
func Parse(name string) (*DomainName, error) {
	return ParseFromListWithOptions(DefaultList, name, DefaultFindOptions)
}
// DomainFromListWithOptions extracts and returns the registrable domain
// (SLD + "." + TLD) from name, using the (Public Suffix) list passed as
// argument and the given find options.
//
// Examples:
//
// 	list := NewList()
//
// 	publicsuffix.DomainFromListWithOptions(list, "example.com")
// 	// example.com
// 	publicsuffix.DomainFromListWithOptions(list, "www.example.co.uk")
// 	// example.co.uk
//
func DomainFromListWithOptions(l *List, name string, options *FindOptions) (string, error) {
	parsed, err := ParseFromListWithOptions(l, name, options)
	if err != nil {
		return "", err
	}
	return parsed.SLD + "." + parsed.TLD, nil
}
// ParseFromListWithOptions decomposes the name into TLD, SLD, TRD
// using the (Public Suffix) list passed as argument,
// and returns the result as a DomainName.
//
// An error is returned when the name cannot be normalized, no rule
// matches, or the name is itself a public suffix.
//
// Examples:
//
// 	list := NewList()
//
// 	publicsuffix.ParseFromListWithOptions(list, "example.com")
// 	// &DomainName{"com", "example"}
// 	publicsuffix.ParseFromListWithOptions(list, "www.example.co.uk")
// 	// &DomainName{"co.uk", "example"}
//
func ParseFromListWithOptions(l *List, name string, options *FindOptions) (*DomainName, error) {
	n, err := normalize(name)
	if err != nil {
		return nil, err
	}

	r := l.Find(n, options)
	if r == nil {
		return nil, fmt.Errorf("no rule matching name %s", name)
	}

	parts := r.Decompose(n)
	left, tld := parts[0], parts[1]
	// An empty TLD means the name equals the suffix itself.
	if tld == "" {
		return nil, fmt.Errorf("%s is a suffix", n)
	}

	dn := &DomainName{
		Rule: r,
		TLD:  tld,
	}
	// Split the remainder into SLD (rightmost label) and TRD (the rest).
	if i := strings.LastIndex(left, "."); i < 0 {
		dn.SLD = left
	} else {
		dn.TRD = left[:i]
		dn.SLD = left[i+1:]
	}
	return dn, nil
}
// normalize lowercases name and rejects inputs that cannot be parsed:
// the empty string and names beginning with a dot.
func normalize(name string) (string, error) {
	lowered := strings.ToLower(name)
	switch {
	case lowered == "":
		return "", fmt.Errorf("name is blank")
	case lowered[0] == '.':
		return "", fmt.Errorf("name %s starts with a dot", lowered)
	}
	return lowered, nil
}
// ToASCII is a wrapper for idna.ToASCII.
//
// This wrapper exists because idna.ToASCII backward-compatibility was broken twice in few months
// and I can't call this package directly anymore. The wrapper performs some terrible-but-necessary
// before-after replacements to make sure an already ASCII input always results in the same output
// even if passed through ToASCII.
//
// See golang/net@67957fd0b1, golang/net@f2499483f9, golang/net@78ebe5c8b6,
// and weppos/publicsuffix-go#66.
func ToASCII(s string) (string, error) {
	// Leading dots are preserved verbatim, because idna.ToASCII would
	// alter them:
	// .example.com should be .example.com
	// ..example.com should be ..example.com
	if strings.HasPrefix(s, ".") {
		// Find the index of the last leading dot.
		dotIndex := 0
		for i := 0; i < len(s); i++ {
			if s[i] == '.' {
				dotIndex = i
			} else {
				break
			}
		}
		// Convert only the part after the dots, then re-attach them.
		out, err := idna.ToASCII(s[dotIndex+1:])
		out = s[:dotIndex+1] + out
		return out, err
	}
	return idna.ToASCII(s)
}

// ToUnicode is a wrapper for idna.ToUnicode.
//
// See ToASCII for more details about why this wrapper exists.
func ToUnicode(s string) (string, error) {
	return idna.ToUnicode(s)
}
// CookieJarList implements the cookiejar.PublicSuffixList interface
// backed by DefaultList, for use with net/http/cookiejar.
var CookieJarList cookiejar.PublicSuffixList = cookiejarList{DefaultList}

// cookiejarList adapts a *List to the cookiejar.PublicSuffixList interface.
type cookiejarList struct {
	List *List
}

// PublicSuffix implements cookiejar.PublicSuffixList.
func (l cookiejarList) PublicSuffix(domain string) string {
	rule := l.List.Find(domain, nil)
	return rule.Decompose(domain)[1]
}

// String implements cookiejar.PublicSuffixList; it reports the version
// of the embedded list data.
func (cookiejarList) String() string {
	return defaultListVersion
}

File diff suppressed because it is too large Load Diff

18
vendor/modules.txt vendored
View File

@@ -85,9 +85,6 @@ github.com/datarhei/joy4/utils/bits/pio
# github.com/davecgh/go-spew v1.1.1
## explicit
github.com/davecgh/go-spew/spew
# github.com/eggsampler/acme/v3 v3.1.1
## explicit; go 1.11
github.com/eggsampler/acme/v3
# github.com/go-ole/go-ole v1.2.6
## explicit; go 1.12
github.com/go-ole/go-ole
@@ -174,14 +171,6 @@ github.com/labstack/gommon/random
# github.com/leodido/go-urn v1.2.1
## explicit; go 1.13
github.com/leodido/go-urn
# github.com/letsdebug/letsdebug v1.6.1
## explicit; go 1.15
github.com/letsdebug/letsdebug
# github.com/lib/pq v1.8.0
## explicit; go 1.13
github.com/lib/pq
github.com/lib/pq/oid
github.com/lib/pq/scram
# github.com/libdns/libdns v0.2.1
## explicit; go 1.14
github.com/libdns/libdns
@@ -212,9 +201,6 @@ github.com/mholt/acmez/acme
# github.com/miekg/dns v1.1.50
## explicit; go 1.14
github.com/miekg/dns
# github.com/miekg/unbound v0.0.0-20180419064740-e2b53b2dbcba
## explicit
github.com/miekg/unbound
# github.com/mitchellh/mapstructure v1.5.0
## explicit; go 1.14
github.com/mitchellh/mapstructure
@@ -293,10 +279,6 @@ github.com/vektah/gqlparser/v2/lexer
github.com/vektah/gqlparser/v2/parser
github.com/vektah/gqlparser/v2/validator
github.com/vektah/gqlparser/v2/validator/rules
# github.com/weppos/publicsuffix-go v0.13.0
## explicit
github.com/weppos/publicsuffix-go/net/publicsuffix
github.com/weppos/publicsuffix-go/publicsuffix
# github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb
## explicit
github.com/xeipuuv/gojsonpointer