Replace x/crypto/acme/autocert with caddyserver/certmagic

Ingo Oppermann
2022-09-23 10:05:48 +02:00
parent 1ebf1f7f29
commit bc7faf9364
452 changed files with 59853 additions and 12 deletions


@@ -36,7 +36,7 @@ import (
     "github.com/datarhei/core/v16/srt"
     "github.com/datarhei/core/v16/update"
-    "golang.org/x/crypto/acme/autocert"
+    "github.com/caddyserver/certmagic"
 )
 
 // The API interface is the implementation for the restreamer API.
@@ -642,23 +642,51 @@ func (a *api) start() error {
         a.cache = diskCache
     }
 
-    var autocertManager *autocert.Manager
+    var autocertManager *certmagic.Config
 
     if cfg.TLS.Enable && cfg.TLS.Auto {
         if len(cfg.Host.Name) == 0 {
             return fmt.Errorf("at least one host must be provided in host.name or RS_HOST_NAME")
         }
 
-        autocertManager = &autocert.Manager{
-            Prompt: autocert.AcceptTOS,
-            HostPolicy: autocert.HostWhitelist(cfg.Host.Name...),
-            Cache: autocert.DirCache(cfg.DB.Dir + "/cert"),
-        }
+        certmagic.DefaultACME.Agreed = true
+        certmagic.DefaultACME.Email = ""
+        certmagic.DefaultACME.CA = certmagic.LetsEncryptStagingCA
+        certmagic.DefaultACME.DisableHTTPChallenge = false
+        certmagic.DefaultACME.DisableTLSALPNChallenge = true
+        certmagic.DefaultACME.Logger = nil
+
+        certmagic.Default.Storage = &certmagic.FileStorage{
+            Path: cfg.DB.Dir + "/cert",
+        }
+        certmagic.Default.DefaultServerName = cfg.Host.Name[0]
+        certmagic.Default.Logger = nil
+        certmagic.Default.OnEvent = func(event string, data interface{}) {
+            message := ""
+
+            switch data := data.(type) {
+            case string:
+                message = data
+            case fmt.Stringer:
+                message = data.String()
+            }
+
+            if len(message) != 0 {
+                a.log.logger.core.WithComponent("certmagic").Info().WithField("event", event).Log(message)
+            }
+        }
+
+        magic := certmagic.NewDefault()
+        acme := certmagic.NewACMEIssuer(magic, certmagic.DefaultACME)
+
+        magic.Issuers = []certmagic.Issuer{acme}
+
+        autocertManager = magic
 
         // Start temporary http server on configured port
         tempserver := &gohttp.Server{
             Addr: cfg.Address,
-            Handler: autocertManager.HTTPHandler(gohttp.HandlerFunc(func(w gohttp.ResponseWriter, r *gohttp.Request) {
+            Handler: acme.HTTPChallengeHandler(gohttp.HandlerFunc(func(w gohttp.ResponseWriter, r *gohttp.Request) {
                 w.WriteHeader(gohttp.StatusNotFound)
             })),
             ReadTimeout: 10 * time.Second,
@@ -681,9 +709,12 @@ func (a *api) start() error {
     logger := a.log.logger.core.WithComponent("Let's Encrypt").WithField("host", host)
     logger.Info().Log("Acquiring certificate ...")
 
-    _, err := autocertManager.GetCertificate(&tls.ClientHelloInfo{
-        ServerName: host,
-    })
+    ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(5*time.Minute))
+
+    err := autocertManager.ManageSync(ctx, []string{host})
+
+    cancel()
+
     if err != nil {
         logger.Error().WithField("error", err).Log("Failed to acquire certificate")
         certerror = true
@@ -899,7 +930,8 @@ func (a *api) start() error {
         GetCertificate: autocertManager.GetCertificate,
     }
 
-    a.sidecarserver.Handler = autocertManager.HTTPHandler(sidecarserverhandler)
+    acme := autocertManager.Issuers[0].(*certmagic.ACMEIssuer)
+    a.sidecarserver.Handler = acme.HTTPChallengeHandler(sidecarserverhandler)
 }
 
 wgStart.Add(1)
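
Read together, the hunks above follow one pattern: configure the certmagic package-level templates, build a Config from them with an ACME issuer attached, answer the HTTP-01 challenge on port 80, then call ManageSync and hand GetCertificate to the TLS server. The standalone sketch below condenses that pattern for reference; the host name, storage path and addresses are placeholder assumptions rather than values from this repository, and error handling is simplified.

```go
package main

import (
    "context"
    "crypto/tls"
    "log"
    "net/http"
    "time"

    "github.com/caddyserver/certmagic"
)

func main() {
    // Package-level templates, mirroring the settings in the diff above.
    certmagic.DefaultACME.Agreed = true
    certmagic.DefaultACME.CA = certmagic.LetsEncryptStagingCA
    certmagic.DefaultACME.DisableHTTPChallenge = false
    certmagic.DefaultACME.DisableTLSALPNChallenge = true

    certmagic.Default.Storage = &certmagic.FileStorage{Path: "./cert"} // assumed path
    certmagic.Default.DefaultServerName = "example.com"                // assumed host

    // A Config built from the defaults, with an ACME issuer attached.
    magic := certmagic.NewDefault()
    issuer := certmagic.NewACMEIssuer(magic, certmagic.DefaultACME)
    magic.Issuers = []certmagic.Issuer{issuer}

    // Answer HTTP-01 challenges on port 80; everything else gets a 404,
    // just like the temporary server in the diff.
    go http.ListenAndServe(":80", issuer.HTTPChallengeHandler(http.HandlerFunc(
        func(w http.ResponseWriter, r *http.Request) {
            w.WriteHeader(http.StatusNotFound)
        },
    )))

    // Obtain (or load) the certificate synchronously, bounded by a deadline.
    ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(5*time.Minute))
    defer cancel()
    if err := magic.ManageSync(ctx, []string{"example.com"}); err != nil {
        log.Fatalf("failed to acquire certificate: %v", err)
    }

    // Serve HTTPS with the managed certificate via tls.Config.GetCertificate,
    // as the main server above does.
    server := &http.Server{
        Addr:      ":443",
        TLSConfig: &tls.Config{GetCertificate: magic.GetCertificate},
        Handler:   http.NotFoundHandler(),
    }
    log.Fatal(server.ListenAndServeTLS("", ""))
}
```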

go.mod

@@ -5,6 +5,7 @@ go 1.18
 require (
     github.com/99designs/gqlgen v0.17.16
     github.com/atrox/haikunatorgo/v2 v2.0.1
+    github.com/caddyserver/certmagic v0.16.2
     github.com/datarhei/gosrt v0.2.1-0.20220817080252-d44df04a3845
     github.com/datarhei/joy4 v0.0.0-20220914170649-23c70d207759
     github.com/go-playground/validator/v10 v10.11.0
@@ -24,7 +25,6 @@
     github.com/swaggo/swag v1.8.5
     github.com/vektah/gqlparser/v2 v2.5.0
     github.com/xeipuuv/gojsonschema v1.2.0
-    golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90
     golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4
 )
@@ -49,12 +49,16 @@
     github.com/hashicorp/golang-lru v0.5.4 // indirect
     github.com/iancoleman/orderedmap v0.2.0 // indirect
     github.com/josharian/intern v1.0.0 // indirect
+    github.com/klauspost/cpuid/v2 v2.0.11 // indirect
     github.com/labstack/gommon v0.3.1 // indirect
     github.com/leodido/go-urn v1.2.1 // indirect
+    github.com/libdns/libdns v0.2.1 // indirect
     github.com/lufia/plan9stats v0.0.0-20220517141722-cf486979b281 // indirect
     github.com/mailru/easyjson v0.7.7 // indirect
     github.com/mattn/go-colorable v0.1.13 // indirect
     github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
+    github.com/mholt/acmez v1.0.4 // indirect
+    github.com/miekg/dns v1.1.46 // indirect
     github.com/mitchellh/mapstructure v1.5.0 // indirect
     github.com/pmezard/go-difflib v1.0.0 // indirect
     github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect
@@ -72,6 +76,10 @@
     github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
     github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
     github.com/yusufpapurcu/wmi v1.2.2 // indirect
+    go.uber.org/atomic v1.7.0 // indirect
+    go.uber.org/multierr v1.6.0 // indirect
+    go.uber.org/zap v1.21.0 // indirect
+    golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90 // indirect
     golang.org/x/net v0.0.0-20220907135653-1e95f45603a7 // indirect
     golang.org/x/sys v0.0.0-20220907062415-87db552b00fd // indirect
     golang.org/x/text v0.3.7 // indirect

go.sum

@@ -55,12 +55,16 @@ github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig
github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE=
github.com/atrox/haikunatorgo/v2 v2.0.1 h1:FCVx2KL2YvZtI1rI9WeEHxeLRrKGr0Dd4wfCJiUXupc=
github.com/atrox/haikunatorgo/v2 v2.0.1/go.mod h1:BBQmx2o+1Z5poziaHRgddAZKOpijwfKdAmMnSYlFK70=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/benburkert/openpgp v0.0.0-20160410205803-c2471f86866c h1:8XZeJrs4+ZYhJeJ2aZxADI2tGADS15AzIF8MQ8XAhT4=
github.com/benburkert/openpgp v0.0.0-20160410205803-c2471f86866c/go.mod h1:x1vxHcL/9AVzuk5HOloOEPrtJY0MaalYr78afXZ+pWI=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/caddyserver/certmagic v0.16.2 h1:k2n3LkkUG3aMUK/kckMuF9/0VFo+0FtMX3drPYESbmQ=
github.com/caddyserver/certmagic v0.16.2/go.mod h1:PgLIr/dSJa+WA7t7z6Je5xuS/e5A/GFCPHRuZ1QP+MQ=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
@@ -216,6 +220,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kevinmbeaulieu/eq-go v1.0.0/go.mod h1:G3S8ajA56gKBZm4UB9AOyoOS37JO3roToPzKNM8dtdM=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/cpuid/v2 v2.0.11 h1:i2lw1Pm7Yi/4O6XCSyJWqEHI2MDw2FzUK6o/D21xn2A=
github.com/klauspost/cpuid/v2 v2.0.11/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
@@ -234,6 +240,8 @@ github.com/labstack/gommon v0.3.1 h1:OomWaJXm7xR6L1HmEtGyQf26TEn7V6X88mktX9kee9o
github.com/labstack/gommon v0.3.1/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM=
github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w=
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
github.com/libdns/libdns v0.2.1 h1:Wu59T7wSHRgtA0cfxC+n1c/e+O3upJGWytknkmFEDis=
github.com/libdns/libdns v0.2.1/go.mod h1:yQCXzk1lEZmmCPa857bnk4TsOiqYasqpyOEeSObbb40=
github.com/lithammer/shortuuid/v4 v4.0.0 h1:QRbbVkfgNippHOS8PXDkti4NaWeyYfcBTHtw7k08o4c=
github.com/lithammer/shortuuid/v4 v4.0.0/go.mod h1:Zs8puNcrvf2rV9rTH51ZLLcj7ZXqQI3lv67aw4KiB1Y=
github.com/logrusorgru/aurora/v3 v3.0.0/go.mod h1:vsR12bk5grlLvLXAYrBsb5Oc/N+LxAlxggSjiwMnCUc=
@@ -255,6 +263,10 @@ github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peK
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mholt/acmez v1.0.4 h1:N3cE4Pek+dSolbsofIkAYz6H1d3pE+2G0os7QHslf80=
github.com/mholt/acmez v1.0.4/go.mod h1:qFGLZ4u+ehWINeJZjzPlsnjJBCPAADWTcIqE/7DAYQY=
github.com/miekg/dns v1.1.46 h1:uzwpxRtSVxtcIZmz/4Uz6/Rn7G11DvsaslXoy5LxQio=
github.com/miekg/dns v1.1.46/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
github.com/mitchellh/mapstructure v1.3.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
@@ -274,6 +286,7 @@ github.com/otiai10/mint v1.3.3/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.6.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdLa18=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
@@ -371,6 +384,7 @@ github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsr
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
@@ -380,6 +394,14 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI=
go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8=
go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -456,14 +478,17 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220630215102-69896b714898/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220907135653-1e95f45603a7 h1:1WGATo9HAhkWMbfyuVU0tEFP88OIkUvwaHFveQPvzCQ=
golang.org/x/net v0.0.0-20220907135653-1e95f45603a7/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -483,6 +508,7 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -518,8 +544,10 @@ golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -594,6 +622,8 @@ golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roY
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU=

vendor/github.com/caddyserver/certmagic/.gitignore generated vendored Normal file

@@ -0,0 +1 @@
_gitignore/

vendor/github.com/caddyserver/certmagic/LICENSE.txt generated vendored Normal file

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/github.com/caddyserver/certmagic/README.md generated vendored Normal file

@@ -0,0 +1,526 @@
<p align="center">
<a href="https://pkg.go.dev/github.com/caddyserver/certmagic?tab=doc"><img src="https://user-images.githubusercontent.com/1128849/49704830-49d37200-fbd5-11e8-8385-767e0cd033c3.png" alt="CertMagic" width="550"></a>
</p>
<h3 align="center">Easy and Powerful TLS Automation</h3>
<p align="center">The same library used by the <a href="https://caddyserver.com">Caddy Web Server</a></p>
<p align="center">
<a href="https://pkg.go.dev/github.com/caddyserver/certmagic?tab=doc"><img src="https://img.shields.io/badge/godoc-reference-blue.svg"></a>
<a href="https://github.com/caddyserver/certmagic/actions?query=workflow%3ATests"><img src="https://github.com/caddyserver/certmagic/workflows/Tests/badge.svg"></a>
<a href="https://sourcegraph.com/github.com/caddyserver/certmagic?badge"><img src="https://sourcegraph.com/github.com/caddyserver/certmagic/-/badge.svg"></a>
</p>
Caddy's [automagic TLS features](https://caddyserver.com/docs/automatic-https)&mdash;now for your own Go programs&mdash;in one powerful and easy-to-use library!
CertMagic is the most mature, robust, and powerful ACME client integration for Go... and perhaps ever.
With CertMagic, you can add one line to your Go application to serve securely over TLS, without ever having to touch certificates.
Instead of:
```go
// plaintext HTTP, gross 🤢
http.ListenAndServe(":80", mux)
```
Use CertMagic:
```go
// encrypted HTTPS with HTTP->HTTPS redirects - yay! 🔒😍
certmagic.HTTPS([]string{"example.com"}, mux)
```
That line of code will serve your HTTP router `mux` over HTTPS, complete with HTTP->HTTPS redirects. It obtains and renews the TLS certificates. It staples OCSP responses for greater privacy and security. As long as your domain name points to your server, CertMagic will keep its connections secure.
Compared to other ACME client libraries for Go, only CertMagic supports the full suite of ACME features, and no other library matches CertMagic's maturity and reliability.
CertMagic - Automatic HTTPS using Let's Encrypt
===============================================
## Menu
- [Features](#features)
- [Requirements](#requirements)
- [Installation](#installation)
- [Usage](#usage)
- [Package Overview](#package-overview)
- [Certificate authority](#certificate-authority)
- [The `Config` type](#the-config-type)
- [Defaults](#defaults)
- [Providing an email address](#providing-an-email-address)
- [Rate limiting](#rate-limiting)
- [Development and testing](#development-and-testing)
- [Examples](#examples)
- [Serving HTTP handlers with HTTPS](#serving-http-handlers-with-https)
- [Starting a TLS listener](#starting-a-tls-listener)
- [Getting a tls.Config](#getting-a-tlsconfig)
- [Advanced use](#advanced-use)
- [Wildcard Certificates](#wildcard-certificates)
- [Behind a load balancer (or in a cluster)](#behind-a-load-balancer-or-in-a-cluster)
- [The ACME Challenges](#the-acme-challenges)
- [HTTP Challenge](#http-challenge)
- [TLS-ALPN Challenge](#tls-alpn-challenge)
- [DNS Challenge](#dns-challenge)
- [On-Demand TLS](#on-demand-tls)
- [Storage](#storage)
- [Cache](#cache)
- [Contributing](#contributing)
- [Project History](#project-history)
- [Credits and License](#credits-and-license)
## Features
- Fully automated certificate management including issuance and renewal
- One-liner, fully managed HTTPS servers
- Full control over almost every aspect of the system
- HTTP->HTTPS redirects
- Solves all 3 ACME challenges: HTTP, TLS-ALPN, and DNS
- Most robust error handling of _any_ ACME client
- Challenges are randomized to avoid accidental dependence
- Challenges are rotated to overcome certain network blockages
- Robust retries for up to 30 days
- Exponential backoff with carefully-tuned intervals
- Retries with optional test/staging CA endpoint instead of production, to avoid rate limits
- Written in Go, a language with memory-safety guarantees
- Powered by [ACMEz](https://github.com/mholt/acmez), _the_ premier ACME client library for Go
- All [libdns](https://github.com/libdns) DNS providers work out-of-the-box
- Pluggable storage implementations (default: file system)
- Wildcard certificates
- Automatic OCSP stapling ([done right](https://gist.github.com/sleevi/5efe9ef98961ecfb4da8#gistcomment-2336055)) [keeps your sites online!](https://twitter.com/caddyserver/status/1234874273724084226)
- Will [automatically attempt](https://twitter.com/mholt6/status/1235577699541762048) to replace [revoked certificates](https://community.letsencrypt.org/t/2020-02-29-caa-rechecking-bug/114591/3?u=mholt)!
- Staples stored to disk in case of responder outages
- Distributed solving of all challenges (works behind load balancers)
- Highly efficient, coordinated management in a fleet
- Active locking
- Smart queueing
- Supports "on-demand" issuance of certificates (during TLS handshakes!)
- Caddy / CertMagic pioneered this technology
- Custom decision functions to regulate and throttle on-demand behavior
- Optional event hooks for observation
- Works with any certificate authority (CA) compliant with the ACME specification
- Certificate revocation (please, only if private key is compromised)
- Must-Staple (optional; not default)
- Cross-platform support! Mac, Windows, Linux, BSD, Android...
- Scales to hundreds of thousands of names/certificates per instance
- Use in conjunction with your own certificates
## Requirements
0. ACME server (can be a publicly-trusted CA, or your own)
1. Public DNS name(s) you control
2. Server reachable from public Internet
- Or use the DNS challenge to waive this requirement
3. Control over port 80 (HTTP) and/or 443 (HTTPS)
- Or they can be forwarded to other ports you control
- Or use the DNS challenge to waive this requirement
- (This is a requirement of the ACME protocol, not a library limitation)
4. Persistent storage
- Typically the local file system (default)
- Other integrations available/possible
**_Before using this library, your domain names MUST be pointed (A/AAAA records) at your server (unless you use the DNS challenge)!_**
## Installation
```bash
$ go get github.com/caddyserver/certmagic
```
## Usage
### Package Overview
#### Certificate authority
This library uses Let's Encrypt by default, but you can use any certificate authority that conforms to the ACME specification. Known/common CAs are provided as consts in the package, for example `LetsEncryptStagingCA` and `LetsEncryptProductionCA`.
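For example, selecting the CA via the ACME defaults (both constants are exported by the package):
```go
certmagic.DefaultACME.CA = certmagic.LetsEncryptStagingCA // while testing
// certmagic.DefaultACME.CA = certmagic.LetsEncryptProductionCA // when going live
```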
#### The `Config` type
The `certmagic.Config` struct is how you can wield the power of this fully armed and operational battle station. However, an empty/uninitialized `Config` is _not_ a valid one! In time, you will learn to use the force of `certmagic.NewDefault()` as I have.
#### Defaults
The default `Config` value is called `certmagic.Default`. Change its fields to suit your needs, then call `certmagic.NewDefault()` when you need a valid `Config` value. In other words, `certmagic.Default` is a template and is not valid for use directly.
You can set the default values easily, for example: `certmagic.Default.Issuer = ...`.
Similarly, to configure ACME-specific defaults, use `certmagic.DefaultACME`.
The high-level functions in this package (`HTTPS()`, `Listen()`, `ManageSync()`, and `ManageAsync()`) use the default config exclusively. This is how most of you will interact with the package. This is suitable when all your certificates are managed the same way. However, if you need to manage certificates differently depending on their name, you will need to make your own cache and configs (keep reading).
#### Providing an email address
Although not strictly required, this is highly recommended best practice. It allows you to receive expiration emails if your certificates are expiring for some reason, and also allows the CA's engineers to potentially get in touch with you if something is wrong. I recommend setting `certmagic.DefaultACME.Email` or always setting the `Email` field of a new `Config` struct.
#### Rate limiting
To avoid firehosing the CA's servers, CertMagic has built-in rate limiting. Currently, its default limit is up to 10 transactions (obtain or renew) every 1 minute (sliding window). This can be changed by setting the `RateLimitEvents` and `RateLimitEventsWindow` variables, if desired.
The CA may still enforce their own rate limits, and there's nothing (well, nothing ethical) CertMagic can do to bypass them for you.
Additionally, CertMagic will retry failed validations with exponential backoff for up to 30 days, with a reasonable maximum interval between attempts (an "attempt" means trying each enabled challenge type once).
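As a sketch of what tuning that looks like (the values below are arbitrary examples, not recommendations):
```go
// Allow up to 20 obtain/renew events per 2-minute sliding window.
certmagic.RateLimitEvents = 20
certmagic.RateLimitEventsWindow = 2 * time.Minute
```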
### Development and Testing
Note that Let's Encrypt imposes [strict rate limits](https://letsencrypt.org/docs/rate-limits/) at its production endpoint, so using it while developing your application may lock you out for a few days if you aren't careful!
While developing your application and testing it, use [their staging endpoint](https://letsencrypt.org/docs/staging-environment/) which has much higher rate limits. Even then, don't hammer it: but it's much safer for when you're testing. When deploying, though, use their production CA because their staging CA doesn't issue trusted certificates.
To use staging, set `certmagic.DefaultACME.CA = certmagic.LetsEncryptStagingCA` or set `CA` of every `ACMEIssuer` struct.
### Examples
There are many ways to use this library. We'll start with the highest-level (simplest) and work down (more control).
All these high-level examples use `certmagic.Default` and `certmagic.DefaultACME` for the config and the default cache and storage for serving up certificates.
First, we'll follow best practices and do the following:
```go
// read and agree to your CA's legal documents
certmagic.DefaultACME.Agreed = true
// provide an email address
certmagic.DefaultACME.Email = "you@yours.com"
// use the staging endpoint while we're developing
certmagic.DefaultACME.CA = certmagic.LetsEncryptStagingCA
```
For fully-functional program examples, check out [this Twitter thread](https://twitter.com/mholt6/status/1073103805112147968) (or read it [unrolled into a single post](https://threadreaderapp.com/thread/1073103805112147968.html)). (Note that the package API has changed slightly since these posts.)
#### Serving HTTP handlers with HTTPS
```go
err := certmagic.HTTPS([]string{"example.com", "www.example.com"}, mux)
if err != nil {
return err
}
```
This starts HTTP and HTTPS listeners and redirects HTTP to HTTPS!
#### Starting a TLS listener
```go
ln, err := certmagic.Listen([]string{"example.com"})
if err != nil {
return err
}
```
#### Getting a tls.Config
```go
tlsConfig, err := certmagic.TLS([]string{"example.com"})
if err != nil {
return err
}
```
#### Advanced use
For more control (particularly, if you need a different way of managing each certificate), you'll make and use a `Cache` and a `Config` like so:
```go
cache := certmagic.NewCache(certmagic.CacheOptions{
    GetConfigForCert: func(cert certmagic.Certificate) (*certmagic.Config, error) {
        // do whatever you need to do to get the right
        // configuration for this certificate; keep in
        // mind that this config value is used as a
        // template, and will be completed with any
        // defaults that are set in the Default config
        return &certmagic.Config{
            // ...
        }, nil
    },
    ...
})
magic := certmagic.New(cache, certmagic.Config{
    // any customizations you need go here
})

myACME := certmagic.NewACMEIssuer(magic, certmagic.ACMEIssuer{
    CA:     certmagic.LetsEncryptStagingCA,
    Email:  "you@yours.com",
    Agreed: true,
    // plus any other customizations you need
})
magic.Issuers = []certmagic.Issuer{myACME}
// this obtains certificates or renews them if necessary
err := magic.ManageSync(context.TODO(), []string{"example.com", "sub.example.com"})
if err != nil {
return err
}
// to use its certificates and solve the TLS-ALPN challenge,
// you can get a TLS config to use in a TLS listener!
tlsConfig := magic.TLSConfig()
// be sure to customize NextProtos if serving a specific
// application protocol after the TLS handshake, for example:
tlsConfig.NextProtos = append([]string{"h2", "http/1.1"}, tlsConfig.NextProtos...)
//// OR ////
// if you already have a TLS config you don't want to replace,
// we can simply set its GetCertificate field and append the
// TLS-ALPN challenge protocol to the NextProtos
myTLSConfig.GetCertificate = magic.GetCertificate
myTLSConfig.NextProtos = append(myTLSConfig.NextProtos, tlsalpn01.ACMETLS1Protocol)
// the HTTP challenge has to be handled by your HTTP server;
// if you don't have one, you should have disabled it earlier
// when you made the certmagic.Config
httpMux = myACME.HTTPChallengeHandler(httpMux)
```
Great! This example grants you much more flexibility for advanced programs. However, _the vast majority of you will only use the high-level functions described earlier_, especially since you can still customize them by setting the package-level `Default` config.
### Wildcard certificates
At time of writing (December 2018), Let's Encrypt only issues wildcard certificates with the DNS challenge. You can easily enable the DNS challenge with CertMagic for numerous providers (see the relevant section in the docs).
### Behind a load balancer (or in a cluster)
CertMagic runs effectively behind load balancers and/or in cluster/fleet environments. In other words, you can have 10 or 1,000 servers all serving the same domain names, all sharing certificates and OCSP staples.
To do so, simply ensure that each instance is using the same Storage. That is the sole criteria for determining whether an instance is part of a cluster.
The default Storage is implemented using the file system, so mounting the same shared folder is sufficient (see [Storage](#storage) for more on that)! If you need an alternate Storage implementation, feel free to use one, provided that all the instances use the _same_ one. :)
See [Storage](#storage) and the associated [pkg.go.dev](https://pkg.go.dev/github.com/caddyserver/certmagic?tab=doc#Storage) for more information!
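A minimal sketch of that, assuming a shared mount at a hypothetical path:
```go
// Instances sharing this storage are treated as one cluster and will
// coordinate issuance and renewal through it.
certmagic.Default.Storage = &certmagic.FileStorage{Path: "/mnt/shared/certmagic"}
```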
## The ACME Challenges
This section describes how to solve the ACME challenges. Challenges are how you demonstrate to the certificate authority some control over your domain name, thus authorizing them to grant you a certificate for that name. [The great innovation of ACME](https://www.dotconferences.com/2016/10/matthew-holt-go-with-acme) is that verification by CAs can now be automated, rather than having to click links in emails (who ever thought that was a good idea??).
If you're using the high-level convenience functions like `HTTPS()`, `Listen()`, or `TLS()`, the HTTP and/or TLS-ALPN challenges are solved for you because they also start listeners. However, if you're making a `Config` and you start your own server manually, you'll need to be sure the ACME challenges can be solved so certificates can be renewed.
The HTTP and TLS-ALPN challenges are the defaults because they don't require configuration from you, but they require that your server is accessible from external IPs on low ports. If that is not possible in your situation, you can enable the DNS challenge, which will disable the HTTP and TLS-ALPN challenges and use the DNS challenge exclusively.
Technically, only one challenge needs to be enabled for things to work, but using multiple is good for reliability in case a challenge is discontinued by the CA. This happened to the TLS-SNI challenge in early 2018&mdash;many popular ACME clients such as Traefik and Autocert broke, resulting in downtime for some sites, until new releases were made and patches deployed, because they used only one challenge; Caddy, however&mdash;this library's forerunner&mdash;was unaffected because it also used the HTTP challenge. If multiple challenges are enabled, they are chosen randomly to help prevent false reliance on a single challenge type. And if one fails, any remaining enabled challenges are tried before giving up.
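If only one challenge can be used in a given deployment, the package-level ACME defaults let you switch the others off; as a sketch (mirroring the flags set in this commit's diff):
```go
// Solve HTTP-01 only; TLS-ALPN would require control of port 443.
certmagic.DefaultACME.DisableHTTPChallenge = false
certmagic.DefaultACME.DisableTLSALPNChallenge = true
```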
### HTTP Challenge
Per the ACME spec, the HTTP challenge requires port 80, or at least packet forwarding from port 80. It works by serving a specific HTTP response that only the genuine server would have to a normal HTTP request at a special endpoint.
If you are running an HTTP server, solving this challenge is very easy: just wrap your handler in `HTTPChallengeHandler` _or_ call `SolveHTTPChallenge()` inside your own `ServeHTTP()` method.
For example, if you're using the standard library:
```go
mux := http.NewServeMux()
mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
    fmt.Fprintf(w, "Lookit my cool website over HTTPS!")
})
http.ListenAndServe(":80", myACME.HTTPChallengeHandler(mux))
```
If wrapping your handler is not a good solution, try this inside your `ServeHTTP()` instead:
```go
magic := certmagic.NewDefault()
myACME := certmagic.NewACMEIssuer(magic, certmagic.DefaultACME)

func ServeHTTP(w http.ResponseWriter, req *http.Request) {
    if myACME.HandleHTTPChallenge(w, req) {
        return // challenge handled; nothing else to do
    }
    ...
}
```
If you are not running an HTTP server, you should disable the HTTP challenge _or_ run an HTTP server whose sole job it is to solve the HTTP challenge.
### TLS-ALPN Challenge
Per the ACME spec, the TLS-ALPN challenge requires port 443, or at least packet forwarding from port 443. It works by providing a special certificate using a standard TLS extension, Application Layer Protocol Negotiation (ALPN), having a special value. This is the most convenient challenge type because it usually requires no extra configuration and uses the standard TLS port which is where the certificates are used, also.
This challenge is easy to solve: just use the provided `tls.Config` when you make your TLS listener:
```go
// use this to configure a TLS listener
tlsConfig := magic.TLSConfig()
```
Or make two simple changes to an existing `tls.Config`:
```go
myTLSConfig.GetCertificate = magic.GetCertificate
myTLSConfig.NextProtos = append(myTLSConfig.NextProtos, tlsalpn01.ACMETLS1Protocol)
```
Then just make sure your TLS listener is listening on port 443:
```go
ln, err := tls.Listen("tcp", ":443", myTLSConfig)
```
### DNS Challenge
The DNS challenge is perhaps the most useful challenge because it allows you to obtain certificates without your server needing to be publicly accessible on the Internet, and it's the only challenge by which Let's Encrypt will issue wildcard certificates.
This challenge works by setting a special record in the domain's zone. To do this automatically, your DNS provider needs to offer an API by which changes can be made to domain names, and the changes need to take effect immediately for best results. CertMagic supports [all DNS providers with `libdns` implementations](https://github.com/libdns)! It always cleans up the temporary record after the challenge completes.
To enable it, just set the `DNS01Solver` field on a `certmagic.ACMEIssuer` struct, or set the default `certmagic.ACMEIssuer.DNS01Solver` variable. For example, if my domains' DNS was served by Cloudflare:
```go
import "github.com/libdns/cloudflare"
certmagic.DefaultACME.DNS01Solver = &certmagic.DNS01Solver{
DNSProvider: &cloudflare.Provider{
APIToken: "topsecret",
},
}
```
Now the DNS challenge will be used by default, and I can obtain certificates for wildcard domains, too. Enabling the DNS challenge disables the other challenges for that `certmagic.ACMEIssuer` instance.
## On-Demand TLS
Normally, certificates are obtained and renewed before a listener starts serving, and then those certificates are maintained throughout the lifetime of the program. In other words, the certificate names are static. But sometimes you don't know all the names ahead of time, or you don't want to manage all the certificates up front. This is where On-Demand TLS shines.
Originally invented for use in Caddy (which was the first program to use such technology), On-Demand TLS makes it possible and easy to serve certificates for arbitrary or specific names during the lifetime of the server. When a TLS handshake is received, CertMagic will read the Server Name Indication (SNI) value and either load and present that certificate in the ServerHello, or if one does not exist, it will obtain it from a CA right then-and-there.
Of course, this has some obvious security implications. You don't want to DoS a CA or allow arbitrary clients to fill your storage with spammy TLS handshakes. That's why, when you enable On-Demand issuance, you should set limits or policy to allow getting certificates. CertMagic has an implicit whitelist built-in which is sufficient for nearly everyone, but also has a more advanced way to control on-demand issuance.
The simplest way to enable on-demand issuance is to set the OnDemand field of a Config (or the default package-level value):
```go
certmagic.Default.OnDemand = new(certmagic.OnDemandConfig)
```
By setting this to a non-nil value, on-demand TLS is enabled for that config. For convenient security, CertMagic's high-level abstraction functions such as `HTTPS()`, `TLS()`, `ManageSync()`, `ManageAsync()`, and `Listen()` (which all accept a list of domain names) will whitelist those names automatically so only certificates for those names can be obtained when using the Default config. Usually this is sufficient for most users.
However, if you require advanced control over which domains can be issued certificates on-demand (for example, if you do not know which domain names you are managing, or just need to defer their operations until later), you should implement your own DecisionFunc:
```go
// if the decision function returns an error, a certificate
// may not be obtained for that name at that time
certmagic.Default.OnDemand = &certmagic.OnDemandConfig{
    DecisionFunc: func(name string) error {
        if name != "example.com" {
            return fmt.Errorf("not allowed")
        }
        return nil
    },
}
```
The [pkg.go.dev](https://pkg.go.dev/github.com/caddyserver/certmagic?tab=doc#OnDemandConfig) describes how to use this in full detail, so please check it out!
## Storage
CertMagic relies on storage to store certificates and other TLS assets (OCSP staple cache, coordinating locks, etc). Persistent storage is a requirement when using CertMagic: ephemeral storage will likely lead to rate limiting on the CA-side as CertMagic will always have to get new certificates.
By default, CertMagic stores assets on the local file system in `$HOME/.local/share/certmagic` (and honors `$XDG_DATA_HOME` if set). CertMagic will create the directory if it does not exist. If writes are denied, things will not be happy, so make sure CertMagic can write to it!
The notion of a "cluster" or "fleet" of instances that may be serving the same site and sharing certificates, etc, is tied to storage. Simply, any instances that use the same storage facilities are considered part of the cluster. So if you deploy 100 instances of CertMagic behind a load balancer, they are all part of the same cluster if they share the same storage configuration. Sharing storage could be mounting a shared folder, or implementing some other distributed storage system such as a database server or KV store.
The easiest way to change the storage being used is to set `certmagic.Default.Storage` to a value that satisfies the [Storage interface](https://pkg.go.dev/github.com/caddyserver/certmagic?tab=doc#Storage). Keep in mind that a valid `Storage` must be able to implement some operations atomically in order to provide locking and synchronization.
If you write a Storage implementation, please add it to the [project wiki](https://github.com/caddyserver/certmagic/wiki/Storage-Implementations) so people can find it!
## Cache
All of the certificates in use are de-duplicated and cached in memory for optimal performance at handshake-time. This cache must be backed by persistent storage as described above.
Most applications will not need to interact with certificate caches directly. Usually, the closest you will come is to set the package-wide `certmagic.Default.Storage` variable (before attempting to create any Configs) which defines how the cache is persisted. However, if your use case requires using different storage facilities for different Configs (that's highly unlikely and NOT recommended! Even Caddy doesn't get that crazy), you will need to call `certmagic.NewCache()` and pass in the storage you want to use, then get new `Config` structs with `certmagic.NewWithCache()` and pass in the cache.
Again, if you're needing to do this, you've probably over-complicated your application design.
## FAQ
### Can I use some of my own certificates while using CertMagic?
Yes, just call the relevant method on the `Config` to add your own certificate to the cache:
- [`CacheUnmanagedCertificatePEMBytes()`](https://pkg.go.dev/github.com/caddyserver/certmagic?tab=doc#Config.CacheUnmanagedCertificatePEMBytes)
- [`CacheUnmanagedCertificatePEMFile()`](https://pkg.go.dev/github.com/caddyserver/certmagic?tab=doc#Config.CacheUnmanagedCertificatePEMFile)
- [`CacheUnmanagedTLSCertificate()`](https://pkg.go.dev/github.com/caddyserver/certmagic?tab=doc#Config.CacheUnmanagedTLSCertificate)
Keep in mind that unmanaged certificates are (obviously) not renewed for you, so you'll have to replace them when you do. However, OCSP stapling is performed even for unmanaged certificates that qualify.
### Does CertMagic obtain SAN certificates?
Technically all certificates these days are SAN certificates because CommonName is deprecated. But if you're asking whether CertMagic issues and manages certificates with multiple SANs, the answer is no. But it does support serving them, if you provide your own.
### How can I listen on ports 80 and 443? Do I have to run as root?
On Linux, you can use `setcap` to grant your binary the permission to bind low ports:
```bash
$ sudo setcap cap_net_bind_service=+ep /path/to/your/binary
```
and then you will not need to run with root privileges.
## Contributing
We welcome your contributions! Please see our **[contributing guidelines](https://github.com/caddyserver/certmagic/blob/master/.github/CONTRIBUTING.md)** for instructions.
## Project History
CertMagic is the core of Caddy's advanced TLS automation code, extracted into a library. The underlying ACME client implementation is [ACMEz](https://github.com/mholt/acmez). CertMagic's code was originally a central part of Caddy even before Let's Encrypt entered public beta in 2015.
In the years since then, Caddy's TLS automation techniques have been widely adopted, tried and tested in production, and served millions of sites and secured trillions of connections.
Now, CertMagic is _the actual library used by Caddy_. It's incredibly powerful and feature-rich, but also easy to use for simple Go programs: one line of code can enable fully-automated HTTPS applications with HTTP->HTTPS redirects.
Caddy is known for its robust HTTPS+ACME features. When ACME certificate authorities have had outages, in some cases Caddy was the only major client that didn't experience any downtime. Caddy can weather OCSP outages lasting days, or CA outages lasting weeks, without taking your sites offline.
Caddy was also the first to sport "on-demand" issuance technology, which obtains certificates during the first TLS handshake for an allowed SNI name.
Consequently, CertMagic brings all these (and more) features and capabilities right into your own Go programs.
You can [watch a 2016 dotGo talk](https://www.dotconferences.com/2016/10/matthew-holt-go-with-acme) by the author of this library about using ACME to automate certificate management in Go programs:
[![Matthew Holt speaking at dotGo 2016 about ACME in Go](https://user-images.githubusercontent.com/1128849/49921557-2d506780-fe6b-11e8-97bf-6053b6b4eb48.png)](https://www.dotconferences.com/2016/10/matthew-holt-go-with-acme)
## Credits and License
CertMagic is a project by [Matthew Holt](https://twitter.com/mholt6), who is the author; and various contributors, who are credited in the commit history of either CertMagic or Caddy.
CertMagic is licensed under Apache 2.0, an open source license. For convenience, its main points are summarized as follows (but this is no replacement for the actual license text):
- The author owns the copyright to this code
- Use, distribute, and modify the software freely
- Private and internal use is allowed
- License text and copyright notices must stay intact and be included with distributions
- Any and all changes to the code must be documented

vendor/github.com/caddyserver/certmagic/account.go generated vendored Normal file

@@ -0,0 +1,417 @@
// Copyright 2015 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package certmagic
import (
"bufio"
"bytes"
"context"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"encoding/json"
"errors"
"fmt"
"io"
"io/fs"
"os"
"path"
"sort"
"strings"
"sync"
"github.com/mholt/acmez/acme"
)
// getAccount either loads or creates a new account, depending on if
// an account can be found in storage for the given CA + email combo.
func (am *ACMEIssuer) getAccount(ctx context.Context, ca, email string) (acme.Account, error) {
acct, err := am.loadAccount(ctx, ca, email)
if errors.Is(err, fs.ErrNotExist) {
return am.newAccount(email)
}
return acct, err
}
// loadAccount loads an account from storage, but does not create a new one.
func (am *ACMEIssuer) loadAccount(ctx context.Context, ca, email string) (acme.Account, error) {
regBytes, err := am.config.Storage.Load(ctx, am.storageKeyUserReg(ca, email))
if err != nil {
return acme.Account{}, err
}
keyBytes, err := am.config.Storage.Load(ctx, am.storageKeyUserPrivateKey(ca, email))
if err != nil {
return acme.Account{}, err
}
var acct acme.Account
err = json.Unmarshal(regBytes, &acct)
if err != nil {
return acct, err
}
acct.PrivateKey, err = PEMDecodePrivateKey(keyBytes)
if err != nil {
return acct, fmt.Errorf("could not decode account's private key: %v", err)
}
return acct, nil
}
// newAccount generates a new private key for a new ACME account, but
// it does not register or save the account.
func (*ACMEIssuer) newAccount(email string) (acme.Account, error) {
var acct acme.Account
if email != "" {
acct.Contact = []string{"mailto:" + email} // TODO: should we abstract the contact scheme?
}
privateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
if err != nil {
return acct, fmt.Errorf("generating private key: %v", err)
}
acct.PrivateKey = privateKey
return acct, nil
}
// GetAccount first tries loading the account with the associated private key from storage.
// If it does not exist in storage, it will be retrieved from the ACME server and added to storage.
// The account must already exist; it does not create a new account.
func (am *ACMEIssuer) GetAccount(ctx context.Context, privateKeyPEM []byte) (acme.Account, error) {
account, err := am.loadAccountByKey(ctx, privateKeyPEM)
if errors.Is(err, fs.ErrNotExist) {
account, err = am.lookUpAccount(ctx, privateKeyPEM)
}
return account, err
}
// loadAccountByKey loads the account with the given private key from storage, if it exists.
// If it does not exist, an error of type fs.ErrNotExist is returned. This is not very efficient
// for lots of accounts.
func (am *ACMEIssuer) loadAccountByKey(ctx context.Context, privateKeyPEM []byte) (acme.Account, error) {
accountList, err := am.config.Storage.List(ctx, am.storageKeyUsersPrefix(am.CA), false)
if err != nil {
return acme.Account{}, err
}
for _, accountFolderKey := range accountList {
email := path.Base(accountFolderKey)
keyBytes, err := am.config.Storage.Load(ctx, am.storageKeyUserPrivateKey(am.CA, email))
if err != nil {
return acme.Account{}, err
}
if bytes.Equal(bytes.TrimSpace(keyBytes), bytes.TrimSpace(privateKeyPEM)) {
return am.loadAccount(ctx, am.CA, email)
}
}
return acme.Account{}, fs.ErrNotExist
}
// lookUpAccount looks up the account associated with privateKeyPEM from the ACME server.
// If the account is found by the server, it will be saved to storage and returned.
func (am *ACMEIssuer) lookUpAccount(ctx context.Context, privateKeyPEM []byte) (acme.Account, error) {
client, err := am.newACMEClient(false)
if err != nil {
return acme.Account{}, fmt.Errorf("creating ACME client: %v", err)
}
privateKey, err := PEMDecodePrivateKey([]byte(privateKeyPEM))
if err != nil {
return acme.Account{}, fmt.Errorf("decoding private key: %v", err)
}
// look up the account
account := acme.Account{PrivateKey: privateKey}
account, err = client.GetAccount(ctx, account)
if err != nil {
return acme.Account{}, fmt.Errorf("looking up account with server: %v", err)
}
// save the account details to storage
err = am.saveAccount(ctx, client.Directory, account)
if err != nil {
return account, fmt.Errorf("could not save account to storage: %v", err)
}
return account, nil
}
// saveAccount persists an ACME account's info and private key to storage.
// It does NOT register the account via ACME or prompt the user.
func (am *ACMEIssuer) saveAccount(ctx context.Context, ca string, account acme.Account) error {
regBytes, err := json.MarshalIndent(account, "", "\t")
if err != nil {
return err
}
keyBytes, err := PEMEncodePrivateKey(account.PrivateKey)
if err != nil {
return err
}
// extract primary contact (email), without scheme (e.g. "mailto:")
primaryContact := getPrimaryContact(account)
all := []keyValue{
{
key: am.storageKeyUserReg(ca, primaryContact),
value: regBytes,
},
{
key: am.storageKeyUserPrivateKey(ca, primaryContact),
value: keyBytes,
},
}
return storeTx(ctx, am.config.Storage, all)
}
// setEmail does everything it can to obtain an email address
// from the user within the scope of memory and storage to use
// for ACME TLS. If it cannot get an email address, it does nothing
// (If user is prompted, it will warn the user of
// the consequences of an empty email.) This function MAY prompt
// the user for input. If allowPrompts is false, the user
// will NOT be prompted and an empty email may be returned.
func (am *ACMEIssuer) setEmail(ctx context.Context, allowPrompts bool) error {
leEmail := am.Email
// First try package default email, or a discovered email address
if leEmail == "" {
leEmail = DefaultACME.Email
}
if leEmail == "" {
discoveredEmailMu.Lock()
leEmail = discoveredEmail
discoveredEmailMu.Unlock()
}
// Then try to get most recent user email from storage
var gotRecentEmail bool
if leEmail == "" {
leEmail, gotRecentEmail = am.mostRecentAccountEmail(ctx, am.CA)
}
if !gotRecentEmail && leEmail == "" && allowPrompts {
// Looks like there is no email address readily available,
// so we will have to ask the user if we can.
var err error
leEmail, err = am.promptUserForEmail()
if err != nil {
return err
}
// User might have just signified their agreement
am.mu.Lock()
am.agreed = DefaultACME.Agreed
am.mu.Unlock()
}
// Save the email for later and ensure it is consistent
// for repeated use; then update cfg with the email
leEmail = strings.TrimSpace(strings.ToLower(leEmail))
discoveredEmailMu.Lock()
if discoveredEmail == "" {
discoveredEmail = leEmail
}
discoveredEmailMu.Unlock()
// The unexported email field is the one we use
// because we have thread-safe control over it
am.mu.Lock()
am.email = leEmail
am.mu.Unlock()
return nil
}
// promptUserForEmail prompts the user for an email address
// and returns the email address they entered (which could
// be the empty string). If no error is returned, then Agreed
// will also be set to true, since continuing through the
// prompt signifies agreement.
func (am *ACMEIssuer) promptUserForEmail() (string, error) {
// prompt the user for an email address and terms agreement
reader := bufio.NewReader(stdin)
am.promptUserAgreement("")
fmt.Println("Please enter your email address to signify agreement and to be notified")
fmt.Println("in case of issues. You can leave it blank, but we don't recommend it.")
fmt.Print(" Email address: ")
leEmail, err := reader.ReadString('\n')
if err != nil && err != io.EOF {
return "", fmt.Errorf("reading email address: %v", err)
}
leEmail = strings.TrimSpace(leEmail)
DefaultACME.Agreed = true
return leEmail, nil
}
// promptUserAgreement simply outputs the standard user
// agreement prompt with the given agreement URL.
// It outputs a newline after the message.
func (am *ACMEIssuer) promptUserAgreement(agreementURL string) {
userAgreementPrompt := `Your sites will be served over HTTPS automatically using an automated CA.
By continuing, you agree to the CA's terms of service`
if agreementURL == "" {
fmt.Printf("\n\n%s.\n", userAgreementPrompt)
return
}
fmt.Printf("\n\n%s at:\n %s\n", userAgreementPrompt, agreementURL)
}
// askUserAgreement prompts the user to agree to the agreement
// at the given agreement URL via stdin. It returns whether the
// user agreed or not.
func (am *ACMEIssuer) askUserAgreement(agreementURL string) bool {
am.promptUserAgreement(agreementURL)
fmt.Print("Do you agree to the terms? (y/n): ")
reader := bufio.NewReader(stdin)
answer, err := reader.ReadString('\n')
if err != nil {
return false
}
answer = strings.ToLower(strings.TrimSpace(answer))
return answer == "y" || answer == "yes"
}
func storageKeyACMECAPrefix(issuerKey string) string {
return path.Join(prefixACME, StorageKeys.Safe(issuerKey))
}
func (am *ACMEIssuer) storageKeyCAPrefix(caURL string) string {
return storageKeyACMECAPrefix(am.issuerKey(caURL))
}
func (am *ACMEIssuer) storageKeyUsersPrefix(caURL string) string {
return path.Join(am.storageKeyCAPrefix(caURL), "users")
}
func (am *ACMEIssuer) storageKeyUserPrefix(caURL, email string) string {
if email == "" {
email = emptyEmail
}
return path.Join(am.storageKeyUsersPrefix(caURL), StorageKeys.Safe(email))
}
func (am *ACMEIssuer) storageKeyUserReg(caURL, email string) string {
return am.storageSafeUserKey(caURL, email, "registration", ".json")
}
func (am *ACMEIssuer) storageKeyUserPrivateKey(caURL, email string) string {
return am.storageSafeUserKey(caURL, email, "private", ".key")
}
// storageSafeUserKey returns a key for the given email, with the default
// filename, and the filename ending in the given extension.
func (am *ACMEIssuer) storageSafeUserKey(ca, email, defaultFilename, extension string) string {
if email == "" {
email = emptyEmail
}
email = strings.ToLower(email)
filename := am.emailUsername(email)
if filename == "" {
filename = defaultFilename
}
filename = StorageKeys.Safe(filename)
return path.Join(am.storageKeyUserPrefix(ca, email), filename+extension)
}
// emailUsername returns the username portion of an email address (part before
// '@') or the original input if it can't find the "@" symbol.
func (*ACMEIssuer) emailUsername(email string) string {
at := strings.Index(email, "@")
if at == -1 {
return email
} else if at == 0 {
return email[1:]
}
return email[:at]
}
// mostRecentAccountEmail finds the most recently-written account file
// in storage. Since this is part of a complex sequence to get a user
// account, errors here are discarded to simplify code flow in
// the caller, and errors are not important here anyway.
func (am *ACMEIssuer) mostRecentAccountEmail(ctx context.Context, caURL string) (string, bool) {
accountList, err := am.config.Storage.List(ctx, am.storageKeyUsersPrefix(caURL), false)
if err != nil || len(accountList) == 0 {
return "", false
}
// get all the key infos ahead of sorting, because
// we might filter some out
stats := make(map[string]KeyInfo)
for i := 0; i < len(accountList); i++ {
u := accountList[i]
keyInfo, err := am.config.Storage.Stat(ctx, u)
if err != nil {
continue
}
if keyInfo.IsTerminal {
// I found a bug when macOS created a .DS_Store file in
// the users folder, and CertMagic tried to use that as
// the user email because it was newer than the other one
// which existed... sure, this isn't a perfect fix but
// frankly one's OS shouldn't mess with the data folder
// in the first place.
accountList = append(accountList[:i], accountList[i+1:]...)
i--
continue
}
stats[u] = keyInfo
}
sort.Slice(accountList, func(i, j int) bool {
iInfo := stats[accountList[i]]
jInfo := stats[accountList[j]]
return jInfo.Modified.Before(iInfo.Modified)
})
if len(accountList) == 0 {
return "", false
}
account, err := am.getAccount(ctx, caURL, path.Base(accountList[0]))
if err != nil {
return "", false
}
return getPrimaryContact(account), true
}
// getPrimaryContact returns the first contact on the account (if any)
// without the scheme. (I guess we assume an email address.)
func getPrimaryContact(account acme.Account) string {
// TODO: should this be abstracted with some lower-level helper?
var primaryContact string
if len(account.Contact) > 0 {
primaryContact = account.Contact[0]
if idx := strings.Index(primaryContact, ":"); idx >= 0 {
primaryContact = primaryContact[idx+1:]
}
}
return primaryContact
}
// When an email address is not explicitly specified, we can remember
// the last one we discovered to avoid having to ask again later.
// (We used to store this in DefaultACME.Email but it was racey; see #127)
var (
discoveredEmail string
discoveredEmailMu sync.Mutex
)
// stdin is used to read the user's input if prompted;
// this is changed by tests during tests.
var stdin = io.ReadWriter(os.Stdin)
// The name of the folder for accounts where the email
// address was not provided; default 'username' if you will,
// but only for local/storage use, not with the CA.
const emptyEmail = "default"
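As a rough usage sketch of the account lookup above (the key file path, the staging CA, and the surrounding setup are assumptions for illustration only):

```go
package main

import (
	"context"
	"fmt"
	"os"

	"github.com/caddyserver/certmagic"
)

func main() {
	// PEM-encoded ACME account private key saved earlier (hypothetical path).
	keyPEM, err := os.ReadFile("acme-account.key")
	if err != nil {
		panic(err)
	}

	magic := certmagic.NewDefault()
	issuer := certmagic.NewACMEIssuer(magic, certmagic.ACMEIssuer{
		CA: certmagic.LetsEncryptStagingCA,
	})

	// Checks storage first, then falls back to asking the CA and caches
	// the result in storage for next time.
	acct, err := issuer.GetAccount(context.Background(), keyPEM)
	if err != nil {
		panic(err)
	}
	fmt.Println("account status:", acct.Status, "contact:", acct.Contact)
}
```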

347
vendor/github.com/caddyserver/certmagic/acmeclient.go generated vendored Normal file
View File

@@ -0,0 +1,347 @@
// Copyright 2015 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package certmagic
import (
"context"
"crypto/x509"
"fmt"
weakrand "math/rand"
"net"
"net/url"
"strconv"
"strings"
"sync"
"time"
"github.com/mholt/acmez"
"github.com/mholt/acmez/acme"
"go.uber.org/zap"
)
func init() {
weakrand.Seed(time.Now().UnixNano())
}
// acmeClient holds state necessary to perform ACME operations
// for certificate management with an ACME account. Call
// ACMEIssuer.newACMEClientWithAccount() to get a valid one.
type acmeClient struct {
iss *ACMEIssuer
acmeClient *acmez.Client
account acme.Account
}
// newACMEClientWithAccount creates an ACME client ready to use with an account, including
// loading one from storage or registering a new account with the CA if necessary. If
// useTestCA is true, am.TestCA will be used if set; otherwise, the primary CA will be used.
func (iss *ACMEIssuer) newACMEClientWithAccount(ctx context.Context, useTestCA, interactive bool) (*acmeClient, error) {
// first, get underlying ACME client
client, err := iss.newACMEClient(useTestCA)
if err != nil {
return nil, err
}
// look up or create the ACME account
var account acme.Account
if iss.AccountKeyPEM != "" {
account, err = iss.GetAccount(ctx, []byte(iss.AccountKeyPEM))
} else {
account, err = iss.getAccount(ctx, client.Directory, iss.getEmail())
}
if err != nil {
return nil, fmt.Errorf("getting ACME account: %v", err)
}
// register account if it is new
if account.Status == "" {
if iss.NewAccountFunc != nil {
account, err = iss.NewAccountFunc(ctx, iss, account)
if err != nil {
return nil, fmt.Errorf("account pre-registration callback: %v", err)
}
}
// agree to terms
if interactive {
if !iss.isAgreed() {
var termsURL string
dir, err := client.GetDirectory(ctx)
if err != nil {
return nil, fmt.Errorf("getting directory: %w", err)
}
if dir.Meta != nil {
termsURL = dir.Meta.TermsOfService
}
if termsURL != "" {
agreed := iss.askUserAgreement(termsURL)
if !agreed {
return nil, fmt.Errorf("user must agree to CA terms")
}
iss.mu.Lock()
iss.agreed = agreed
iss.mu.Unlock()
}
}
} else {
// can't prompt a user who isn't there; they should
// have reviewed the terms beforehand
iss.mu.Lock()
iss.agreed = true
iss.mu.Unlock()
}
account.TermsOfServiceAgreed = iss.isAgreed()
// associate account with external binding, if configured
if iss.ExternalAccount != nil {
err := account.SetExternalAccountBinding(ctx, client.Client, *iss.ExternalAccount)
if err != nil {
return nil, err
}
}
// create account
account, err = client.NewAccount(ctx, account)
if err != nil {
return nil, fmt.Errorf("registering account %v with server: %w", account.Contact, err)
}
// persist the account to storage
err = iss.saveAccount(ctx, client.Directory, account)
if err != nil {
return nil, fmt.Errorf("could not save account %v: %v", account.Contact, err)
}
}
c := &acmeClient{
iss: iss,
acmeClient: client,
account: account,
}
return c, nil
}
// newACMEClient creates a new underlying ACME client using the settings in am,
// independent of any particular ACME account. If useTestCA is true, am.TestCA
// will be used if it is set; otherwise, the primary CA will be used.
func (iss *ACMEIssuer) newACMEClient(useTestCA bool) (*acmez.Client, error) {
// ensure defaults are filled in
var caURL string
if useTestCA {
caURL = iss.TestCA
}
if caURL == "" {
caURL = iss.CA
}
if caURL == "" {
caURL = DefaultACME.CA
}
certObtainTimeout := iss.CertObtainTimeout
if certObtainTimeout == 0 {
certObtainTimeout = DefaultACME.CertObtainTimeout
}
// ensure endpoint is secure (assume HTTPS if scheme is missing)
if !strings.Contains(caURL, "://") {
caURL = "https://" + caURL
}
u, err := url.Parse(caURL)
if err != nil {
return nil, err
}
if u.Scheme != "https" && !isLoopback(u.Host) && !isInternal(u.Host) {
return nil, fmt.Errorf("%s: insecure CA URL (HTTPS required)", caURL)
}
client := &acmez.Client{
Client: &acme.Client{
Directory: caURL,
PollTimeout: certObtainTimeout,
UserAgent: buildUAString(),
HTTPClient: iss.httpClient,
},
ChallengeSolvers: make(map[string]acmez.Solver),
}
if iss.Logger != nil {
client.Logger = iss.Logger.Named("acme_client")
}
// configure challenges (most of the time, DNS challenge is
// exclusive of other ones because it is usually only used
// in situations where the default challenges would fail)
if iss.DNS01Solver == nil {
// enable HTTP-01 challenge
if !iss.DisableHTTPChallenge {
useHTTPPort := HTTPChallengePort
if HTTPPort > 0 && HTTPPort != HTTPChallengePort {
useHTTPPort = HTTPPort
}
if iss.AltHTTPPort > 0 {
useHTTPPort = iss.AltHTTPPort
}
client.ChallengeSolvers[acme.ChallengeTypeHTTP01] = distributedSolver{
storage: iss.config.Storage,
storageKeyIssuerPrefix: iss.storageKeyCAPrefix(client.Directory),
solver: &httpSolver{
acmeIssuer: iss,
address: net.JoinHostPort(iss.ListenHost, strconv.Itoa(useHTTPPort)),
},
}
}
// enable TLS-ALPN-01 challenge
if !iss.DisableTLSALPNChallenge {
useTLSALPNPort := TLSALPNChallengePort
if HTTPSPort > 0 && HTTPSPort != TLSALPNChallengePort {
useTLSALPNPort = HTTPSPort
}
if iss.AltTLSALPNPort > 0 {
useTLSALPNPort = iss.AltTLSALPNPort
}
client.ChallengeSolvers[acme.ChallengeTypeTLSALPN01] = distributedSolver{
storage: iss.config.Storage,
storageKeyIssuerPrefix: iss.storageKeyCAPrefix(client.Directory),
solver: &tlsALPNSolver{
config: iss.config,
address: net.JoinHostPort(iss.ListenHost, strconv.Itoa(useTLSALPNPort)),
},
}
}
} else {
// use DNS challenge exclusively
client.ChallengeSolvers[acme.ChallengeTypeDNS01] = iss.DNS01Solver
}
// wrap solvers in our wrapper so that we can keep track of challenge
// info: this is useful for solving challenges globally as a process;
// for example, usually there is only one process that can solve the
// HTTP and TLS-ALPN challenges, and only one server in that process
// that can bind the necessary port(s), so if a server listening on
// a different port needed a certificate, it would have to know about
// the other server listening on that port, and somehow convey its
// challenge info or share its config, but this isn't always feasible;
// what the wrapper does is it accesses a global challenge memory so
// that unrelated servers in this process can all solve each others'
// challenges without having to know about each other - Caddy's admin
// endpoint uses this functionality since it and the HTTP/TLS modules
// do not know about each other
// (doing this here in a separate loop ensures that even if we expose
// solver config to users later, we will even wrap their own solvers)
for name, solver := range client.ChallengeSolvers {
client.ChallengeSolvers[name] = solverWrapper{solver}
}
return client, nil
}
func (c *acmeClient) throttle(ctx context.Context, names []string) error {
email := c.iss.getEmail()
// throttling is scoped to CA + account email
rateLimiterKey := c.acmeClient.Directory + "," + email
rateLimitersMu.Lock()
rl, ok := rateLimiters[rateLimiterKey]
if !ok {
rl = NewRateLimiter(RateLimitEvents, RateLimitEventsWindow)
rateLimiters[rateLimiterKey] = rl
// TODO: stop rate limiter when it is garbage-collected...
}
rateLimitersMu.Unlock()
if c.iss.Logger != nil {
c.iss.Logger.Info("waiting on internal rate limiter",
zap.Strings("identifiers", names),
zap.String("ca", c.acmeClient.Directory),
zap.String("account", email),
)
}
err := rl.Wait(ctx)
if err != nil {
return err
}
if c.iss.Logger != nil {
c.iss.Logger.Info("done waiting on internal rate limiter",
zap.Strings("identifiers", names),
zap.String("ca", c.acmeClient.Directory),
zap.String("account", email),
)
}
return nil
}
func (c *acmeClient) usingTestCA() bool {
return c.iss.TestCA != "" && c.acmeClient.Directory == c.iss.TestCA
}
func (c *acmeClient) revoke(ctx context.Context, cert *x509.Certificate, reason int) error {
return c.acmeClient.RevokeCertificate(ctx, c.account,
cert, c.account.PrivateKey, reason)
}
func buildUAString() string {
ua := "CertMagic"
if UserAgent != "" {
ua = UserAgent + " " + ua
}
return ua
}
// These internal rate limits are designed to prevent accidentally
// firehosing a CA's ACME endpoints. They are not intended to
// replace or replicate the CA's actual rate limits.
//
// Let's Encrypt's rate limits can be found here:
// https://letsencrypt.org/docs/rate-limits/
//
// Currently (as of December 2019), Let's Encrypt's most relevant
// rate limit for large deployments is 300 new orders per account
// per 3 hours (on average, or best case, that's about 1 every 36
// seconds, or 2 every 72 seconds, etc.); but it's not reasonable
// to try to assume that our internal state is the same as the CA's
// (due to process restarts, config changes, failed validations,
// etc.) and ultimately, only the CA's actual rate limiter is the
// authority. Thus, our own rate limiters do not attempt to enforce
// external rate limits. Doing so causes problems when the domains
// are not in our control (i.e. serving customer sites) and/or lots
// of domains fail validation: they clog our internal rate limiter
// and nearly starve out (or at least slow down) the other domains
// that need certificates. Failed transactions are already retried
// with exponential backoff, so adding in rate limiting can slow
// things down even more.
//
// Instead, the point of our internal rate limiter is to avoid
// hammering the CA's endpoint when there are thousands or even
// millions of certificates under management. Our goal is to
// allow small bursts in a relatively short timeframe so as to
// not block any one domain for too long, without unleashing
// thousands of requests to the CA at once.
var (
rateLimiters = make(map[string]*RingBufferRateLimiter)
rateLimitersMu sync.RWMutex
// RateLimitEvents is how many new events can be allowed
// in RateLimitEventsWindow.
RateLimitEvents = 10
// RateLimitEventsWindow is the size of the sliding
// window that throttles events.
RateLimitEventsWindow = 10 * time.Second
)
// Some default values passed down to the underlying ACME client.
var (
UserAgent string
HTTPTimeout = 30 * time.Second
)
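The variables above are package-level knobs, so a host application can tune the internal throttle and the ACME HTTP client before starting certificate management. A rough sketch, with arbitrary values chosen only for illustration:

```go
package main

import (
	"time"

	"github.com/caddyserver/certmagic"
)

func main() {
	// Identify this application to the CA; the value is prepended to
	// CertMagic's own User-Agent string by buildUAString.
	certmagic.UserAgent = "my-app/1.0" // hypothetical value

	// Loosen the internal throttle slightly and give slow CAs more time.
	certmagic.RateLimitEvents = 20
	certmagic.RateLimitEventsWindow = 10 * time.Second
	certmagic.HTTPTimeout = 60 * time.Second

	// ... certificate management would be configured and started here.
}
```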

529
vendor/github.com/caddyserver/certmagic/acmeissuer.go generated vendored Normal file
View File

@@ -0,0 +1,529 @@
package certmagic
import (
"context"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"net"
"net/http"
"net/url"
"sort"
"strings"
"sync"
"time"
"github.com/mholt/acmez"
"github.com/mholt/acmez/acme"
"go.uber.org/zap"
)
// ACMEIssuer gets certificates using ACME. It implements the PreChecker,
// Issuer, and Revoker interfaces.
//
// It is NOT VALID to use an ACMEIssuer without calling NewACMEIssuer().
// It fills in any default values from DefaultACME as well as setting up
// internal state that is necessary for valid use. Always call
// NewACMEIssuer() to get a valid ACMEIssuer value.
type ACMEIssuer struct {
// The endpoint of the directory for the ACME
// CA we are to use
CA string
// TestCA is the endpoint of the directory for
// an ACME CA to use to test domain validation,
// but any certs obtained from this CA are
// discarded
TestCA string
// The email address to use when creating or
// selecting an existing ACME server account
Email string
// The PEM-encoded private key of the ACME
// account to use; only needed if the account
// is already created on the server and
// can be looked up with the ACME protocol
AccountKeyPEM string
// Set to true if agreed to the CA's
// subscriber agreement
Agreed bool
// An optional external account to associate
// with this ACME account
ExternalAccount *acme.EAB
// Disable all HTTP challenges
DisableHTTPChallenge bool
// Disable all TLS-ALPN challenges
DisableTLSALPNChallenge bool
// The host (ONLY the host, not port) to listen
// on if necessary to start a listener to solve
// an ACME challenge
ListenHost string
// The alternate port to use for the ACME HTTP
// challenge; if non-empty, this port will be
// used instead of HTTPChallengePort to spin up
// a listener for the HTTP challenge
AltHTTPPort int
// The alternate port to use for the ACME
// TLS-ALPN challenge; the system must forward
// TLSALPNChallengePort to this port for
// challenge to succeed
AltTLSALPNPort int
// The solver for the dns-01 challenge;
// usually this is a DNS01Solver value
// from this package
DNS01Solver acmez.Solver
// TrustedRoots specifies a pool of root CA
// certificates to trust when communicating
// over a network to a peer.
TrustedRoots *x509.CertPool
// The maximum amount of time to allow for
// obtaining a certificate. If empty, the
// default from the underlying ACME lib is
// used. If set, it must not be too low so
// as to cancel challenges too early.
CertObtainTimeout time.Duration
// Address of custom DNS resolver to be used
// when communicating with ACME server
Resolver string
// Callback function that is called before a
// new ACME account is registered with the CA;
// it allows for last-second config changes
// of the ACMEIssuer and the Account.
// (TODO: this feature is still EXPERIMENTAL and subject to change)
NewAccountFunc func(context.Context, *ACMEIssuer, acme.Account) (acme.Account, error)
// Preferences for selecting alternate
// certificate chains
PreferredChains ChainPreference
// Set a logger to enable logging
Logger *zap.Logger
config *Config
httpClient *http.Client
// Some fields are changed on-the-fly during
// certificate management. For example, the
// email might be implicitly discovered if not
// explicitly configured, and agreement might
// happen during the flow. Changing the exported
// fields is racy (issue #195), so we
// control unexported fields that we can
// synchronize properly.
email string
agreed bool
mu *sync.Mutex // protects the above grouped fields
}
// NewACMEIssuer constructs a valid ACMEIssuer based on a template
// configuration; any empty values will be filled in by defaults in
// DefaultACME, and if any required values are still empty, sensible
// defaults will be used.
//
// Typically, you'll create the Config first with New() or NewDefault(),
// then call NewACMEIssuer(), then assign the return value to the Issuers
// field of the Config.
func NewACMEIssuer(cfg *Config, template ACMEIssuer) *ACMEIssuer {
if cfg == nil {
panic("cannot make valid ACMEIssuer without an associated CertMagic config")
}
if template.CA == "" {
template.CA = DefaultACME.CA
}
if template.TestCA == "" && template.CA == DefaultACME.CA {
// only use the default test CA if the CA is also
// the default CA; no point in testing against
// Let's Encrypt's staging server if we are not
// using their production server too
template.TestCA = DefaultACME.TestCA
}
if template.Email == "" {
template.Email = DefaultACME.Email
}
if template.AccountKeyPEM == "" {
template.AccountKeyPEM = DefaultACME.AccountKeyPEM
}
if !template.Agreed {
template.Agreed = DefaultACME.Agreed
}
if template.ExternalAccount == nil {
template.ExternalAccount = DefaultACME.ExternalAccount
}
if !template.DisableHTTPChallenge {
template.DisableHTTPChallenge = DefaultACME.DisableHTTPChallenge
}
if !template.DisableTLSALPNChallenge {
template.DisableTLSALPNChallenge = DefaultACME.DisableTLSALPNChallenge
}
if template.ListenHost == "" {
template.ListenHost = DefaultACME.ListenHost
}
if template.AltHTTPPort == 0 {
template.AltHTTPPort = DefaultACME.AltHTTPPort
}
if template.AltTLSALPNPort == 0 {
template.AltTLSALPNPort = DefaultACME.AltTLSALPNPort
}
if template.DNS01Solver == nil {
template.DNS01Solver = DefaultACME.DNS01Solver
}
if template.TrustedRoots == nil {
template.TrustedRoots = DefaultACME.TrustedRoots
}
if template.CertObtainTimeout == 0 {
template.CertObtainTimeout = DefaultACME.CertObtainTimeout
}
if template.Resolver == "" {
template.Resolver = DefaultACME.Resolver
}
if template.NewAccountFunc == nil {
template.NewAccountFunc = DefaultACME.NewAccountFunc
}
if template.Logger == nil {
template.Logger = DefaultACME.Logger
}
template.config = cfg
template.mu = new(sync.Mutex)
// set up the dialer and transport / HTTP client
dialer := &net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 2 * time.Minute,
}
if template.Resolver != "" {
dialer.Resolver = &net.Resolver{
PreferGo: true,
Dial: func(ctx context.Context, network, _ string) (net.Conn, error) {
return (&net.Dialer{
Timeout: 15 * time.Second,
}).DialContext(ctx, network, template.Resolver)
},
}
}
transport := &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: dialer.DialContext,
TLSHandshakeTimeout: 30 * time.Second, // increase to 30s requested in #175
ResponseHeaderTimeout: 30 * time.Second, // increase to 30s requested in #175
ExpectContinueTimeout: 2 * time.Second,
ForceAttemptHTTP2: true,
}
if template.TrustedRoots != nil {
transport.TLSClientConfig = &tls.Config{
RootCAs: template.TrustedRoots,
}
}
template.httpClient = &http.Client{
Transport: transport,
Timeout: HTTPTimeout,
}
return &template
}
// IssuerKey returns the unique issuer key for the
// configured CA endpoint.
func (am *ACMEIssuer) IssuerKey() string {
return am.issuerKey(am.CA)
}
func (*ACMEIssuer) issuerKey(ca string) string {
key := ca
if caURL, err := url.Parse(key); err == nil {
key = caURL.Host
if caURL.Path != "" {
// keep the path, but make sure it's a single
// component (i.e. no forward slashes, and for
// good measure, no backward slashes either)
const hyphen = "-"
repl := strings.NewReplacer(
"/", hyphen,
"\\", hyphen,
)
path := strings.Trim(repl.Replace(caURL.Path), hyphen)
if path != "" {
key += hyphen + path
}
}
}
return key
}
func (iss *ACMEIssuer) getEmail() string {
iss.mu.Lock()
defer iss.mu.Unlock()
return iss.email
}
func (iss *ACMEIssuer) isAgreed() bool {
iss.mu.Lock()
defer iss.mu.Unlock()
return iss.agreed
}
// PreCheck performs a few simple checks before obtaining or
// renewing a certificate with ACME, and returns whether this
// batch is eligible for certificates if using Let's Encrypt.
// It also ensures that an email address is available.
func (am *ACMEIssuer) PreCheck(ctx context.Context, names []string, interactive bool) error {
publicCA := strings.Contains(am.CA, "api.letsencrypt.org") || strings.Contains(am.CA, "acme.zerossl.com")
if publicCA {
for _, name := range names {
if !SubjectQualifiesForPublicCert(name) {
return fmt.Errorf("subject does not qualify for a public certificate: %s", name)
}
}
}
return am.setEmail(ctx, interactive)
}
// Issue implements the Issuer interface. It obtains a certificate for the given csr using
// the ACME configuration am.
func (am *ACMEIssuer) Issue(ctx context.Context, csr *x509.CertificateRequest) (*IssuedCertificate, error) {
if am.config == nil {
panic("missing config pointer (must use NewACMEIssuer)")
}
var isRetry bool
if attempts, ok := ctx.Value(AttemptsCtxKey).(*int); ok {
isRetry = *attempts > 0
}
cert, usedTestCA, err := am.doIssue(ctx, csr, isRetry)
if err != nil {
return nil, err
}
// important to note that usedTestCA is not necessarily the same as isRetry
// (usedTestCA can be true if the main CA and the test CA happen to be the same)
if isRetry && usedTestCA && am.CA != am.TestCA {
// succeeded with testing endpoint, so try again with production endpoint
// (only if the production endpoint is different from the testing endpoint)
// TODO: This logic is imperfect and could benefit from some refinement.
// The two CA endpoints likely have different states, which could cause one
// to succeed and the other to fail, even if it's not a validation error.
// Two common cases would be:
// 1) Rate limiter state. This is more likely to cause prod to fail while
// staging succeeds, since prod usually has tighter rate limits. Thus, if
// initial attempt failed in prod due to rate limit, first retry (on staging)
// might succeed, and then trying prod again right way would probably still
// fail; normally this would terminate retries but the right thing to do in
// this case is to back off and retry again later. We could refine this logic
// to stick with the production endpoint on retries unless the error changes.
// 2) Cached authorizations state. If a domain validates successfully with
// one endpoint, but then the other endpoint is used, it might fail, e.g. if
// DNS was just changed or is still propagating. In this case, the second CA
// should continue to be retried with backoff, without switching back to the
// other endpoint. This is more likely to happen if a user is testing with
// the staging CA as the main CA, then changes their configuration once they
// think they are ready for the production endpoint.
cert, _, err = am.doIssue(ctx, csr, false)
if err != nil {
// succeeded with test CA but failed just now with the production CA;
// either we are observing differing internal states of each CA that will
// work out with time, or there is a bug/misconfiguration somewhere
// externally; it is hard to tell which! one easy cue is whether the
// error is specifically a 429 (Too Many Requests); if so, we should
// probably keep retrying
var problem acme.Problem
if errors.As(err, &problem) {
if problem.Status == http.StatusTooManyRequests {
// DON'T abort retries; the test CA succeeded (even
// if it's cached, it recently succeeded!) so we just
// need to keep trying (with backoff) until this CA's
// rate limits expire...
// TODO: as mentioned in comment above, we would benefit
// by pinning the main CA at this point instead of
// needlessly retrying with the test CA first each time
return nil, err
}
}
return nil, ErrNoRetry{err}
}
}
return cert, err
}
func (am *ACMEIssuer) doIssue(ctx context.Context, csr *x509.CertificateRequest, useTestCA bool) (*IssuedCertificate, bool, error) {
client, err := am.newACMEClientWithAccount(ctx, useTestCA, false)
if err != nil {
return nil, false, err
}
usingTestCA := client.usingTestCA()
nameSet := namesFromCSR(csr)
if !useTestCA {
if err := client.throttle(ctx, nameSet); err != nil {
return nil, usingTestCA, err
}
}
certChains, err := client.acmeClient.ObtainCertificateUsingCSR(ctx, client.account, csr)
if err != nil {
return nil, usingTestCA, fmt.Errorf("%v %w (ca=%s)", nameSet, err, client.acmeClient.Directory)
}
if len(certChains) == 0 {
return nil, usingTestCA, fmt.Errorf("no certificate chains")
}
preferredChain := am.selectPreferredChain(certChains)
ic := &IssuedCertificate{
Certificate: preferredChain.ChainPEM,
Metadata: preferredChain,
}
return ic, usingTestCA, nil
}
// selectPreferredChain sorts and then filters the certificate chains to find the optimal
// chain preferred by the client. If there's only one chain, that is returned without any
// processing. If there are no matches, the first chain is returned.
func (am *ACMEIssuer) selectPreferredChain(certChains []acme.Certificate) acme.Certificate {
if len(certChains) == 1 {
if am.Logger != nil && (len(am.PreferredChains.AnyCommonName) > 0 || len(am.PreferredChains.RootCommonName) > 0) {
am.Logger.Debug("there is only one chain offered; selecting it regardless of preferences",
zap.String("chain_url", certChains[0].URL))
}
return certChains[0]
}
if am.PreferredChains.Smallest != nil {
if *am.PreferredChains.Smallest {
sort.Slice(certChains, func(i, j int) bool {
return len(certChains[i].ChainPEM) < len(certChains[j].ChainPEM)
})
} else {
sort.Slice(certChains, func(i, j int) bool {
return len(certChains[i].ChainPEM) > len(certChains[j].ChainPEM)
})
}
}
if len(am.PreferredChains.AnyCommonName) > 0 || len(am.PreferredChains.RootCommonName) > 0 {
// in order to inspect, we need to decode their PEM contents
decodedChains := make([][]*x509.Certificate, len(certChains))
for i, chain := range certChains {
certs, err := parseCertsFromPEMBundle(chain.ChainPEM)
if err != nil {
if am.Logger != nil {
am.Logger.Error("unable to parse PEM certificate chain",
zap.Int("chain", i),
zap.Error(err))
}
continue
}
decodedChains[i] = certs
}
if len(am.PreferredChains.AnyCommonName) > 0 {
for _, prefAnyCN := range am.PreferredChains.AnyCommonName {
for i, chain := range decodedChains {
for _, cert := range chain {
if cert.Issuer.CommonName == prefAnyCN {
if am.Logger != nil {
am.Logger.Debug("found preferred certificate chain by issuer common name",
zap.String("preference", prefAnyCN),
zap.Int("chain", i))
}
return certChains[i]
}
}
}
}
}
if len(am.PreferredChains.RootCommonName) > 0 {
for _, prefRootCN := range am.PreferredChains.RootCommonName {
for i, chain := range decodedChains {
if chain[len(chain)-1].Issuer.CommonName == prefRootCN {
if am.Logger != nil {
am.Logger.Debug("found preferred certificate chain by root common name",
zap.String("preference", prefRootCN),
zap.Int("chain", i))
}
return certChains[i]
}
}
}
}
if am.Logger != nil {
am.Logger.Warn("did not find chain matching preferences; using first")
}
}
return certChains[0]
}
// Revoke implements the Revoker interface. It revokes the given certificate.
func (am *ACMEIssuer) Revoke(ctx context.Context, cert CertificateResource, reason int) error {
client, err := am.newACMEClientWithAccount(ctx, false, false)
if err != nil {
return err
}
certs, err := parseCertsFromPEMBundle(cert.CertificatePEM)
if err != nil {
return err
}
return client.revoke(ctx, certs[0], reason)
}
// ChainPreference describes the client's preferred certificate chain,
// useful if the CA offers alternate chains. The first matching chain
// will be selected.
type ChainPreference struct {
// Prefer chains with the fewest number of bytes.
Smallest *bool
// Select first chain having a root with one of
// these common names.
RootCommonName []string
// Select first chain that has any issuer with one
// of these common names.
AnyCommonName []string
}
// DefaultACME specifies default settings to use for ACMEIssuers.
// Using this value is optional but can be convenient.
var DefaultACME = ACMEIssuer{
CA: LetsEncryptProductionCA,
TestCA: LetsEncryptStagingCA,
}
// Some well-known CA endpoints available to use.
const (
LetsEncryptStagingCA = "https://acme-staging-v02.api.letsencrypt.org/directory"
LetsEncryptProductionCA = "https://acme-v02.api.letsencrypt.org/directory"
ZeroSSLProductionCA = "https://acme.zerossl.com/v2/DV90"
)
// prefixACME is the storage key prefix used for ACME-specific assets.
const prefixACME = "acme"
// Interface guards
var (
_ PreChecker = (*ACMEIssuer)(nil)
_ Issuer = (*ACMEIssuer)(nil)
_ Revoker = (*ACMEIssuer)(nil)
)
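Tying this file together, a hedged sketch of building an issuer from a template as the NewACMEIssuer documentation describes, including a chain preference (the CA choice, agreement flag, and root common name are illustrative assumptions):

```go
package main

import "github.com/caddyserver/certmagic"

func main() {
	smallest := true

	magic := certmagic.NewDefault()

	// Empty template fields fall back to DefaultACME; PreferredChains picks
	// among alternate chains offered by the CA.
	issuer := certmagic.NewACMEIssuer(magic, certmagic.ACMEIssuer{
		CA:     certmagic.LetsEncryptProductionCA,
		Agreed: true,
		PreferredChains: certmagic.ChainPreference{
			Smallest:       &smallest,
			RootCommonName: []string{"ISRG Root X1"},
		},
	})
	magic.Issuers = []certmagic.Issuer{issuer}

	// ... magic.ManageSync(ctx, domains) would be called next.
}
```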

187
vendor/github.com/caddyserver/certmagic/async.go generated vendored Normal file
View File

@@ -0,0 +1,187 @@
package certmagic
import (
"context"
"errors"
"log"
"runtime"
"sync"
"time"
"go.uber.org/zap"
)
var jm = &jobManager{maxConcurrentJobs: 1000}
type jobManager struct {
mu sync.Mutex
maxConcurrentJobs int
activeWorkers int
queue []namedJob
names map[string]struct{}
}
type namedJob struct {
name string
job func() error
logger *zap.Logger
}
// Submit enqueues the given job with the given name. If name is non-empty
// and a job with the same name is already enqueued or running, this is a
// no-op. If name is empty, no duplicate prevention will occur. The job
// manager will then run this job as soon as it is able.
func (jm *jobManager) Submit(logger *zap.Logger, name string, job func() error) {
jm.mu.Lock()
defer jm.mu.Unlock()
if jm.names == nil {
jm.names = make(map[string]struct{})
}
if name != "" {
// prevent duplicate jobs
if _, ok := jm.names[name]; ok {
return
}
jm.names[name] = struct{}{}
}
jm.queue = append(jm.queue, namedJob{name, job, logger})
if jm.activeWorkers < jm.maxConcurrentJobs {
jm.activeWorkers++
go jm.worker()
}
}
func (jm *jobManager) worker() {
defer func() {
if err := recover(); err != nil {
buf := make([]byte, stackTraceBufferSize)
buf = buf[:runtime.Stack(buf, false)]
log.Printf("panic: certificate worker: %v\n%s", err, buf)
}
}()
for {
jm.mu.Lock()
if len(jm.queue) == 0 {
jm.activeWorkers--
jm.mu.Unlock()
return
}
next := jm.queue[0]
jm.queue = jm.queue[1:]
jm.mu.Unlock()
if err := next.job(); err != nil {
if next.logger != nil {
next.logger.Error("job failed", zap.Error(err))
}
}
if next.name != "" {
jm.mu.Lock()
delete(jm.names, next.name)
jm.mu.Unlock()
}
}
}
func doWithRetry(ctx context.Context, log *zap.Logger, f func(context.Context) error) error {
var attempts int
ctx = context.WithValue(ctx, AttemptsCtxKey, &attempts)
// the initial intervalIndex is -1, signaling
// that we should not wait for the first attempt
start, intervalIndex := time.Now(), -1
var err error
for time.Since(start) < maxRetryDuration {
var wait time.Duration
if intervalIndex >= 0 {
wait = retryIntervals[intervalIndex]
}
timer := time.NewTimer(wait)
select {
case <-ctx.Done():
timer.Stop()
return context.Canceled
case <-timer.C:
err = f(ctx)
attempts++
if err == nil || errors.Is(err, context.Canceled) {
return err
}
var errNoRetry ErrNoRetry
if errors.As(err, &errNoRetry) {
return err
}
if intervalIndex < len(retryIntervals)-1 {
intervalIndex++
}
if time.Since(start) < maxRetryDuration {
if log != nil {
log.Error("will retry",
zap.Error(err),
zap.Int("attempt", attempts),
zap.Duration("retrying_in", retryIntervals[intervalIndex]),
zap.Duration("elapsed", time.Since(start)),
zap.Duration("max_duration", maxRetryDuration))
}
} else {
if log != nil {
log.Error("final attempt; giving up",
zap.Error(err),
zap.Int("attempt", attempts),
zap.Duration("elapsed", time.Since(start)),
zap.Duration("max_duration", maxRetryDuration))
}
return nil
}
}
}
return err
}
// ErrNoRetry is an error type which signals
// to stop retries early.
type ErrNoRetry struct{ Err error }
// Unwrap makes it so that e wraps e.Err.
func (e ErrNoRetry) Unwrap() error { return e.Err }
func (e ErrNoRetry) Error() string { return e.Err.Error() }
type retryStateCtxKey struct{}
// AttemptsCtxKey is the context key for the value
// that holds the attempt counter. The value counts
// how many times the operation has been attempted.
// A value of 0 means first attempt.
var AttemptsCtxKey retryStateCtxKey
// retryIntervals are based on the idea of exponential
// backoff, but weighed a little more heavily to the
// front. We figure that intermittent errors would be
// resolved after the first retry, but any errors after
// that would probably require at least a few minutes
// to clear up: either for DNS to propagate, for the
// administrator to fix their DNS or network properties,
// or some other external factor needs to change. We
// chose intervals that we think will be most useful
// without introducing unnecessary delay. The last
// interval in this list will be used until the time
// of maxRetryDuration has elapsed.
var retryIntervals = []time.Duration{
1 * time.Minute,
2 * time.Minute,
2 * time.Minute,
5 * time.Minute, // elapsed: 10 min
10 * time.Minute,
20 * time.Minute,
20 * time.Minute, // elapsed: 1 hr
30 * time.Minute,
30 * time.Minute, // elapsed: 2 hr
1 * time.Hour,
3 * time.Hour, // elapsed: 6 hr
6 * time.Hour, // for up to maxRetryDuration
}
// maxRetryDuration is the maximum duration to try
// doing retries using the above intervals.
const maxRetryDuration = 24 * time.Hour * 30
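ErrNoRetry is the hook an issuer can use to opt out of the retry schedule above. A hypothetical wrapper, not part of the library and shown only as a sketch, might look like this:

```go
// Package tlsutil is a hypothetical helper package, not part of certmagic.
package tlsutil

import (
	"context"
	"crypto/x509"
	"strings"

	"github.com/caddyserver/certmagic"
)

// noRetryOnRejection wraps another issuer and converts identifier-rejection
// errors into ErrNoRetry, so the retry loop gives up immediately instead of
// backing off for up to 30 days.
type noRetryOnRejection struct {
	certmagic.Issuer
}

func (w noRetryOnRejection) Issue(ctx context.Context, csr *x509.CertificateRequest) (*certmagic.IssuedCertificate, error) {
	cert, err := w.Issuer.Issue(ctx, csr)
	if err != nil && strings.Contains(err.Error(), "rejectedIdentifier") {
		return nil, certmagic.ErrNoRetry{Err: err}
	}
	return cert, err
}
```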

364
vendor/github.com/caddyserver/certmagic/cache.go generated vendored Normal file
View File

@@ -0,0 +1,364 @@
// Copyright 2015 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package certmagic
import (
"fmt"
weakrand "math/rand" // seeded elsewhere
"strings"
"sync"
"time"
"go.uber.org/zap"
)
// Cache is a structure that stores certificates in memory.
// A Cache indexes certificates by name for quick access
// during TLS handshakes, and avoids duplicating certificates
// in memory. Generally, there should only be one per process.
// However, that is not a strict requirement; using more
// than one is a code smell, and may indicate an
// over-engineered design.
//
// An empty cache is INVALID and must not be used. Be sure
// to call NewCache to get a valid value.
//
// These should be very long-lived values and must not be
// copied. Before all references leave scope to be garbage
// collected, ensure you call Stop() to stop maintenance on
// the certificates stored in this cache and release locks.
//
// Caches are not usually manipulated directly; create a
// Config value with a pointer to a Cache, and then use
// the Config to interact with the cache. Caches are
// agnostic of any particular storage or ACME config,
// since each certificate may be managed and stored
// differently.
type Cache struct {
// User configuration of the cache
options CacheOptions
// The cache is keyed by certificate hash
cache map[string]Certificate
// cacheIndex is a map of SAN to cache key (cert hash)
cacheIndex map[string][]string
// Protects the cache and index maps
mu sync.RWMutex
// Close this channel to cancel asset maintenance
stopChan chan struct{}
// Used to signal when stopping is completed
doneChan chan struct{}
logger *zap.Logger
}
// NewCache returns a new, valid Cache for efficiently
// accessing certificates in memory. It also begins a
// maintenance goroutine to tend to the certificates
// in the cache. Call Stop() when you are done with the
// cache so it can clean up locks and stuff.
//
// Most users of this package will not need to call this
// because a default certificate cache is created for you.
// Only advanced use cases require creating a new cache.
//
// This function panics if opts.GetConfigForCert is not
// set. The reason is that a cache absolutely needs to
// be able to get a Config with which to manage TLS
// assets, and it is not safe to assume that the Default
// config is always the correct one, since you have
// created the cache yourself.
//
// See the godoc for Cache to use it properly. When
// no longer needed, caches should be stopped with
// Stop() to clean up resources even if the process
// is being terminated, so that it can clean up
// any locks for other processes to unblock!
func NewCache(opts CacheOptions) *Cache {
// assume default options if necessary
if opts.OCSPCheckInterval <= 0 {
opts.OCSPCheckInterval = DefaultOCSPCheckInterval
}
if opts.RenewCheckInterval <= 0 {
opts.RenewCheckInterval = DefaultRenewCheckInterval
}
if opts.Capacity < 0 {
opts.Capacity = 0
}
// this must be set, because we cannot not
// safely assume that the Default Config
// is always the correct one to use
if opts.GetConfigForCert == nil {
panic("cache must be initialized with a GetConfigForCert callback")
}
c := &Cache{
options: opts,
cache: make(map[string]Certificate),
cacheIndex: make(map[string][]string),
stopChan: make(chan struct{}),
doneChan: make(chan struct{}),
logger: opts.Logger,
}
go c.maintainAssets(0)
return c
}
// Stop stops the maintenance goroutine for
// certificates in certCache. It blocks until
// stopping is complete. Once a cache is
// stopped, it cannot be reused.
func (certCache *Cache) Stop() {
close(certCache.stopChan) // signal to stop
<-certCache.doneChan // wait for stop to complete
}
// CacheOptions is used to configure certificate caches.
// Once a cache has been created with certain options,
// those settings cannot be changed.
type CacheOptions struct {
// REQUIRED. A function that returns a configuration
// used for managing a certificate, or for accessing
// that certificate's asset storage (e.g. for
// OCSP staples, etc). The returned Config MUST
// be associated with the same Cache as the caller.
//
// The reason this is a callback function, dynamically
// returning a Config (instead of attaching a static
// pointer to a Config on each certificate) is because
// the config for how to manage a domain's certificate
// might change from maintenance to maintenance. The
// cache is so long-lived, we cannot assume that the
// host's situation will always be the same; e.g. the
// certificate might switch DNS providers, so the DNS
// challenge (if used) would need to be adjusted from
// the last time it was run ~8 weeks ago.
GetConfigForCert ConfigGetter
// How often to check and update OCSP staples;
// if unset, DefaultOCSPCheckInterval will be used.
OCSPCheckInterval time.Duration
// How often to check certificates for renewal;
// if unset, DefaultRenewCheckInterval will be used.
RenewCheckInterval time.Duration
// Maximum number of certificates to allow in the cache.
// If reached, certificates will be randomly evicted to
// make room for new ones. 0 means unlimited.
Capacity int
// Set a logger to enable logging
Logger *zap.Logger
}
// ConfigGetter is a function that returns a prepared,
// valid config that should be used when managing the
// given certificate or its assets.
type ConfigGetter func(Certificate) (*Config, error)
// cacheCertificate calls unsyncedCacheCertificate with a write lock.
//
// This function is safe for concurrent use.
func (certCache *Cache) cacheCertificate(cert Certificate) {
certCache.mu.Lock()
certCache.unsyncedCacheCertificate(cert)
certCache.mu.Unlock()
}
// unsyncedCacheCertificate adds cert to the in-memory cache unless
// it already exists in the cache (according to cert.Hash). It
// updates the name index.
//
// This function is NOT safe for concurrent use. Callers MUST acquire
// a write lock on certCache.mu first.
func (certCache *Cache) unsyncedCacheCertificate(cert Certificate) {
// no-op if this certificate already exists in the cache
if _, ok := certCache.cache[cert.hash]; ok {
if certCache.logger != nil {
certCache.logger.Debug("certificate already cached",
zap.Strings("subjects", cert.Names),
zap.Time("expiration", cert.Leaf.NotAfter),
zap.Bool("managed", cert.managed),
zap.String("issuer_key", cert.issuerKey),
zap.String("hash", cert.hash))
}
return
}
// if the cache is at capacity, make room for new cert
cacheSize := len(certCache.cache)
if certCache.options.Capacity > 0 && cacheSize >= certCache.options.Capacity {
// Go maps are "nondeterministic" but not actually random,
// so although we could just chop off the "front" of the
// map with less code, that is a heavily skewed eviction
// strategy; generating random numbers is cheap and
// ensures a much better distribution.
rnd := weakrand.Intn(cacheSize)
i := 0
for _, randomCert := range certCache.cache {
if i == rnd {
if certCache.logger != nil {
certCache.logger.Debug("cache full; evicting random certificate",
zap.Strings("removing_subjects", randomCert.Names),
zap.String("removing_hash", randomCert.hash),
zap.Strings("inserting_subjects", cert.Names),
zap.String("inserting_hash", cert.hash))
}
certCache.removeCertificate(randomCert)
break
}
i++
}
}
// store the certificate
certCache.cache[cert.hash] = cert
// update the index so we can access it by name
for _, name := range cert.Names {
certCache.cacheIndex[name] = append(certCache.cacheIndex[name], cert.hash)
}
if certCache.logger != nil {
certCache.logger.Debug("added certificate to cache",
zap.Strings("subjects", cert.Names),
zap.Time("expiration", cert.Leaf.NotAfter),
zap.Bool("managed", cert.managed),
zap.String("issuer_key", cert.issuerKey),
zap.String("hash", cert.hash),
zap.Int("cache_size", len(certCache.cache)),
zap.Int("cache_capacity", certCache.options.Capacity))
}
}
// removeCertificate removes cert from the cache.
//
// This function is NOT safe for concurrent use; callers
// MUST first acquire a write lock on certCache.mu.
func (certCache *Cache) removeCertificate(cert Certificate) {
// delete all mentions of this cert from the name index
for _, name := range cert.Names {
keyList := certCache.cacheIndex[name]
for i := 0; i < len(keyList); i++ {
if keyList[i] == cert.hash {
keyList = append(keyList[:i], keyList[i+1:]...)
i--
}
}
if len(keyList) == 0 {
delete(certCache.cacheIndex, name)
} else {
certCache.cacheIndex[name] = keyList
}
}
// delete the actual cert from the cache
delete(certCache.cache, cert.hash)
if certCache.logger != nil {
certCache.logger.Debug("removed certificate from cache",
zap.Strings("subjects", cert.Names),
zap.Time("expiration", cert.Leaf.NotAfter),
zap.Bool("managed", cert.managed),
zap.String("issuer_key", cert.issuerKey),
zap.String("hash", cert.hash),
zap.Int("cache_size", len(certCache.cache)),
zap.Int("cache_capacity", certCache.options.Capacity))
}
}
// replaceCertificate atomically replaces oldCert with newCert in
// the cache.
//
// This method is safe for concurrent use.
func (certCache *Cache) replaceCertificate(oldCert, newCert Certificate) {
certCache.mu.Lock()
certCache.removeCertificate(oldCert)
certCache.unsyncedCacheCertificate(newCert)
certCache.mu.Unlock()
if certCache.logger != nil {
certCache.logger.Info("replaced certificate in cache",
zap.Strings("subjects", newCert.Names),
zap.Time("new_expiration", newCert.Leaf.NotAfter))
}
}
func (certCache *Cache) getAllMatchingCerts(name string) []Certificate {
certCache.mu.RLock()
defer certCache.mu.RUnlock()
allCertKeys := certCache.cacheIndex[name]
certs := make([]Certificate, len(allCertKeys))
for i := range allCertKeys {
certs[i] = certCache.cache[allCertKeys[i]]
}
return certs
}
func (certCache *Cache) getAllCerts() []Certificate {
certCache.mu.RLock()
defer certCache.mu.RUnlock()
certs := make([]Certificate, 0, len(certCache.cache))
for _, cert := range certCache.cache {
certs = append(certs, cert)
}
return certs
}
func (certCache *Cache) getConfig(cert Certificate) (*Config, error) {
cfg, err := certCache.options.GetConfigForCert(cert)
if err != nil {
return nil, err
}
if cfg.certCache != nil && cfg.certCache != certCache {
return nil, fmt.Errorf("config returned for certificate %v is not nil and points to different cache; got %p, expected %p (this one)",
cert.Names, cfg.certCache, certCache)
}
return cfg, nil
}
// AllMatchingCertificates returns a list of all certificates that could
// be used to serve the given SNI name, including exact SAN matches and
// wildcard matches.
func (certCache *Cache) AllMatchingCertificates(name string) []Certificate {
// get exact matches first
certs := certCache.getAllMatchingCerts(name)
// then look for wildcard matches by replacing each
// label of the domain name with wildcards
labels := strings.Split(name, ".")
for i := range labels {
labels[i] = "*"
candidate := strings.Join(labels, ".")
certs = append(certs, certCache.getAllMatchingCerts(candidate)...)
}
return certs
}
var (
defaultCache *Cache
defaultCacheMu sync.Mutex
)
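Most programs never construct a cache themselves because a default one is created for them, but when a cache is built explicitly it must be given a GetConfigForCert callback, as the NewCache documentation above insists. A minimal sketch, assuming a single shared Config:

```go
package main

import "github.com/caddyserver/certmagic"

func main() {
	// The cache needs a callback that returns the Config used to manage any
	// given certificate; here every certificate shares one Config.
	var magic *certmagic.Config
	cache := certmagic.NewCache(certmagic.CacheOptions{
		GetConfigForCert: func(certmagic.Certificate) (*certmagic.Config, error) {
			return magic, nil
		},
	})
	defer cache.Stop() // stop maintenance and release locks when done

	magic = certmagic.New(cache, certmagic.Config{
		// per-certificate settings would go here
	})

	// ... magic would then be used to manage certificates.
}
```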

429
vendor/github.com/caddyserver/certmagic/certificates.go generated vendored Normal file
View File

@@ -0,0 +1,429 @@
// Copyright 2015 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package certmagic
import (
"context"
"crypto/tls"
"crypto/x509"
"fmt"
"net"
"os"
"strings"
"time"
"go.uber.org/zap"
"golang.org/x/crypto/ocsp"
)
// Certificate is a tls.Certificate with associated metadata tacked on.
// Even if the metadata can be obtained by parsing the certificate,
// we are more efficient by extracting the metadata onto this struct,
// but at the cost of slightly higher memory use.
type Certificate struct {
tls.Certificate
// Names is the list of subject names this
// certificate is signed for.
Names []string
// Optional; user-provided, and arbitrary.
Tags []string
// OCSP contains the certificate's parsed OCSP response.
// It is not necessarily the response that is stapled
// (e.g. if the status is not Good), it is simply the
// most recent OCSP response we have for this certificate.
ocsp *ocsp.Response
// The hex-encoded hash of this cert's chain's bytes.
hash string
// Whether this certificate is under our management.
managed bool
// The unique string identifying the issuer of this certificate.
issuerKey string
}
// Empty returns true if the certificate struct is not filled out; at
// least the tls.Certificate.Certificate field is expected to be set.
func (cert Certificate) Empty() bool {
return len(cert.Certificate.Certificate) == 0
}
// NeedsRenewal returns true if the certificate is
// expiring soon (according to cfg) or has expired.
func (cert Certificate) NeedsRenewal(cfg *Config) bool {
return currentlyInRenewalWindow(cert.Leaf.NotBefore, cert.Leaf.NotAfter, cfg.RenewalWindowRatio)
}
// Expired returns true if the certificate has expired.
func (cert Certificate) Expired() bool {
if cert.Leaf == nil {
// ideally cert.Leaf would never be nil, but this can happen for
// "synthetic" certs like those made to solve the TLS-ALPN challenge
// which adds a special cert directly to the cache, since
// tls.X509KeyPair() discards the leaf; oh well
return false
}
return time.Now().After(cert.Leaf.NotAfter)
}
// currentlyInRenewalWindow returns true if the current time is
// within the renewal window, according to the given start/end
// dates and the ratio of the renewal window. If true is returned,
// the certificate being considered is due for renewal.
func currentlyInRenewalWindow(notBefore, notAfter time.Time, renewalWindowRatio float64) bool {
if notAfter.IsZero() {
return false
}
lifetime := notAfter.Sub(notBefore)
if renewalWindowRatio == 0 {
renewalWindowRatio = DefaultRenewalWindowRatio
}
renewalWindow := time.Duration(float64(lifetime) * renewalWindowRatio)
renewalWindowStart := notAfter.Add(-renewalWindow)
return time.Now().After(renewalWindowStart)
}
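As a worked example of the renewal-window math, assuming certmagic's default ratio of 1/3 (DefaultRenewalWindowRatio lives in config.go, whose diff is suppressed below): a 90-day certificate enters its renewal window 30 days before NotAfter. A minimal standalone sketch:

package main

import (
	"fmt"
	"time"
)

// inRenewalWindow is a standalone restatement of currentlyInRenewalWindow,
// for illustration only.
func inRenewalWindow(notBefore, notAfter time.Time, ratio float64) bool {
	lifetime := notAfter.Sub(notBefore)
	window := time.Duration(float64(lifetime) * ratio)
	return time.Now().After(notAfter.Add(-window))
}

func main() {
	notBefore := time.Now().Add(-70 * 24 * time.Hour) // issued 70 days ago
	notAfter := notBefore.Add(90 * 24 * time.Hour)    // 90-day lifetime
	// 70 days elapsed is past the 60-day mark, so renewal is due.
	fmt.Println(inRenewalWindow(notBefore, notAfter, 1.0/3.0)) // true
}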
// HasTag returns true if cert.Tags has tag.
func (cert Certificate) HasTag(tag string) bool {
for _, t := range cert.Tags {
if t == tag {
return true
}
}
return false
}
// CacheManagedCertificate loads the certificate for domain into the
// cache, from the TLS storage for managed certificates. It returns a
// copy of the Certificate that was put into the cache.
//
// This is a lower-level method; normally you'll call Manage() instead.
//
// This method is safe for concurrent use.
func (cfg *Config) CacheManagedCertificate(ctx context.Context, domain string) (Certificate, error) {
cert, err := cfg.loadManagedCertificate(ctx, domain)
if err != nil {
return cert, err
}
cfg.certCache.cacheCertificate(cert)
cfg.emit("cached_managed_cert", cert.Names)
return cert, nil
}
// loadManagedCertificate loads the managed certificate for domain from any
// of the configured issuers' storage locations, but it does not add it to
// the cache. It just loads from storage and returns it.
func (cfg *Config) loadManagedCertificate(ctx context.Context, domain string) (Certificate, error) {
certRes, err := cfg.loadCertResourceAnyIssuer(ctx, domain)
if err != nil {
return Certificate{}, err
}
cert, err := cfg.makeCertificateWithOCSP(ctx, certRes.CertificatePEM, certRes.PrivateKeyPEM)
if err != nil {
return cert, err
}
cert.managed = true
cert.issuerKey = certRes.issuerKey
return cert, nil
}
// CacheUnmanagedCertificatePEMFile loads a certificate for host using certFile
// and keyFile, which must be in PEM format. It stores the certificate in
// the in-memory cache.
//
// This method is safe for concurrent use.
func (cfg *Config) CacheUnmanagedCertificatePEMFile(ctx context.Context, certFile, keyFile string, tags []string) error {
cert, err := cfg.makeCertificateFromDiskWithOCSP(ctx, cfg.Storage, certFile, keyFile)
if err != nil {
return err
}
cert.Tags = tags
cfg.certCache.cacheCertificate(cert)
cfg.emit("cached_unmanaged_cert", cert.Names)
return nil
}
// CacheUnmanagedTLSCertificate adds tlsCert to the certificate cache.
// It staples OCSP if possible.
//
// This method is safe for concurrent use.
func (cfg *Config) CacheUnmanagedTLSCertificate(ctx context.Context, tlsCert tls.Certificate, tags []string) error {
var cert Certificate
err := fillCertFromLeaf(&cert, tlsCert)
if err != nil {
return err
}
err = stapleOCSP(ctx, cfg.OCSP, cfg.Storage, &cert, nil)
if err != nil && cfg.Logger != nil {
cfg.Logger.Warn("stapling OCSP", zap.Error(err))
}
cfg.emit("cached_unmanaged_cert", cert.Names)
cert.Tags = tags
cfg.certCache.cacheCertificate(cert)
return nil
}
// CacheUnmanagedCertificatePEMBytes makes a certificate out of the PEM bytes
// of the certificate and key, then caches it in memory.
//
// This method is safe for concurrent use.
func (cfg *Config) CacheUnmanagedCertificatePEMBytes(ctx context.Context, certBytes, keyBytes []byte, tags []string) error {
cert, err := cfg.makeCertificateWithOCSP(ctx, certBytes, keyBytes)
if err != nil {
return err
}
cert.Tags = tags
cfg.certCache.cacheCertificate(cert)
cfg.emit("cached_unmanaged_cert", cert.Names)
return nil
}
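A minimal sketch of caching an unmanaged certificate with the method above; the file paths and tag are placeholders:

package main

import (
	"context"
	"log"

	"github.com/caddyserver/certmagic"
)

func main() {
	cfg := certmagic.NewDefault()

	// Load a PEM cert/key pair from disk into the in-memory cache.
	err := cfg.CacheUnmanagedCertificatePEMFile(
		context.Background(),
		"/etc/ssl/example.crt", "/etc/ssl/example.key", // placeholder paths
		[]string{"manual"}, // arbitrary tags, queryable via Certificate.HasTag
	)
	if err != nil {
		log.Fatal(err)
	}

	// The certificate is now served for its names via cfg.TLSConfig().
	_ = cfg.TLSConfig()
}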
// makeCertificateFromDiskWithOCSP makes a Certificate by loading the
// certificate and key files. It fills out all the fields in
// the certificate except for the Managed and OnDemand flags.
// (It is up to the caller to set those.) It staples OCSP.
func (cfg Config) makeCertificateFromDiskWithOCSP(ctx context.Context, storage Storage, certFile, keyFile string) (Certificate, error) {
certPEMBlock, err := os.ReadFile(certFile)
if err != nil {
return Certificate{}, err
}
keyPEMBlock, err := os.ReadFile(keyFile)
if err != nil {
return Certificate{}, err
}
return cfg.makeCertificateWithOCSP(ctx, certPEMBlock, keyPEMBlock)
}
// makeCertificateWithOCSP is the same as makeCertificate except that it also
// staples OCSP to the certificate.
func (cfg Config) makeCertificateWithOCSP(ctx context.Context, certPEMBlock, keyPEMBlock []byte) (Certificate, error) {
cert, err := makeCertificate(certPEMBlock, keyPEMBlock)
if err != nil {
return cert, err
}
err = stapleOCSP(ctx, cfg.OCSP, cfg.Storage, &cert, certPEMBlock)
if err != nil && cfg.Logger != nil {
cfg.Logger.Warn("stapling OCSP", zap.Error(err), zap.Strings("identifiers", cert.Names))
}
return cert, nil
}
// makeCertificate turns a certificate PEM bundle and a key PEM block into
// a Certificate with necessary metadata from parsing its bytes filled into
// its struct fields for convenience (except for the OnDemand and Managed
// flags; it is up to the caller to set those properties!). This function
// does NOT staple OCSP.
func makeCertificate(certPEMBlock, keyPEMBlock []byte) (Certificate, error) {
var cert Certificate
// Convert to a tls.Certificate
tlsCert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock)
if err != nil {
return cert, err
}
// Extract necessary metadata
err = fillCertFromLeaf(&cert, tlsCert)
if err != nil {
return cert, err
}
return cert, nil
}
// fillCertFromLeaf populates cert from tlsCert. If it succeeds, it
// guarantees that cert.Leaf is non-nil.
func fillCertFromLeaf(cert *Certificate, tlsCert tls.Certificate) error {
if len(tlsCert.Certificate) == 0 {
return fmt.Errorf("certificate is empty")
}
cert.Certificate = tlsCert
// the leaf cert should be the one for the site; we must set
// the tls.Certificate.Leaf field so that TLS handshakes are
// more efficient
leaf := cert.Certificate.Leaf
if leaf == nil {
var err error
leaf, err = x509.ParseCertificate(tlsCert.Certificate[0])
if err != nil {
return err
}
cert.Certificate.Leaf = leaf
}
// for convenience, we do want to assemble all the
// subjects on the certificate into one list
if leaf.Subject.CommonName != "" { // TODO: CommonName is deprecated
cert.Names = []string{strings.ToLower(leaf.Subject.CommonName)}
}
for _, name := range leaf.DNSNames {
if name != leaf.Subject.CommonName { // TODO: CommonName is deprecated
cert.Names = append(cert.Names, strings.ToLower(name))
}
}
for _, ip := range leaf.IPAddresses {
if ipStr := ip.String(); ipStr != leaf.Subject.CommonName { // TODO: CommonName is deprecated
cert.Names = append(cert.Names, strings.ToLower(ipStr))
}
}
for _, email := range leaf.EmailAddresses {
if email != leaf.Subject.CommonName { // TODO: CommonName is deprecated
cert.Names = append(cert.Names, strings.ToLower(email))
}
}
for _, u := range leaf.URIs {
if u.String() != leaf.Subject.CommonName { // TODO: CommonName is deprecated
cert.Names = append(cert.Names, u.String())
}
}
if len(cert.Names) == 0 {
return fmt.Errorf("certificate has no names")
}
cert.hash = hashCertificateChain(cert.Certificate.Certificate)
return nil
}
// managedCertInStorageExpiresSoon returns true if cert (being a
// managed certificate) is expiring within the config's renewal window.
// It returns false if there was an error checking the expiration
// of the certificate as found in storage, or if the certificate
// in storage is NOT expiring soon. A certificate that is expiring
// soon in our cache but is not expiring soon in storage probably
// means that another instance renewed the certificate in the
// meantime, and it would be a good idea to simply load the cert
// into our cache rather than repeating the renewal process again.
func (cfg *Config) managedCertInStorageExpiresSoon(ctx context.Context, cert Certificate) (bool, error) {
certRes, err := cfg.loadCertResourceAnyIssuer(ctx, cert.Names[0])
if err != nil {
return false, err
}
_, needsRenew := cfg.managedCertNeedsRenewal(certRes)
return needsRenew, nil
}
// reloadManagedCertificate reloads the certificate corresponding to the name(s)
// on oldCert into the cache, from storage. This also replaces the old certificate
// with the new one, so that all configurations that used the old cert now point
// to the new cert. It assumes that the new certificate for oldCert.Names[0] is
// already in storage. It returns the newly-loaded certificate if successful.
func (cfg *Config) reloadManagedCertificate(ctx context.Context, oldCert Certificate) (Certificate, error) {
if cfg.Logger != nil {
cfg.Logger.Info("reloading managed certificate", zap.Strings("identifiers", oldCert.Names))
}
newCert, err := cfg.loadManagedCertificate(ctx, oldCert.Names[0])
if err != nil {
return Certificate{}, fmt.Errorf("loading managed certificate for %v from storage: %v", oldCert.Names, err)
}
cfg.certCache.replaceCertificate(oldCert, newCert)
return newCert, nil
}
// SubjectQualifiesForCert returns true if subj is a name which,
// as a quick sanity check, looks like it could be the subject
// of a certificate. Requirements are:
// - must not be empty
// - must not start or end with a dot (RFC 1034; RFC 6066 section 3)
// - must not contain common accidental special characters
func SubjectQualifiesForCert(subj string) bool {
// must not be empty
return strings.TrimSpace(subj) != "" &&
// must not start or end with a dot
!strings.HasPrefix(subj, ".") &&
!strings.HasSuffix(subj, ".") &&
// if it has a wildcard, must be a left-most label (or exactly "*"
// which won't be trusted by browsers but still technically works)
(!strings.Contains(subj, "*") || strings.HasPrefix(subj, "*.") || subj == "*") &&
// must not contain other common special characters
!strings.ContainsAny(subj, "()[]{}<> \t\n\"\\!@#$%^&|;'+=")
}
// SubjectQualifiesForPublicCert returns true if the subject
// name appears eligible for automagic TLS with a public
// CA such as Let's Encrypt. For example: localhost and IP
// addresses are not eligible because we cannot obtain certs
// for those names with a public CA. Wildcard names are
// allowed, as long as they conform to CABF requirements (only
// one wildcard label, and it must be the left-most label).
func SubjectQualifiesForPublicCert(subj string) bool {
// must at least qualify for a certificate
return SubjectQualifiesForCert(subj) &&
// localhost, .localhost TLD, and .local TLD are ineligible
!SubjectIsInternal(subj) &&
// cannot be an IP address (as of yet), see
// https://community.letsencrypt.org/t/certificate-for-static-ip/84/2?u=mholt
!SubjectIsIP(subj) &&
// only one wildcard label allowed, and it must be left-most, with 3+ labels
(!strings.Contains(subj, "*") ||
(strings.Count(subj, "*") == 1 &&
strings.Count(subj, ".") > 1 &&
len(subj) > 2 &&
strings.HasPrefix(subj, "*.")))
}
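A short usage sketch of the two qualification checks above (the names are placeholders):

package main

import (
	"fmt"

	"github.com/caddyserver/certmagic"
)

func main() {
	// Wildcards must be the left-most label; localhost and IP addresses
	// can get a certificate, but not from a public CA.
	fmt.Println(certmagic.SubjectQualifiesForCert("*.example.com"))       // true
	fmt.Println(certmagic.SubjectQualifiesForCert("foo.*.example.com"))   // false
	fmt.Println(certmagic.SubjectQualifiesForPublicCert("example.com"))   // true
	fmt.Println(certmagic.SubjectQualifiesForPublicCert("*.example.com")) // true
	fmt.Println(certmagic.SubjectQualifiesForPublicCert("localhost"))     // false
	fmt.Println(certmagic.SubjectQualifiesForPublicCert("10.0.0.1"))      // false
}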
// SubjectIsIP returns true if subj is an IP address.
func SubjectIsIP(subj string) bool {
return net.ParseIP(subj) != nil
}
// SubjectIsInternal returns true if subj is an internal-facing
// hostname or address.
func SubjectIsInternal(subj string) bool {
return subj == "localhost" ||
strings.HasSuffix(subj, ".localhost") ||
strings.HasSuffix(subj, ".local")
}
// MatchWildcard returns true if subject (a candidate DNS name)
// matches wildcard (a reference DNS name), mostly according to
// RFC 6125-compliant wildcard rules. See also RFC 2818 which
// states that IP addresses must match exactly, but this function
// does not attempt to distinguish IP addresses from internal or
// external DNS names that happen to look like IP addresses.
// It uses DNS wildcard matching logic and is case-insensitive.
// https://tools.ietf.org/html/rfc2818#section-3.1
func MatchWildcard(subject, wildcard string) bool {
subject, wildcard = strings.ToLower(subject), strings.ToLower(wildcard)
if subject == wildcard {
return true
}
if !strings.Contains(wildcard, "*") {
return false
}
labels := strings.Split(subject, ".")
for i := range labels {
if labels[i] == "" {
continue // invalid label
}
labels[i] = "*"
candidate := strings.Join(labels, ".")
if candidate == wildcard {
return true
}
}
return false
}
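A short sketch of MatchWildcard's behavior (placeholder names):

package main

import (
	"fmt"

	"github.com/caddyserver/certmagic"
)

func main() {
	// Matching is case-insensitive; the bare apex does not match a
	// "*." wildcard, and only label substitution is attempted.
	fmt.Println(certmagic.MatchWildcard("sub.example.com", "*.example.com")) // true
	fmt.Println(certmagic.MatchWildcard("SUB.Example.COM", "*.example.com")) // true
	fmt.Println(certmagic.MatchWildcard("example.com", "*.example.com"))     // false
	fmt.Println(certmagic.MatchWildcard("a.b.example.com", "*.example.com")) // false
}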

506
vendor/github.com/caddyserver/certmagic/certmagic.go generated vendored Normal file
View File

@@ -0,0 +1,506 @@
// Copyright 2015 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package certmagic automates the obtaining and renewal of TLS certificates,
// including TLS & HTTPS best practices such as robust OCSP stapling, caching,
// HTTP->HTTPS redirects, and more.
//
// Its high-level API serves your HTTP handlers over HTTPS if you simply give
// the domain name(s) and the http.Handler; CertMagic will create and run
// the HTTPS server for you, fully managing certificates during the lifetime
// of the server. Similarly, it can be used to start TLS listeners or return
// a ready-to-use tls.Config -- whatever layer you need TLS for, CertMagic
// makes it easy. See the HTTPS, Listen, and TLS functions for that.
//
// If you need more control, create a Cache using NewCache() and then make
// a Config using New(). You can then call Manage() on the config. But if
// you use this lower-level API, you'll have to be sure to solve the HTTP
// and TLS-ALPN challenges yourself (unless you disabled them or use the
// DNS challenge) by using the provided Config.GetCertificate function
// in your tls.Config and/or Config.HTTPChallengeHandler in your HTTP
// handler.
//
// See the package's README for more instruction.
package certmagic
import (
"context"
"crypto"
"crypto/tls"
"crypto/x509"
"fmt"
"log"
"net"
"net/http"
"sort"
"strings"
"sync"
"time"
)
// HTTPS serves mux for all domainNames using the HTTP
// and HTTPS ports, redirecting all HTTP requests to HTTPS.
// It uses the Default config and a background context.
//
// This high-level convenience function is opinionated and
// applies sane defaults for production use, including
// timeouts for HTTP requests and responses. To allow very
// long-lived connections, you should make your own
// http.Server values and use this package's Listen(), TLS(),
// or Config.TLSConfig() functions to customize to your needs.
// For example, servers which need to support large uploads or
// downloads with slow clients may need to use longer timeouts,
// thus this function is not suitable.
//
// Calling this function signifies your acceptance of
// the CA's Subscriber Agreement and/or Terms of Service.
func HTTPS(domainNames []string, mux http.Handler) error {
ctx := context.Background()
if mux == nil {
mux = http.DefaultServeMux
}
DefaultACME.Agreed = true
cfg := NewDefault()
err := cfg.ManageSync(ctx, domainNames)
if err != nil {
return err
}
httpWg.Add(1)
defer httpWg.Done()
// if we haven't made listeners yet, do so now,
// and clean them up when all servers are done
lnMu.Lock()
if httpLn == nil && httpsLn == nil {
httpLn, err = net.Listen("tcp", fmt.Sprintf(":%d", HTTPPort))
if err != nil {
lnMu.Unlock()
return err
}
tlsConfig := cfg.TLSConfig()
tlsConfig.NextProtos = append([]string{"h2", "http/1.1"}, tlsConfig.NextProtos...)
httpsLn, err = tls.Listen("tcp", fmt.Sprintf(":%d", HTTPSPort), tlsConfig)
if err != nil {
httpLn.Close()
httpLn = nil
lnMu.Unlock()
return err
}
go func() {
httpWg.Wait()
lnMu.Lock()
httpLn.Close()
httpsLn.Close()
lnMu.Unlock()
}()
}
hln, hsln := httpLn, httpsLn
lnMu.Unlock()
// create HTTP/S servers that are configured
// with sane default timeouts and appropriate
// handlers (the HTTP server solves the HTTP
// challenge and issues redirects to HTTPS,
// while the HTTPS server simply serves the
// user's handler)
httpServer := &http.Server{
ReadHeaderTimeout: 5 * time.Second,
ReadTimeout: 5 * time.Second,
WriteTimeout: 5 * time.Second,
IdleTimeout: 5 * time.Second,
BaseContext: func(listener net.Listener) context.Context { return ctx },
}
if len(cfg.Issuers) > 0 {
if am, ok := cfg.Issuers[0].(*ACMEIssuer); ok {
httpServer.Handler = am.HTTPChallengeHandler(http.HandlerFunc(httpRedirectHandler))
}
}
httpsServer := &http.Server{
ReadHeaderTimeout: 10 * time.Second,
ReadTimeout: 30 * time.Second,
WriteTimeout: 2 * time.Minute,
IdleTimeout: 5 * time.Minute,
Handler: mux,
BaseContext: func(listener net.Listener) context.Context { return ctx },
}
log.Printf("%v Serving HTTP->HTTPS on %s and %s",
domainNames, hln.Addr(), hsln.Addr())
go httpServer.Serve(hln)
return httpsServer.Serve(hsln)
}
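A minimal sketch of the high-level entry point above; the domain is a placeholder, and real use requires ports 80/443 plus public DNS:

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/caddyserver/certmagic"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "hello over HTTPS")
	})

	// Binds the HTTP and HTTPS ports, solves the HTTP challenge,
	// redirects HTTP to HTTPS, and serves mux with managed certificates.
	if err := certmagic.HTTPS([]string{"example.com"}, mux); err != nil {
		log.Fatal(err)
	}
}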
func httpRedirectHandler(w http.ResponseWriter, r *http.Request) {
toURL := "https://"
// since we redirect to the standard HTTPS port, we
// do not need to include it in the redirect URL
requestHost := hostOnly(r.Host)
toURL += requestHost
toURL += r.URL.RequestURI()
// get rid of this disgusting unencrypted HTTP connection 🤢
w.Header().Set("Connection", "close")
http.Redirect(w, r, toURL, http.StatusMovedPermanently)
}
// TLS enables management of certificates for domainNames
// and returns a valid tls.Config. It uses the Default
// config.
//
// Because this is a convenience function that returns
// only a tls.Config, it does not assume HTTP is being
// served on the HTTP port, so the HTTP challenge is
// disabled (no HTTPChallengeHandler is necessary). The
// package variable Default is modified so that the
// HTTP challenge is disabled.
//
// Calling this function signifies your acceptance of
// the CA's Subscriber Agreement and/or Terms of Service.
func TLS(domainNames []string) (*tls.Config, error) {
DefaultACME.Agreed = true
DefaultACME.DisableHTTPChallenge = true
cfg := NewDefault()
return cfg.TLSConfig(), cfg.ManageSync(context.Background(), domainNames)
}
// Listen manages certificates for domainName and returns a
// TLS listener. It uses the Default config.
//
// Because this convenience function returns only a TLS-enabled
// listener and does not presume HTTP is also being served,
// the HTTP challenge will be disabled. The package variable
// Default is modified so that the HTTP challenge is disabled.
//
// Calling this function signifies your acceptance of
// the CA's Subscriber Agreement and/or Terms of Service.
func Listen(domainNames []string) (net.Listener, error) {
DefaultACME.Agreed = true
DefaultACME.DisableHTTPChallenge = true
cfg := NewDefault()
err := cfg.ManageSync(context.Background(), domainNames)
if err != nil {
return nil, err
}
return tls.Listen("tcp", fmt.Sprintf(":%d", HTTPSPort), cfg.TLSConfig())
}
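And a sketch using Listen for the listener-only variant (placeholder domain; since the HTTP challenge is disabled, the TLS-ALPN challenge must be reachable on the HTTPS port):

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/caddyserver/certmagic"
)

func main() {
	ln, err := certmagic.Listen([]string{"example.com"})
	if err != nil {
		log.Fatal(err)
	}
	defer ln.Close()

	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "served from a certmagic.Listen listener")
	})
	log.Fatal(http.Serve(ln, handler))
}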
// ManageSync obtains certificates for domainNames and keeps them
// renewed using the Default config.
//
// This is a slightly lower-level function; you will need to
// wire up support for the ACME challenges yourself. You can
// obtain a Config to help you do that by calling NewDefault().
//
// You will need to ensure that you use a TLS config that gets
// certificates from this Config and that the HTTP and TLS-ALPN
// challenges can be solved. The easiest way to do this is to
// use NewDefault().TLSConfig() as your TLS config and to wrap
// your HTTP handler with NewDefault().HTTPChallengeHandler().
// If you don't have an HTTP server, you will need to disable
// the HTTP challenge.
//
// If you already have a TLS config you want to use, you can
// simply set its GetCertificate field to
// NewDefault().GetCertificate.
//
// Calling this function signifies your acceptance of
// the CA's Subscriber Agreement and/or Terms of Service.
func ManageSync(ctx context.Context, domainNames []string) error {
DefaultACME.Agreed = true
return NewDefault().ManageSync(ctx, domainNames)
}
// ManageAsync is the same as ManageSync, except that
// certificates are managed asynchronously. This means
// that the function will return before certificates
// are ready, and errors that occur during certificate
// obtain or renew operations are only logged. It is
// vital that you monitor the logs if using this method,
// which is only recommended for automated/non-interactive
// environments.
func ManageAsync(ctx context.Context, domainNames []string) error {
DefaultACME.Agreed = true
return NewDefault().ManageAsync(ctx, domainNames)
}
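A sketch of the lower-level wiring that the ManageSync comment above describes, with a placeholder domain and email; the Issuers type assertion follows the same pattern HTTPS() uses:

package main

import (
	"context"
	"log"
	"net/http"

	"github.com/caddyserver/certmagic"
)

func main() {
	certmagic.DefaultACME.Agreed = true
	certmagic.DefaultACME.Email = "admin@example.com" // placeholder

	cfg := certmagic.NewDefault()

	// Serve the HTTP challenge on port 80 so issuance can succeed.
	var handler http.Handler = http.NotFoundHandler()
	if len(cfg.Issuers) > 0 {
		if am, ok := cfg.Issuers[0].(*certmagic.ACMEIssuer); ok {
			handler = am.HTTPChallengeHandler(handler)
		}
	}
	go func() { log.Fatal(http.ListenAndServe(":80", handler)) }()

	// Obtain and keep renewing certificates, blocking until ready.
	if err := cfg.ManageSync(context.Background(), []string{"example.com"}); err != nil {
		log.Fatal(err)
	}

	// Serve HTTPS with certificates pulled from the same config.
	srv := &http.Server{Addr: ":443", TLSConfig: cfg.TLSConfig()}
	log.Fatal(srv.ListenAndServeTLS("", ""))
}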
// OnDemandConfig configures on-demand TLS (certificate
// operations as-needed, like during TLS handshakes,
// rather than immediately).
//
// When this package's high-level convenience functions
// are used (HTTPS, Manage, etc., where the Default
// config is used as a template), this struct regulates
// certificate operations using an implicit whitelist
// containing the names passed into those functions if
// no DecisionFunc is set. This ensures some degree of
// control by default to avoid certificate operations for
// arbitrary domain names. To override this whitelist,
// manually specify a DecisionFunc. To impose rate limits,
// specify your own DecisionFunc.
type OnDemandConfig struct {
// If set, this function will be called to determine
// whether a certificate can be obtained or renewed
// for the given name. If an error is returned, the
// request will be denied.
DecisionFunc func(name string) error
// List of whitelisted hostnames (SNI values) for
// deferred (on-demand) obtaining of certificates.
// Used only by higher-level functions in this
// package to persist the list of hostnames that
// the config is supposed to manage. This is done
// because it seems reasonable that if you say
// "Manage [domain names...]", then only those
// domain names should be able to have certs;
// we don't NEED this feature, but it makes sense
// for higher-level convenience functions to be
// able to retain their convenience (alternative
// is: the user manually creates a DecisionFunc
// that whitelists the same names it already
// passed into Manage) and without letting clients
// have their run of any domain names they want.
// Only enforced if len > 0.
hostWhitelist []string
}
func (o *OnDemandConfig) whitelistContains(name string) bool {
for _, n := range o.hostWhitelist {
if strings.EqualFold(n, name) {
return true
}
}
return false
}
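A sketch of on-demand issuance gated by a DecisionFunc; this assumes the Config's OnDemand field (declared in config.go, whose diff is suppressed below), and the zone is a placeholder:

package main

import (
	"fmt"
	"log"
	"net/http"
	"strings"

	"github.com/caddyserver/certmagic"
)

func main() {
	certmagic.DefaultACME.Agreed = true

	cfg := certmagic.NewDefault()
	cfg.OnDemand = &certmagic.OnDemandConfig{
		// Only allow certificates for names under a zone we control.
		DecisionFunc: func(name string) error {
			if name == "example.com" || strings.HasSuffix(name, ".example.com") {
				return nil
			}
			return fmt.Errorf("on-demand certificate not allowed for %q", name)
		},
	}

	// Certificates are then obtained lazily during TLS handshakes.
	srv := &http.Server{Addr: ":443", TLSConfig: cfg.TLSConfig()}
	log.Fatal(srv.ListenAndServeTLS("", ""))
}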
// isLoopback returns true if the hostname of addr looks
// explicitly like a common local hostname. addr must only
// be a host or a host:port combination.
func isLoopback(addr string) bool {
host := hostOnly(addr)
return host == "localhost" ||
strings.Trim(host, "[]") == "::1" ||
strings.HasPrefix(host, "127.")
}
// isInternal returns true if the IP of addr
// belongs to a private network IP range. addr
// must only be an IP or an IP:port combination.
// Loopback addresses are considered false.
func isInternal(addr string) bool {
privateNetworks := []string{
"10.0.0.0/8",
"172.16.0.0/12",
"192.168.0.0/16",
"fc00::/7",
}
host := hostOnly(addr)
ip := net.ParseIP(host)
if ip == nil {
return false
}
for _, privateNetwork := range privateNetworks {
_, ipnet, _ := net.ParseCIDR(privateNetwork)
if ipnet.Contains(ip) {
return true
}
}
return false
}
// hostOnly returns only the host portion of hostport.
// If there is no port or if there is an error splitting
// the port off, the whole input string is returned.
func hostOnly(hostport string) string {
host, _, err := net.SplitHostPort(hostport)
if err != nil {
return hostport // OK; probably had no port to begin with
}
return host
}
// PreChecker is an interface that can be optionally implemented by
// Issuers. Pre-checks are performed before each call (or batch of
// identical calls) to Issue(), giving the issuer the option to ensure
// it has all the necessary information/state.
type PreChecker interface {
PreCheck(ctx context.Context, names []string, interactive bool) error
}
// Issuer is a type that can issue certificates.
type Issuer interface {
// Issue obtains a certificate for the given CSR. It
// must honor context cancellation if it is long-running.
// It can also use the context to find out if the current
// call is part of a retry, via AttemptsCtxKey.
Issue(ctx context.Context, request *x509.CertificateRequest) (*IssuedCertificate, error)
// IssuerKey must return a string that uniquely identifies
// this particular configuration of the Issuer such that
// any certificates obtained by this Issuer will be treated
// as identical if they have the same SANs.
//
// Certificates obtained from Issuers with the same IssuerKey
// will overwrite others with the same SANs. For example, an
// Issuer might be able to obtain certificates from different
// CAs, say A and B. It is likely that the CAs have different
// use cases and purposes (e.g. testing and production), so
// their respective certificates should not overwrite each
// other.
IssuerKey() string
}
// Revoker can revoke certificates. Reason codes are defined
// by RFC 5280 §5.3.1: https://tools.ietf.org/html/rfc5280#section-5.3.1
// and are available as constants in our ACME library.
type Revoker interface {
Revoke(ctx context.Context, cert CertificateResource, reason int) error
}
// Manager is a type that manages certificates (keeps them renewed) such
// that we can get certificates during TLS handshakes to immediately serve
// to clients.
//
// TODO: This is an EXPERIMENTAL API. It is subject to change/removal.
type Manager interface {
// GetCertificate returns the certificate to use to complete the handshake.
// Since this is called during every TLS handshake, it must be very fast and not block.
// Returning (nil, nil) is valid and is simply treated as a no-op.
GetCertificate(context.Context, *tls.ClientHelloInfo) (*tls.Certificate, error)
}
// KeyGenerator can generate a private key.
type KeyGenerator interface {
// GenerateKey generates a private key. The returned
// PrivateKey must be able to expose its associated
// public key.
GenerateKey() (crypto.PrivateKey, error)
}
// IssuedCertificate represents a certificate that was just issued.
type IssuedCertificate struct {
// The PEM-encoding of DER-encoded ASN.1 data.
Certificate []byte
// Any extra information to serialize alongside the
// certificate in storage.
Metadata interface{}
}
// CertificateResource associates a certificate with its private
// key and other useful information, for use in maintaining the
// certificate.
type CertificateResource struct {
// The list of names on the certificate;
// for convenience only.
SANs []string `json:"sans,omitempty"`
// The PEM-encoding of DER-encoded ASN.1 data
// for the cert or chain.
CertificatePEM []byte `json:"-"`
// The PEM-encoding of the certificate's private key.
PrivateKeyPEM []byte `json:"-"`
// Any extra information associated with the certificate,
// usually provided by the issuer implementation.
IssuerData interface{} `json:"issuer_data,omitempty"`
// The unique string identifying the issuer of the
// certificate; internally useful for storage access.
issuerKey string `json:"-"`
}
// NamesKey returns the list of SANs as a single string,
// truncated to some ridiculously long size limit. It
// can act as a key for the set of names on the resource.
func (cr *CertificateResource) NamesKey() string {
sort.Strings(cr.SANs)
result := strings.Join(cr.SANs, ",")
if len(result) > 1024 {
const trunc = "_trunc"
result = result[:1024-len(trunc)] + trunc
}
return result
}
// Default contains the package defaults for the
// various Config fields. This is used as a template
// when creating your own Configs with New() or
// NewDefault(), and it is also used as the Config
// by all the high-level functions in this package
// that abstract away most configuration (HTTPS(),
// TLS(), Listen(), etc).
//
// The fields of this value will be used for Config
// fields which are unset. Feel free to modify these
// defaults, but do not use this Config by itself: it
// is only a template. Valid configurations can be
// obtained by calling New() (if you have your own
// certificate cache) or NewDefault() (if you only
// need a single config and want to use the default
// cache).
//
// Even if the Issuers or Storage fields are not set,
// defaults will be applied in the call to New().
var Default = Config{
RenewalWindowRatio: DefaultRenewalWindowRatio,
Storage: defaultFileStorage,
KeySource: DefaultKeyGenerator,
}
const (
// HTTPChallengePort is the officially-designated port for
// the HTTP challenge according to the ACME spec.
HTTPChallengePort = 80
// TLSALPNChallengePort is the officially-designated port for
// the TLS-ALPN challenge according to the ACME spec.
TLSALPNChallengePort = 443
)
// Port variables must remain their defaults unless you
// forward packets from the defaults to whatever these
// are set to; otherwise ACME challenges will fail.
var (
// HTTPPort is the port on which to serve HTTP
// and, as such, the HTTP challenge (unless
// Default.AltHTTPPort is set).
HTTPPort = 80
// HTTPSPort is the port on which to serve HTTPS
// and, as such, the TLS-ALPN challenge
// (unless Default.AltTLSALPNPort is set).
HTTPSPort = 443
)
// Variables for conveniently serving HTTPS.
var (
httpLn, httpsLn net.Listener
lnMu sync.Mutex
httpWg sync.WaitGroup
)
// Maximum size for the stack trace when recovering from panics.
const stackTraceBufferSize = 1024 * 128

1136
vendor/github.com/caddyserver/certmagic/config.go generated vendored Normal file

File diff suppressed because it is too large

368
vendor/github.com/caddyserver/certmagic/crypto.go generated vendored Normal file
View File

@@ -0,0 +1,368 @@
// Copyright 2015 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package certmagic
import (
"context"
"crypto"
"crypto/ecdsa"
"crypto/ed25519"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"crypto/sha256"
"crypto/tls"
"crypto/x509"
"encoding/json"
"encoding/pem"
"errors"
"fmt"
"hash/fnv"
"io/fs"
"sort"
"strings"
"github.com/klauspost/cpuid/v2"
"go.uber.org/zap"
"golang.org/x/net/idna"
)
// PEMEncodePrivateKey marshals a private key into a PEM-encoded block.
// The private key must be one of *ecdsa.PrivateKey, *rsa.PrivateKey, or
// *ed25519.PrivateKey.
func PEMEncodePrivateKey(key crypto.PrivateKey) ([]byte, error) {
var pemType string
var keyBytes []byte
switch key := key.(type) {
case *ecdsa.PrivateKey:
var err error
pemType = "EC"
keyBytes, err = x509.MarshalECPrivateKey(key)
if err != nil {
return nil, err
}
case *rsa.PrivateKey:
pemType = "RSA"
keyBytes = x509.MarshalPKCS1PrivateKey(key)
case ed25519.PrivateKey:
var err error
pemType = "ED25519"
keyBytes, err = x509.MarshalPKCS8PrivateKey(key)
if err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("unsupported key type: %T", key)
}
pemKey := pem.Block{Type: pemType + " PRIVATE KEY", Bytes: keyBytes}
return pem.EncodeToMemory(&pemKey), nil
}
// PEMDecodePrivateKey loads a PEM-encoded ECC/RSA private key from an array of bytes.
// Borrowed from Go standard library, to handle various private key and PEM block types.
func PEMDecodePrivateKey(keyPEMBytes []byte) (crypto.Signer, error) {
// Modified from original:
// https://github.com/golang/go/blob/693748e9fa385f1e2c3b91ca9acbb6c0ad2d133d/src/crypto/tls/tls.go#L291-L308
// https://github.com/golang/go/blob/693748e9fa385f1e2c3b91ca9acbb6c0ad2d133d/src/crypto/tls/tls.go#L238
keyBlockDER, _ := pem.Decode(keyPEMBytes)
if keyBlockDER == nil {
return nil, fmt.Errorf("failed to decode PEM block containing private key")
}
if keyBlockDER.Type != "PRIVATE KEY" && !strings.HasSuffix(keyBlockDER.Type, " PRIVATE KEY") {
return nil, fmt.Errorf("unknown PEM header %q", keyBlockDER.Type)
}
if key, err := x509.ParsePKCS1PrivateKey(keyBlockDER.Bytes); err == nil {
return key, nil
}
if key, err := x509.ParsePKCS8PrivateKey(keyBlockDER.Bytes); err == nil {
switch key := key.(type) {
case *rsa.PrivateKey, *ecdsa.PrivateKey, ed25519.PrivateKey:
return key.(crypto.Signer), nil
default:
return nil, fmt.Errorf("found unknown private key type in PKCS#8 wrapping: %T", key)
}
}
if key, err := x509.ParseECPrivateKey(keyBlockDER.Bytes); err == nil {
return key, nil
}
return nil, fmt.Errorf("unknown private key type")
}
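A short round-trip sketch with the two PEM helpers above:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
	"log"

	"github.com/caddyserver/certmagic"
)

func main() {
	// Generate a P-256 key, encode it to PEM, then decode it back.
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}

	pemBytes, err := certmagic.PEMEncodePrivateKey(key)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s", pemBytes) // -----BEGIN EC PRIVATE KEY----- ...

	signer, err := certmagic.PEMDecodePrivateKey(pemBytes)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("decoded key type: %T\n", signer) // *ecdsa.PrivateKey
}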
// parseCertsFromPEMBundle parses a certificate bundle from top to bottom and returns
// a slice of x509 certificates. This function will error if no certificates are found.
func parseCertsFromPEMBundle(bundle []byte) ([]*x509.Certificate, error) {
var certificates []*x509.Certificate
var certDERBlock *pem.Block
for {
certDERBlock, bundle = pem.Decode(bundle)
if certDERBlock == nil {
break
}
if certDERBlock.Type == "CERTIFICATE" {
cert, err := x509.ParseCertificate(certDERBlock.Bytes)
if err != nil {
return nil, err
}
certificates = append(certificates, cert)
}
}
if len(certificates) == 0 {
return nil, fmt.Errorf("no certificates found in bundle")
}
return certificates, nil
}
// fastHash hashes input using a hashing algorithm that
// is fast, and returns the hash as a hex-encoded string.
// Do not use this for cryptographic purposes.
func fastHash(input []byte) string {
h := fnv.New32a()
h.Write(input)
return fmt.Sprintf("%x", h.Sum32())
}
// saveCertResource saves the certificate resource to disk. This
// includes the certificate file itself, the private key, and the
// metadata file.
func (cfg *Config) saveCertResource(ctx context.Context, issuer Issuer, cert CertificateResource) error {
metaBytes, err := json.MarshalIndent(cert, "", "\t")
if err != nil {
return fmt.Errorf("encoding certificate metadata: %v", err)
}
issuerKey := issuer.IssuerKey()
certKey := cert.NamesKey()
all := []keyValue{
{
key: StorageKeys.SitePrivateKey(issuerKey, certKey),
value: cert.PrivateKeyPEM,
},
{
key: StorageKeys.SiteCert(issuerKey, certKey),
value: cert.CertificatePEM,
},
{
key: StorageKeys.SiteMeta(issuerKey, certKey),
value: metaBytes,
},
}
return storeTx(ctx, cfg.Storage, all)
}
// loadCertResourceAnyIssuer loads and returns the certificate resource from any
// of the configured issuers. If multiple are found (e.g. if there are 3 issuers
// configured, and all 3 have a resource matching certNamesKey), then the newest
// (latest NotBefore date) resource will be chosen.
func (cfg *Config) loadCertResourceAnyIssuer(ctx context.Context, certNamesKey string) (CertificateResource, error) {
// we can save some extra decoding steps if there's only one issuer, since
// we don't need to compare potentially multiple available resources to
// select the best one, when there's only one choice anyway
if len(cfg.Issuers) == 1 {
return cfg.loadCertResource(ctx, cfg.Issuers[0], certNamesKey)
}
type decodedCertResource struct {
CertificateResource
issuer Issuer
decoded *x509.Certificate
}
var certResources []decodedCertResource
var lastErr error
// load and decode all certificate resources found with the
// configured issuers so we can sort by newest
for _, issuer := range cfg.Issuers {
certRes, err := cfg.loadCertResource(ctx, issuer, certNamesKey)
if err != nil {
if errors.Is(err, fs.ErrNotExist) {
// not a problem, but we need to remember the error
// in case we end up not finding any cert resources
// since we'll need an error to return in that case
lastErr = err
continue
}
return CertificateResource{}, err
}
certs, err := parseCertsFromPEMBundle(certRes.CertificatePEM)
if err != nil {
return CertificateResource{}, err
}
certResources = append(certResources, decodedCertResource{
CertificateResource: certRes,
issuer: issuer,
decoded: certs[0],
})
}
if len(certResources) == 0 {
if lastErr == nil {
lastErr = fmt.Errorf("no certificate resources found") // just in case; e.g. no Issuers configured
}
return CertificateResource{}, lastErr
}
// sort by date so the most recently issued comes first
sort.Slice(certResources, func(i, j int) bool {
return certResources[j].decoded.NotBefore.Before(certResources[i].decoded.NotBefore)
})
if cfg.Logger != nil {
cfg.Logger.Debug("loading managed certificate",
zap.String("domain", certNamesKey),
zap.Time("expiration", certResources[0].decoded.NotAfter),
zap.String("issuer_key", certResources[0].issuer.IssuerKey()),
zap.Any("storage", cfg.Storage),
)
}
return certResources[0].CertificateResource, nil
}
// loadCertResource loads a certificate resource from the given issuer's storage location.
func (cfg *Config) loadCertResource(ctx context.Context, issuer Issuer, certNamesKey string) (CertificateResource, error) {
certRes := CertificateResource{issuerKey: issuer.IssuerKey()}
normalizedName, err := idna.ToASCII(certNamesKey)
if err != nil {
return CertificateResource{}, fmt.Errorf("converting '%s' to ASCII: %v", certNamesKey, err)
}
keyBytes, err := cfg.Storage.Load(ctx, StorageKeys.SitePrivateKey(certRes.issuerKey, normalizedName))
if err != nil {
return CertificateResource{}, err
}
certRes.PrivateKeyPEM = keyBytes
certBytes, err := cfg.Storage.Load(ctx, StorageKeys.SiteCert(certRes.issuerKey, normalizedName))
if err != nil {
return CertificateResource{}, err
}
certRes.CertificatePEM = certBytes
metaBytes, err := cfg.Storage.Load(ctx, StorageKeys.SiteMeta(certRes.issuerKey, normalizedName))
if err != nil {
return CertificateResource{}, err
}
err = json.Unmarshal(metaBytes, &certRes)
if err != nil {
return CertificateResource{}, fmt.Errorf("decoding certificate metadata: %v", err)
}
return certRes, nil
}
// hashCertificateChain computes the unique hash of certChain,
// which is the chain of DER-encoded bytes. It returns the
// hex encoding of the hash.
func hashCertificateChain(certChain [][]byte) string {
h := sha256.New()
for _, certInChain := range certChain {
h.Write(certInChain)
}
return fmt.Sprintf("%x", h.Sum(nil))
}
func namesFromCSR(csr *x509.CertificateRequest) []string {
var nameSet []string
nameSet = append(nameSet, csr.DNSNames...)
nameSet = append(nameSet, csr.EmailAddresses...)
for _, v := range csr.IPAddresses {
nameSet = append(nameSet, v.String())
}
for _, v := range csr.URIs {
nameSet = append(nameSet, v.String())
}
return nameSet
}
// preferredDefaultCipherSuites returns an appropriate
// cipher suite to use depending on hardware support
// for AES-NI.
//
// See https://github.com/mholt/caddy/issues/1674
func preferredDefaultCipherSuites() []uint16 {
if cpuid.CPU.Supports(cpuid.AESNI) {
return defaultCiphersPreferAES
}
return defaultCiphersPreferChaCha
}
var (
defaultCiphersPreferAES = []uint16{
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
}
defaultCiphersPreferChaCha = []uint16{
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
}
)
// StandardKeyGenerator is the standard, in-memory key source
// that uses crypto/rand.
type StandardKeyGenerator struct {
// The type of keys to generate.
KeyType KeyType
}
// GenerateKey generates a new private key according to kg.KeyType.
func (kg StandardKeyGenerator) GenerateKey() (crypto.PrivateKey, error) {
switch kg.KeyType {
case ED25519:
_, priv, err := ed25519.GenerateKey(rand.Reader)
return priv, err
case "", P256:
return ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
case P384:
return ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
case RSA2048:
return rsa.GenerateKey(rand.Reader, 2048)
case RSA4096:
return rsa.GenerateKey(rand.Reader, 4096)
case RSA8192:
return rsa.GenerateKey(rand.Reader, 8192)
}
return nil, fmt.Errorf("unrecognized or unsupported key type: %s", kg.KeyType)
}
// DefaultKeyGenerator is the default key source.
var DefaultKeyGenerator = StandardKeyGenerator{KeyType: P256}
// KeyType enumerates the known/supported key types.
type KeyType string
// Constants for all key types we support.
const (
ED25519 = KeyType("ed25519")
P256 = KeyType("p256")
P384 = KeyType("p384")
RSA2048 = KeyType("rsa2048")
RSA4096 = KeyType("rsa4096")
RSA8192 = KeyType("rsa8192")
)
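A short usage sketch of StandardKeyGenerator and the key type constants above:

package main

import (
	"fmt"
	"log"

	"github.com/caddyserver/certmagic"
)

func main() {
	// The default generator produces P-256 ECDSA keys.
	key, err := certmagic.DefaultKeyGenerator.GenerateKey()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%T\n", key) // *ecdsa.PrivateKey

	// Other key types are selected via the KeyType field.
	edGen := certmagic.StandardKeyGenerator{KeyType: certmagic.ED25519}
	edKey, err := edGen.GenerateKey()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%T\n", edKey) // ed25519.PrivateKey
}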

342
vendor/github.com/caddyserver/certmagic/dnsutil.go generated vendored Normal file
View File

@@ -0,0 +1,342 @@
package certmagic
import (
"errors"
"fmt"
"net"
"strings"
"sync"
"time"
"github.com/miekg/dns"
)
// Code in this file adapted from go-acme/lego, July 2020:
// https://github.com/go-acme/lego
// by Ludovic Fernandez and Dominik Menke
//
// It has been modified.
// findZoneByFQDN determines the zone apex for the given fqdn by recursing
// up the domain labels until the nameserver returns a SOA record in the
// answer section.
func findZoneByFQDN(fqdn string, nameservers []string) (string, error) {
if !strings.HasSuffix(fqdn, ".") {
fqdn += "."
}
soa, err := lookupSoaByFqdn(fqdn, nameservers)
if err != nil {
return "", err
}
return soa.zone, nil
}
func lookupSoaByFqdn(fqdn string, nameservers []string) (*soaCacheEntry, error) {
if !strings.HasSuffix(fqdn, ".") {
fqdn += "."
}
fqdnSOACacheMu.Lock()
defer fqdnSOACacheMu.Unlock()
// prefer cached version if fresh
if ent := fqdnSOACache[fqdn]; ent != nil && !ent.isExpired() {
return ent, nil
}
ent, err := fetchSoaByFqdn(fqdn, nameservers)
if err != nil {
return nil, err
}
// save result to cache, but don't allow
// the cache to grow out of control
if len(fqdnSOACache) >= 1000 {
for key := range fqdnSOACache {
delete(fqdnSOACache, key)
break
}
}
fqdnSOACache[fqdn] = ent
return ent, nil
}
func fetchSoaByFqdn(fqdn string, nameservers []string) (*soaCacheEntry, error) {
var err error
var in *dns.Msg
labelIndexes := dns.Split(fqdn)
for _, index := range labelIndexes {
domain := fqdn[index:]
in, err = dnsQuery(domain, dns.TypeSOA, nameservers, true)
if err != nil {
continue
}
if in == nil {
continue
}
switch in.Rcode {
case dns.RcodeSuccess:
// Check if we got a SOA RR in the answer section
if len(in.Answer) == 0 {
continue
}
// CNAME records cannot/should not exist at the root of a zone.
// So we skip a domain when a CNAME is found.
if dnsMsgContainsCNAME(in) {
continue
}
for _, ans := range in.Answer {
if soa, ok := ans.(*dns.SOA); ok {
return newSoaCacheEntry(soa), nil
}
}
case dns.RcodeNameError:
// NXDOMAIN
default:
// Any response code other than NOERROR and NXDOMAIN is treated as error
return nil, fmt.Errorf("unexpected response code '%s' for %s", dns.RcodeToString[in.Rcode], domain)
}
}
return nil, fmt.Errorf("could not find the start of authority for %s%s", fqdn, formatDNSError(in, err))
}
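The label walk in fetchSoaByFqdn tries progressively shorter suffixes of the FQDN until a nameserver answers with a SOA record; a standalone sketch of just that walk (placeholder name):

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	fqdn := "_acme-challenge.sub.example.com." // placeholder FQDN
	// dns.Split returns the offset of each label, so slicing from each
	// offset yields the names queried for a SOA record, in order.
	for _, index := range dns.Split(fqdn) {
		fmt.Println(fqdn[index:])
	}
	// _acme-challenge.sub.example.com.
	// sub.example.com.
	// example.com.
	// com.
}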
// dnsMsgContainsCNAME checks for a CNAME answer in msg
func dnsMsgContainsCNAME(msg *dns.Msg) bool {
for _, ans := range msg.Answer {
if _, ok := ans.(*dns.CNAME); ok {
return true
}
}
return false
}
func dnsQuery(fqdn string, rtype uint16, nameservers []string, recursive bool) (*dns.Msg, error) {
m := createDNSMsg(fqdn, rtype, recursive)
var in *dns.Msg
var err error
for _, ns := range nameservers {
in, err = sendDNSQuery(m, ns)
if err == nil && len(in.Answer) > 0 {
break
}
}
return in, err
}
func createDNSMsg(fqdn string, rtype uint16, recursive bool) *dns.Msg {
m := new(dns.Msg)
m.SetQuestion(fqdn, rtype)
// See: https://caddy.community/t/hard-time-getting-a-response-on-a-dns-01-challenge/15721/16
m.SetEdns0(1232, false)
if !recursive {
m.RecursionDesired = false
}
return m
}
func sendDNSQuery(m *dns.Msg, ns string) (*dns.Msg, error) {
udp := &dns.Client{Net: "udp", Timeout: dnsTimeout}
in, _, err := udp.Exchange(m, ns)
// two kinds of errors we can handle by retrying with TCP:
// truncation and timeout; see https://github.com/caddyserver/caddy/issues/3639
truncated := in != nil && in.Truncated
timeoutErr := err != nil && strings.Contains(err.Error(), "timeout")
if truncated || timeoutErr {
tcp := &dns.Client{Net: "tcp", Timeout: dnsTimeout}
in, _, err = tcp.Exchange(m, ns)
}
return in, err
}
func formatDNSError(msg *dns.Msg, err error) string {
var parts []string
if msg != nil {
parts = append(parts, dns.RcodeToString[msg.Rcode])
}
if err != nil {
parts = append(parts, err.Error())
}
if len(parts) > 0 {
return ": " + strings.Join(parts, " ")
}
return ""
}
// soaCacheEntry holds a cached SOA record (only selected fields)
type soaCacheEntry struct {
zone string // zone apex (a domain name)
primaryNs string // primary nameserver for the zone apex
expires time.Time // time when this cache entry should be evicted
}
func newSoaCacheEntry(soa *dns.SOA) *soaCacheEntry {
return &soaCacheEntry{
zone: soa.Hdr.Name,
primaryNs: soa.Ns,
expires: time.Now().Add(time.Duration(soa.Refresh) * time.Second),
}
}
// isExpired checks whether a cache entry should be considered expired.
func (cache *soaCacheEntry) isExpired() bool {
return time.Now().After(cache.expires)
}
// systemOrDefaultNameservers attempts to get system nameservers from the
// resolv.conf file given by path before falling back to hard-coded defaults.
func systemOrDefaultNameservers(path string, defaults []string) []string {
config, err := dns.ClientConfigFromFile(path)
if err != nil || len(config.Servers) == 0 {
return defaults
}
return config.Servers
}
// populateNameserverPorts ensures that all nameservers have a port number.
func populateNameserverPorts(servers []string) {
for i := range servers {
_, port, _ := net.SplitHostPort(servers[i])
if port == "" {
servers[i] = net.JoinHostPort(servers[i], "53")
}
}
}
// checkDNSPropagation checks if the expected TXT record has been propagated to all authoritative nameservers.
func checkDNSPropagation(fqdn, value string, resolvers []string) (bool, error) {
if !strings.HasSuffix(fqdn, ".") {
fqdn += "."
}
// Initial attempt to resolve at the recursive NS
r, err := dnsQuery(fqdn, dns.TypeTXT, resolvers, true)
if err != nil {
return false, err
}
if r.Rcode == dns.RcodeSuccess {
fqdn = updateDomainWithCName(r, fqdn)
}
authoritativeNss, err := lookupNameservers(fqdn, resolvers)
if err != nil {
return false, err
}
return checkAuthoritativeNss(fqdn, value, authoritativeNss)
}
// checkAuthoritativeNss queries each of the given nameservers for the expected TXT record.
func checkAuthoritativeNss(fqdn, value string, nameservers []string) (bool, error) {
for _, ns := range nameservers {
r, err := dnsQuery(fqdn, dns.TypeTXT, []string{net.JoinHostPort(ns, "53")}, false)
if err != nil {
return false, err
}
if r.Rcode != dns.RcodeSuccess {
if r.Rcode == dns.RcodeNameError {
// if Present() succeeded, then it must show up eventually, or else
// something is really broken in the DNS provider or their API;
// no need for error here, simply have the caller try again
return false, nil
}
return false, fmt.Errorf("NS %s returned %s for %s", ns, dns.RcodeToString[r.Rcode], fqdn)
}
var found bool
for _, rr := range r.Answer {
if txt, ok := rr.(*dns.TXT); ok {
record := strings.Join(txt.Txt, "")
if record == value {
found = true
break
}
}
}
if !found {
return false, nil
}
}
return true, nil
}
// lookupNameservers returns the authoritative nameservers for the given fqdn.
func lookupNameservers(fqdn string, resolvers []string) ([]string, error) {
var authoritativeNss []string
zone, err := findZoneByFQDN(fqdn, resolvers)
if err != nil {
return nil, fmt.Errorf("could not determine the zone: %w", err)
}
r, err := dnsQuery(zone, dns.TypeNS, resolvers, true)
if err != nil {
return nil, err
}
for _, rr := range r.Answer {
if ns, ok := rr.(*dns.NS); ok {
authoritativeNss = append(authoritativeNss, strings.ToLower(ns.Ns))
}
}
if len(authoritativeNss) > 0 {
return authoritativeNss, nil
}
return nil, errors.New("could not determine authoritative nameservers")
}
// Update FQDN with CNAME if any
func updateDomainWithCName(r *dns.Msg, fqdn string) string {
for _, rr := range r.Answer {
if cn, ok := rr.(*dns.CNAME); ok {
if cn.Hdr.Name == fqdn {
return cn.Target
}
}
}
return fqdn
}
// recursiveNameservers are used to pre-check DNS propagation. It
// picks user-configured nameservers (custom) OR the defaults
// obtained from resolv.conf and defaultNameservers if none is
// configured and ensures that all server addresses have a port value.
func recursiveNameservers(custom []string) []string {
var servers []string
if len(custom) == 0 {
servers = systemOrDefaultNameservers(defaultResolvConf, defaultNameservers)
} else {
servers = make([]string, len(custom))
copy(servers, custom)
}
populateNameserverPorts(servers)
return servers
}
var defaultNameservers = []string{
"8.8.8.8:53",
"8.8.4.4:53",
"1.1.1.1:53",
"1.0.0.1:53",
}
var dnsTimeout = 10 * time.Second
var (
fqdnSOACache = map[string]*soaCacheEntry{}
fqdnSOACacheMu sync.Mutex
)
const defaultResolvConf = "/etc/resolv.conf"

404
vendor/github.com/caddyserver/certmagic/filestorage.go generated vendored Normal file
View File

@@ -0,0 +1,404 @@
// Copyright 2015 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package certmagic
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"io/fs"
"log"
"os"
"path"
"path/filepath"
"runtime"
"time"
)
// FileStorage facilitates forming file paths derived from a root
// directory. It is used to get file paths in a consistent,
// cross-platform way of persisting ACME assets on the file system.
// The presence of a lock file for a given key indicates a lock
// is held and is thus unavailable.
//
// Locks are created atomically by relying on the file system to
// enforce the O_EXCL flag. Acquirers that are forcefully terminated
// will not have a chance to clean up their locks before they exit,
// so locks may become stale. That is why, while a lock is actively
// held, the contents of the lockfile are updated with the current
// timestamp periodically. If another instance tries to acquire the
// lock but fails, it can see if the timestamp within is still fresh.
// If so, it patiently waits by polling occasionally. Otherwise,
// the stale lockfile is deleted, essentially forcing an unlock.
//
// While locking is atomic, unlocking is not perfectly atomic. File
// systems offer native atomic operations when creating files, but
// not necessarily when deleting them. It is theoretically possible
// for two instances to discover the same stale lock and both proceed
// to delete it, but if one instance is able to delete the lockfile
// and create a new one before the other one calls delete, then the
// new lock file created by the first instance will get deleted by
// mistake. This does mean that mutual exclusion is not guaranteed
// to be perfectly enforced in the presence of stale locks. One
// alternative is to lock the unlock operation by using ".unlock"
// files; and we did this for some time, but those files themselves
// may become stale, leading applications into infinite loops if
// they always expect the unlock file to be deleted by the instance
// that created it. We instead prefer the simpler solution that
// implies imperfect mutual exclusion if locks become stale, but
// that is probably less severe a consequence than infinite loops.
//
// See https://github.com/caddyserver/caddy/issues/4448 for discussion.
// See commit 468bfd25e452196b140148928cdd1f1a2285ae4b for where we
// switched away from using .unlock files.
type FileStorage struct {
Path string
}
// Exists returns true if key exists in s.
func (s *FileStorage) Exists(_ context.Context, key string) bool {
_, err := os.Stat(s.Filename(key))
return !errors.Is(err, fs.ErrNotExist)
}
// Store saves value at key.
func (s *FileStorage) Store(_ context.Context, key string, value []byte) error {
filename := s.Filename(key)
err := os.MkdirAll(filepath.Dir(filename), 0700)
if err != nil {
return err
}
return os.WriteFile(filename, value, 0600)
}
// Load retrieves the value at key.
func (s *FileStorage) Load(_ context.Context, key string) ([]byte, error) {
return os.ReadFile(s.Filename(key))
}
// Delete deletes the value at key.
func (s *FileStorage) Delete(_ context.Context, key string) error {
return os.Remove(s.Filename(key))
}
// List returns all keys that match prefix.
func (s *FileStorage) List(ctx context.Context, prefix string, recursive bool) ([]string, error) {
var keys []string
walkPrefix := s.Filename(prefix)
err := filepath.Walk(walkPrefix, func(fpath string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info == nil {
return fmt.Errorf("%s: file info is nil", fpath)
}
if fpath == walkPrefix {
return nil
}
if ctxErr := ctx.Err(); ctxErr != nil {
return ctxErr
}
suffix, err := filepath.Rel(walkPrefix, fpath)
if err != nil {
return fmt.Errorf("%s: could not make path relative: %v", fpath, err)
}
keys = append(keys, path.Join(prefix, suffix))
if !recursive && info.IsDir() {
return filepath.SkipDir
}
return nil
})
return keys, err
}
// Stat returns information about key.
func (s *FileStorage) Stat(_ context.Context, key string) (KeyInfo, error) {
fi, err := os.Stat(s.Filename(key))
if err != nil {
return KeyInfo{}, err
}
return KeyInfo{
Key: key,
Modified: fi.ModTime(),
Size: fi.Size(),
IsTerminal: !fi.IsDir(),
}, nil
}
// Filename returns the key as a path on the file
// system prefixed by s.Path.
func (s *FileStorage) Filename(key string) string {
return filepath.Join(s.Path, filepath.FromSlash(key))
}
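A short usage sketch of FileStorage with a placeholder root directory; keys use forward slashes and map onto files under Path:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/caddyserver/certmagic"
)

func main() {
	storage := &certmagic.FileStorage{Path: "/tmp/certmagic-demo"} // placeholder
	ctx := context.Background()

	if err := storage.Store(ctx, "acme/example/test.txt", []byte("hello")); err != nil {
		log.Fatal(err)
	}

	value, err := storage.Load(ctx, "acme/example/test.txt")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s -> %s\n", storage.Filename("acme/example/test.txt"), value)

	keys, err := storage.List(ctx, "acme", true)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(keys) // [acme/example acme/example/test.txt]
}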
// Lock obtains a lock named by the given key. It blocks
// until the lock can be obtained or an error is returned.
func (s *FileStorage) Lock(ctx context.Context, key string) error {
filename := s.lockFilename(key)
for {
err := createLockfile(filename)
if err == nil {
// got the lock, yay
return nil
}
if !os.IsExist(err) {
// unexpected error
return fmt.Errorf("creating lock file: %v", err)
}
// lock file already exists
var meta lockMeta
f, err := os.Open(filename)
if err == nil {
err2 := json.NewDecoder(f).Decode(&meta)
f.Close()
if err2 != nil {
return fmt.Errorf("decoding lockfile contents: %w", err2)
}
}
switch {
case os.IsNotExist(err):
// must have just been removed; try again to create it
continue
case err != nil:
// unexpected error
return fmt.Errorf("accessing lock file: %v", err)
case fileLockIsStale(meta):
// lock file is stale - delete it and try again to obtain lock
// (NOTE: locking becomes imperfect if lock files are stale; known solutions
// either have potential to cause infinite loops, as in caddyserver/caddy#4448,
// or must give up on perfect mutual exclusivity; however, these cases are rare,
// so we prefer the simpler solution that avoids infinite loops)
log.Printf("[INFO][%s] Lock for '%s' is stale (created: %s, last update: %s); removing then retrying: %s",
s, key, meta.Created, meta.Updated, filename)
if err = os.Remove(filename); err != nil { // hopefully we can replace the lock file quickly!
if !errors.Is(err, fs.ErrNotExist) {
return fmt.Errorf("unable to delete stale lock; deadlocked: %w", err)
}
}
continue
default:
// lockfile exists and is not stale;
// just wait a moment and try again,
// or return if context cancelled
select {
case <-time.After(fileLockPollInterval):
case <-ctx.Done():
return ctx.Err()
}
}
}
}
// Unlock releases the lock for name.
func (s *FileStorage) Unlock(_ context.Context, key string) error {
return os.Remove(s.lockFilename(key))
}
func (s *FileStorage) String() string {
return "FileStorage:" + s.Path
}
func (s *FileStorage) lockFilename(key string) string {
return filepath.Join(s.lockDir(), StorageKeys.Safe(key)+".lock")
}
func (s *FileStorage) lockDir() string {
return filepath.Join(s.Path, "locks")
}
func fileLockIsStale(meta lockMeta) bool {
ref := meta.Updated
if ref.IsZero() {
ref = meta.Created
}
// since updates are exactly every lockFreshnessInterval,
// add a grace period for the actual file read+write to
// take place
return time.Since(ref) > lockFreshnessInterval*2
}
// createLockfile atomically creates the lockfile
// identified by filename. A successfully created
// lockfile should be removed with removeLockfile.
func createLockfile(filename string) error {
err := atomicallyCreateFile(filename, true)
if err != nil {
return err
}
go keepLockfileFresh(filename)
return nil
}
// keepLockfileFresh continuously updates the lock file
// at filename with the current timestamp. It stops
// when the file disappears (happy path = lock released),
// or when there is an error at any point. Since it polls
// every lockFreshnessInterval, this function might
// not terminate until up to lockFreshnessInterval after
// the lock is released.
func keepLockfileFresh(filename string) {
defer func() {
if err := recover(); err != nil {
buf := make([]byte, stackTraceBufferSize)
buf = buf[:runtime.Stack(buf, false)]
log.Printf("panic: active locking: %v\n%s", err, buf)
}
}()
for {
time.Sleep(lockFreshnessInterval)
done, err := updateLockfileFreshness(filename)
if err != nil {
log.Printf("[ERROR] Keeping lock file fresh: %v - terminating lock maintenance (lockfile: %s)", err, filename)
return
}
if done {
return
}
}
}
// updateLockfileFreshness updates the lock file at filename
// with the current timestamp. It returns true if the parent
// loop can terminate (i.e. no more need to update the lock).
func updateLockfileFreshness(filename string) (bool, error) {
f, err := os.OpenFile(filename, os.O_RDWR, 0644)
if os.IsNotExist(err) {
return true, nil // lock released
}
if err != nil {
return true, err
}
defer f.Close()
// read contents
metaBytes, err := io.ReadAll(io.LimitReader(f, 2048))
if err != nil {
return true, err
}
var meta lockMeta
if err := json.Unmarshal(metaBytes, &meta); err != nil {
return true, err
}
// truncate file and reset I/O offset to beginning
if err := f.Truncate(0); err != nil {
return true, err
}
if _, err := f.Seek(0, io.SeekStart); err != nil {
return true, err
}
// write updated timestamp
meta.Updated = time.Now()
if err = json.NewEncoder(f).Encode(meta); err != nil {
return false, err
}
// sync to device; we suspect that sometimes file systems
// (particularly AWS EFS) don't do this on their own,
// leaving the file empty when we close it; see
// https://github.com/caddyserver/caddy/issues/3954
return false, f.Sync()
}
// atomicallyCreateFile atomically creates the file
// identified by filename if it doesn't already exist.
func atomicallyCreateFile(filename string, writeLockInfo bool) error {
// no need to check this error, we only really care about the file creation error
_ = os.MkdirAll(filepath.Dir(filename), 0700)
f, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY|os.O_EXCL, 0644)
if err != nil {
return err
}
defer f.Close()
if writeLockInfo {
now := time.Now()
meta := lockMeta{
Created: now,
Updated: now,
}
if err := json.NewEncoder(f).Encode(meta); err != nil {
return err
}
// see https://github.com/caddyserver/caddy/issues/3954
if err := f.Sync(); err != nil {
return err
}
}
return nil
}
// homeDir returns the best guess of the current user's home
// directory from environment variables. If unknown, "." (the
// current directory) is returned instead.
func homeDir() string {
home := os.Getenv("HOME")
if home == "" && runtime.GOOS == "windows" {
drive := os.Getenv("HOMEDRIVE")
path := os.Getenv("HOMEPATH")
home = drive + path
if drive == "" || path == "" {
home = os.Getenv("USERPROFILE")
}
}
if home == "" {
home = "."
}
return home
}
func dataDir() string {
baseDir := filepath.Join(homeDir(), ".local", "share")
if xdgData := os.Getenv("XDG_DATA_HOME"); xdgData != "" {
baseDir = xdgData
}
return filepath.Join(baseDir, "certmagic")
}
// lockMeta is written into a lock file.
type lockMeta struct {
Created time.Time `json:"created,omitempty"`
Updated time.Time `json:"updated,omitempty"`
}
// lockFreshnessInterval is how often to update
// a lock's timestamp. Locks with a timestamp
// more than this duration in the past (plus a
// grace period for latency) can be considered
// stale.
const lockFreshnessInterval = 5 * time.Second
// fileLockPollInterval is how frequently
// to check the existence of a lock file
const fileLockPollInterval = 1 * time.Second
// Interface guard
var _ Storage = (*FileStorage)(nil)
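
A minimal usage sketch of the FileStorage type above, exercising only the exported methods and the Path field shown in this file; the directory, key names and payload are illustrative.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/caddyserver/certmagic"
)

func main() {
	ctx := context.Background()

	// File-system-backed storage rooted at an illustrative directory.
	storage := &certmagic.FileStorage{Path: "/tmp/certmagic-example"}

	// Locks guard multi-step operations across instances sharing this path.
	if err := storage.Lock(ctx, "example-op"); err != nil {
		log.Fatal(err)
	}
	defer storage.Unlock(ctx, "example-op")

	// Store/Load round-trip; keys use '/' separators and map onto the file system.
	key := "acme/example.com/example.txt"
	if err := storage.Store(ctx, key, []byte("hello")); err != nil {
		log.Fatal(err)
	}
	value, err := storage.Load(ctx, key)
	if err != nil {
		log.Fatal(err)
	}

	info, err := storage.Stat(ctx, key)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("loaded %q (%d bytes, modified %s)\n", value, info.Size, info.Modified)
}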

817
vendor/github.com/caddyserver/certmagic/handshake.go generated vendored Normal file

@@ -0,0 +1,817 @@
// Copyright 2015 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package certmagic
import (
"context"
"crypto/tls"
"errors"
"fmt"
"io/fs"
"net"
"strings"
"sync"
"time"
"github.com/mholt/acmez"
"go.uber.org/zap"
"golang.org/x/crypto/ocsp"
)
// GetCertificate gets a certificate to satisfy clientHello. In getting
// the certificate, it abides the rules and settings defined in the Config
// that matches clientHello.ServerName. It tries to get certificates in
// this order:
//
// 1. Exact match in the in-memory cache
// 2. Wildcard match in the in-memory cache
// 3. Managers (if any)
// 4. Storage (if on-demand is enabled)
// 5. Issuers (if on-demand is enabled)
//
// This method is safe for use as a tls.Config.GetCertificate callback.
func (cfg *Config) GetCertificate(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
cfg.emit("tls_handshake_started", clientHello)
// special case: serve up the certificate for a TLS-ALPN ACME challenge
// (https://tools.ietf.org/html/draft-ietf-acme-tls-alpn-05)
for _, proto := range clientHello.SupportedProtos {
if proto == acmez.ACMETLS1Protocol {
challengeCert, distributed, err := cfg.getTLSALPNChallengeCert(clientHello)
if err != nil {
if cfg.Logger != nil {
cfg.Logger.Error("tls-alpn challenge",
zap.String("server_name", clientHello.ServerName),
zap.Error(err))
}
return nil, err
}
if cfg.Logger != nil {
cfg.Logger.Info("served key authentication certificate",
zap.String("server_name", clientHello.ServerName),
zap.String("challenge", "tls-alpn-01"),
zap.String("remote", clientHello.Conn.RemoteAddr().String()),
zap.Bool("distributed", distributed))
}
return challengeCert, nil
}
}
// get the certificate and serve it up
cert, err := cfg.getCertDuringHandshake(clientHello, true, true)
if err == nil {
cfg.emit("tls_handshake_completed", clientHello)
}
return &cert.Certificate, err
}
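
As the comment above notes, this method is safe to use directly as a tls.Config.GetCertificate callback. A minimal sketch; certmagic.NewDefault is an assumed exported constructor not defined in this hunk.

package main

import (
	"crypto/tls"
	"log"
	"net"

	"github.com/caddyserver/certmagic"
)

// listenTLS is a sketch of plugging GetCertificate into a stock tls.Config.
func listenTLS(addr string) (net.Listener, error) {
	cfg := certmagic.NewDefault() // assumed constructor; returns a *Config like the one above

	tlsConfig := &tls.Config{
		// Certificates are resolved per handshake in the order documented above:
		// cache, managers, storage, then issuers (when on-demand is enabled).
		GetCertificate: cfg.GetCertificate,
		MinVersion:     tls.VersionTLS12,
	}
	return tls.Listen("tcp", addr, tlsConfig)
}

func main() {
	ln, err := listenTLS(":443")
	if err != nil {
		log.Fatal(err)
	}
	defer ln.Close()
}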
// getCertificateFromCache gets a certificate that matches name from the in-memory
// cache, according to the lookup table associated with cfg. The lookup then
// points to a certificate in the Instance certificate cache.
//
// The name is expected to already be normalized (e.g. lowercased).
//
// If there is no exact match for name, it will be checked against names of
// the form '*.example.com' (wildcard certificates) according to RFC 6125.
// If a match is found, matched will be true. If no matches are found, matched
// will be false and a "default" certificate will be returned with defaulted
// set to true. If defaulted is false, then no certificates were available.
//
// The logic in this function is adapted from the Go standard library,
// which is by the Go Authors.
//
// This function is safe for concurrent use.
func (cfg *Config) getCertificateFromCache(hello *tls.ClientHelloInfo) (cert Certificate, matched, defaulted bool) {
name := normalizedName(hello.ServerName)
if name == "" {
// if SNI is empty, prefer matching IP address
if hello.Conn != nil {
addr := localIPFromConn(hello.Conn)
cert, matched = cfg.selectCert(hello, addr)
if matched {
return
}
}
// fall back to a "default" certificate, if specified
if cfg.DefaultServerName != "" {
normDefault := normalizedName(cfg.DefaultServerName)
cert, defaulted = cfg.selectCert(hello, normDefault)
if defaulted {
return
}
}
} else {
// if SNI is specified, try an exact match first
cert, matched = cfg.selectCert(hello, name)
if matched {
return
}
// try replacing labels in the name with
// wildcards until we get a match
labels := strings.Split(name, ".")
for i := range labels {
labels[i] = "*"
candidate := strings.Join(labels, ".")
cert, matched = cfg.selectCert(hello, candidate)
if matched {
return
}
}
}
// otherwise, we're out of options; see issues
// caddyserver/caddy#2035 and caddyserver/caddy#1303 (any
// change to certificate matching behavior must
// account for hosts defined where the hostname
// is empty or a catch-all, like ":443" or
// "0.0.0.0:443")
return
}
// selectCert uses hello to select a certificate from the
// cache for name. If cfg.CertSelection is set, it will be
// used to make the decision. Otherwise, the first matching
// unexpired cert is returned. As a special case, if no
// certificates match name and cfg.CertSelection is set,
// then all certificates in the cache will be passed in
// for the cfg.CertSelection to make the final decision.
func (cfg *Config) selectCert(hello *tls.ClientHelloInfo, name string) (Certificate, bool) {
logger := loggerNamed(cfg.Logger, "handshake")
choices := cfg.certCache.getAllMatchingCerts(name)
if len(choices) == 0 {
if cfg.CertSelection == nil {
if logger != nil {
logger.Debug("no matching certificates and no custom selection logic", zap.String("identifier", name))
}
return Certificate{}, false
}
if logger != nil {
logger.Debug("no matching certificate; will choose from all certificates", zap.String("identifier", name))
}
choices = cfg.certCache.getAllCerts()
}
if logger != nil {
logger.Debug("choosing certificate",
zap.String("identifier", name),
zap.Int("num_choices", len(choices)))
}
if cfg.CertSelection == nil {
cert, err := DefaultCertificateSelector(hello, choices)
if logger != nil {
logger.Debug("default certificate selection results",
zap.Error(err),
zap.String("identifier", name),
zap.Strings("subjects", cert.Names),
zap.Bool("managed", cert.managed),
zap.String("issuer_key", cert.issuerKey),
zap.String("hash", cert.hash))
}
return cert, err == nil
}
cert, err := cfg.CertSelection.SelectCertificate(hello, choices)
if logger != nil {
logger.Debug("custom certificate selection results",
zap.Error(err),
zap.String("identifier", name),
zap.Strings("subjects", cert.Names),
zap.Bool("managed", cert.managed),
zap.String("issuer_key", cert.issuerKey),
zap.String("hash", cert.hash))
}
return cert, err == nil
}
// DefaultCertificateSelector is the default certificate selection logic
// given a choice of certificates. If there is at least one certificate in
// choices, it always returns a certificate without error. It chooses the
// first non-expired certificate that the client supports if possible,
// otherwise it returns an expired certificate that the client supports,
// otherwise it just returns the first certificate in the list of choices.
func DefaultCertificateSelector(hello *tls.ClientHelloInfo, choices []Certificate) (Certificate, error) {
if len(choices) == 0 {
return Certificate{}, fmt.Errorf("no certificates available")
}
now := time.Now()
best := choices[0]
for _, choice := range choices {
if err := hello.SupportsCertificate(&choice.Certificate); err != nil {
continue
}
best = choice // at least the client supports it...
if now.After(choice.Leaf.NotBefore) && now.Before(choice.Leaf.NotAfter) {
return choice, nil // ...and unexpired, great! "Certificate, I choose you!"
}
}
return best, nil // all matching certs are expired or incompatible, oh well
}
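
If the default selection above is not wanted, cfg.CertSelection can be set to a custom selector. A hedged sketch, assuming only the SelectCertificate method shape visible in selectCert above and the assumed NewDefault constructor.

package main

import (
	"crypto/ecdsa"
	"crypto/tls"

	"github.com/caddyserver/certmagic"
)

// preferECDSA prefers certificates whose leaf carries an ECDSA public key
// and otherwise defers to DefaultCertificateSelector.
type preferECDSA struct{}

func (preferECDSA) SelectCertificate(hello *tls.ClientHelloInfo, choices []certmagic.Certificate) (certmagic.Certificate, error) {
	for _, choice := range choices {
		if choice.Leaf == nil {
			continue
		}
		if _, ok := choice.Leaf.PublicKey.(*ecdsa.PublicKey); ok {
			return choice, nil
		}
	}
	return certmagic.DefaultCertificateSelector(hello, choices)
}

func main() {
	cfg := certmagic.NewDefault()     // assumed constructor
	cfg.CertSelection = preferECDSA{} // accepted as long as CertSelection is an interface with this method
	_ = cfg
}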
// getCertDuringHandshake will get a certificate for hello. It first tries
// the in-memory cache. If no exact certificate for hello is in the cache, the
// config most closely corresponding to hello (like a wildcard) will be loaded.
// If none could be matched from the cache, it invokes the configured certificate
// managers to get a certificate and uses the first one that returns a certificate.
// If no certificate managers return a value, and if the config allows it
// (OnDemand!=nil) and if loadIfNecessary == true, it goes to storage to load the
// cert into the cache and serve it. If it's not on disk and if
// obtainIfNecessary == true, the certificate will be obtained from the CA, cached,
// and served. If obtainIfNecessary == true, then loadIfNecessary must also be == true.
// An error will be returned if and only if no certificate is available.
//
// This function is safe for concurrent use.
func (cfg *Config) getCertDuringHandshake(hello *tls.ClientHelloInfo, loadIfNecessary, obtainIfNecessary bool) (Certificate, error) {
log := loggerNamed(cfg.Logger, "handshake")
ctx := context.TODO() // TODO: get a proper context? from somewhere...
// First check our in-memory cache to see if we've already loaded it
cert, matched, defaulted := cfg.getCertificateFromCache(hello)
if matched {
if log != nil {
log.Debug("matched certificate in cache",
zap.Strings("subjects", cert.Names),
zap.Bool("managed", cert.managed),
zap.Time("expiration", cert.Leaf.NotAfter),
zap.String("hash", cert.hash))
}
if cert.managed && cfg.OnDemand != nil && obtainIfNecessary {
// On-demand certificates are maintained in the background, but
// maintenance is triggered by handshakes instead of by a timer
// as in maintain.go.
return cfg.optionalMaintenance(ctx, loggerNamed(cfg.Logger, "on_demand"), cert, hello)
}
return cert, nil
}
// If an external Manager is configured, try to get it from them.
// Only continue to use our own logic if it returns empty+nil.
externalCert, err := cfg.getCertFromAnyCertManager(ctx, hello, log)
if err != nil {
return Certificate{}, err
}
if !externalCert.Empty() {
return externalCert, nil
}
name := cfg.getNameFromClientHello(hello)
// We might be able to load or obtain a needed certificate. Load from
// storage if OnDemand is enabled, or if there is the possibility that
// a statically-managed cert was evicted from a full cache.
cfg.certCache.mu.RLock()
cacheSize := len(cfg.certCache.cache)
cfg.certCache.mu.RUnlock()
// A cert might have still been evicted from the cache even if the cache
// is no longer completely full; this happens if the newly-loaded cert is
// itself evicted (perhaps due to being expired or unmanaged at this point).
// Hence, we use an "almost full" metric to allow for the cache to not be
// perfectly full while still being able to load needed certs from storage.
// See https://caddy.community/t/error-tls-alert-internal-error-592-again/13272
// and caddyserver/caddy#4320.
cacheCapacity := float64(cfg.certCache.options.Capacity)
cacheAlmostFull := cacheCapacity > 0 && float64(cacheSize) >= cacheCapacity*.9
loadDynamically := cfg.OnDemand != nil || cacheAlmostFull
if loadDynamically && loadIfNecessary {
// Then check to see if we have one on disk
// TODO: As suggested here, https://caddy.community/t/error-tls-alert-internal-error-592-again/13272/30?u=matt,
// it might be a good idea to check with the DecisionFunc or allowlist first before even loading the certificate
// from storage, since if we can't renew it, why should we even try serving it (it will just get evicted after
// we get a return value of false anyway)? See issue #174
loadedCert, err := cfg.CacheManagedCertificate(ctx, name)
if errors.Is(err, fs.ErrNotExist) {
// If no exact match, try a wildcard variant, which is something we can still use
labels := strings.Split(name, ".")
labels[0] = "*"
loadedCert, err = cfg.CacheManagedCertificate(ctx, strings.Join(labels, "."))
}
if err == nil {
if log != nil {
log.Debug("loaded certificate from storage",
zap.Strings("subjects", loadedCert.Names),
zap.Bool("managed", loadedCert.managed),
zap.Time("expiration", loadedCert.Leaf.NotAfter),
zap.String("hash", loadedCert.hash))
}
loadedCert, err = cfg.handshakeMaintenance(ctx, hello, loadedCert)
if err != nil {
if log != nil {
log.Error("maintaining newly-loaded certificate",
zap.String("server_name", name),
zap.Error(err))
}
}
return loadedCert, nil
}
if cfg.OnDemand != nil && obtainIfNecessary {
// By this point, we need to ask the CA for a certificate
return cfg.obtainOnDemandCertificate(ctx, hello)
}
}
// Fall back to the default certificate if there is one
if defaulted {
if log != nil {
log.Debug("fell back to default certificate",
zap.Strings("subjects", cert.Names),
zap.Bool("managed", cert.managed),
zap.Time("expiration", cert.Leaf.NotAfter),
zap.String("hash", cert.hash))
}
return cert, nil
}
if log != nil {
log.Debug("no certificate matching TLS ClientHello",
zap.String("server_name", hello.ServerName),
zap.String("remote", hello.Conn.RemoteAddr().String()),
zap.String("identifier", name),
zap.Uint16s("cipher_suites", hello.CipherSuites),
zap.Float64("cert_cache_fill", float64(cacheSize)/cacheCapacity), // may be approximate! because we are not within the lock
zap.Bool("load_if_necessary", loadIfNecessary),
zap.Bool("obtain_if_necessary", obtainIfNecessary),
zap.Bool("on_demand", cfg.OnDemand != nil))
}
return Certificate{}, fmt.Errorf("no certificate available for '%s'", name)
}
// optionalMaintenance will perform maintenance on the certificate (if necessary) and
// will return the resulting certificate. This should only be done if the certificate
// is managed, OnDemand is enabled, and the scope is allowed to obtain certificates.
func (cfg *Config) optionalMaintenance(ctx context.Context, log *zap.Logger, cert Certificate, hello *tls.ClientHelloInfo) (Certificate, error) {
newCert, err := cfg.handshakeMaintenance(ctx, hello, cert)
if err == nil {
return newCert, nil
}
if log != nil {
log.Error("renewing certificate on-demand failed",
zap.Strings("subjects", cert.Names),
zap.Time("not_after", cert.Leaf.NotAfter),
zap.Error(err))
}
if cert.Expired() {
return cert, err
}
// still has time remaining, so serve it anyway
return cert, nil
}
// checkIfCertShouldBeObtained checks to see if an on-demand TLS certificate
// should be obtained for a given domain based upon the config settings. If
// a non-nil error is returned, do not issue a new certificate for name.
func (cfg *Config) checkIfCertShouldBeObtained(name string) error {
if cfg.OnDemand == nil {
return fmt.Errorf("not configured for on-demand certificate issuance")
}
if !SubjectQualifiesForCert(name) {
return fmt.Errorf("subject name does not qualify for certificate: %s", name)
}
if cfg.OnDemand.DecisionFunc != nil {
return cfg.OnDemand.DecisionFunc(name)
}
if len(cfg.OnDemand.hostWhitelist) > 0 &&
!cfg.OnDemand.whitelistContains(name) {
return fmt.Errorf("certificate for '%s' is not managed", name)
}
return nil
}
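
For context, a hedged sketch of supplying the DecisionFunc this check consults; the OnDemandConfig type name is an assumption, since only the DecisionFunc field is visible in the code above.

package main

import (
	"fmt"
	"strings"

	"github.com/caddyserver/certmagic"
)

func main() {
	cfg := certmagic.NewDefault() // assumed constructor

	// checkIfCertShouldBeObtained calls DecisionFunc before each on-demand
	// issuance; a non-nil error vetoes the certificate for that name.
	cfg.OnDemand = &certmagic.OnDemandConfig{
		DecisionFunc: func(name string) error {
			if name == "example.com" || strings.HasSuffix(name, ".example.com") {
				return nil
			}
			return fmt.Errorf("on-demand issuance not allowed for %q", name)
		},
	}
	_ = cfg
}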
// obtainOnDemandCertificate obtains a certificate for hello.
// If another goroutine has already started obtaining a cert for
// hello, it will wait and use what the other goroutine obtained.
//
// This function is safe for use by multiple concurrent goroutines.
func (cfg *Config) obtainOnDemandCertificate(ctx context.Context, hello *tls.ClientHelloInfo) (Certificate, error) {
log := loggerNamed(cfg.Logger, "on_demand")
name := cfg.getNameFromClientHello(hello)
getCertWithoutReobtaining := func() (Certificate, error) {
// very important to set the obtainIfNecessary argument to false, so we don't repeat this infinitely
return cfg.getCertDuringHandshake(hello, true, false)
}
// We must protect this process from happening concurrently, so synchronize.
obtainCertWaitChansMu.Lock()
wait, ok := obtainCertWaitChans[name]
if ok {
// lucky us -- another goroutine is already obtaining the certificate.
// wait for it to finish obtaining the cert and then we'll use it.
obtainCertWaitChansMu.Unlock()
// TODO: see if we can get a proper context in here, for true cancellation
timeout := time.NewTimer(2 * time.Minute)
select {
case <-timeout.C:
return Certificate{}, fmt.Errorf("timed out waiting to obtain certificate for %s", name)
case <-wait:
timeout.Stop()
}
return getCertWithoutReobtaining()
}
// looks like it's up to us to do all the work and obtain the cert.
// make a chan others can wait on if needed
wait = make(chan struct{})
obtainCertWaitChans[name] = wait
obtainCertWaitChansMu.Unlock()
unblockWaiters := func() {
obtainCertWaitChansMu.Lock()
close(wait)
delete(obtainCertWaitChans, name)
obtainCertWaitChansMu.Unlock()
}
// Make sure the certificate should be obtained based on config
err := cfg.checkIfCertShouldBeObtained(name)
if err != nil {
unblockWaiters()
return Certificate{}, err
}
if log != nil {
log.Info("obtaining new certificate", zap.String("server_name", name))
}
// TODO: we are only adding a timeout because we don't know if the context passed in is actually cancelable...
// (timeout duration is based on https://caddy.community/t/zerossl-dns-challenge-failing-often-route53-plugin/13822/24?u=matt)
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, 180*time.Second)
defer cancel()
// Obtain the certificate
err = cfg.ObtainCertAsync(ctx, name)
// immediately unblock anyone waiting for it; doing this in
// a defer would risk deadlock because of the recursive call
// to getCertDuringHandshake below when we return!
unblockWaiters()
if err != nil {
// shucks; failed to solve challenge on-demand
return Certificate{}, err
}
// success; certificate was just placed on disk, so
// we need only restart serving the certificate
return getCertWithoutReobtaining()
}
// handshakeMaintenance performs a check on cert for expiration and OCSP validity.
// If necessary, it will renew the certificate and/or refresh the OCSP staple.
// OCSP stapling errors are not returned, only logged.
//
// This function is safe for use by multiple concurrent goroutines.
func (cfg *Config) handshakeMaintenance(ctx context.Context, hello *tls.ClientHelloInfo, cert Certificate) (Certificate, error) {
log := loggerNamed(cfg.Logger, "on_demand")
// Check OCSP staple validity
if cert.ocsp != nil && !freshOCSP(cert.ocsp) {
if log != nil {
log.Debug("OCSP response needs refreshing",
zap.Strings("identifiers", cert.Names),
zap.Int("ocsp_status", cert.ocsp.Status),
zap.Time("this_update", cert.ocsp.ThisUpdate),
zap.Time("next_update", cert.ocsp.NextUpdate))
}
err := stapleOCSP(ctx, cfg.OCSP, cfg.Storage, &cert, nil)
if err != nil {
// An error with OCSP stapling is not the end of the world, and in fact, is
// quite common considering not all certs have issuer URLs that support it.
if log != nil {
log.Warn("stapling OCSP",
zap.String("server_name", hello.ServerName),
zap.Error(err))
}
} else if log != nil {
log.Debug("successfully stapled new OCSP response",
zap.Strings("identifiers", cert.Names),
zap.Int("ocsp_status", cert.ocsp.Status),
zap.Time("this_update", cert.ocsp.ThisUpdate),
zap.Time("next_update", cert.ocsp.NextUpdate))
}
// our copy of cert has the new OCSP staple, so replace it in the cache
cfg.certCache.mu.Lock()
cfg.certCache.cache[cert.hash] = cert
cfg.certCache.mu.Unlock()
}
// We attempt to replace any certificates that were revoked.
// Crucially, this happens OUTSIDE a lock on the certCache.
if certShouldBeForceRenewed(cert) {
if log != nil {
log.Warn("on-demand certificate's OCSP status is REVOKED; will try to forcefully renew",
zap.Strings("identifiers", cert.Names),
zap.Int("ocsp_status", cert.ocsp.Status),
zap.Time("revoked_at", cert.ocsp.RevokedAt),
zap.Time("this_update", cert.ocsp.ThisUpdate),
zap.Time("next_update", cert.ocsp.NextUpdate))
}
return cfg.renewDynamicCertificate(ctx, hello, cert)
}
// Check cert expiration
if currentlyInRenewalWindow(cert.Leaf.NotBefore, cert.Leaf.NotAfter, cfg.RenewalWindowRatio) {
return cfg.renewDynamicCertificate(ctx, hello, cert)
}
return cert, nil
}
// renewDynamicCertificate renews the certificate for name using cfg. It returns the
// certificate to use and an error, if any. name should already be lower-cased before
// calling this function. name is the name obtained directly from the handshake's
// ClientHello. If the certificate hasn't yet expired, currentCert will be returned
// and the renewal will happen in the background; otherwise this blocks until the
// certificate has been renewed, and returns the renewed certificate.
//
// If the certificate's OCSP status (currentCert.ocsp) is Revoked, it will be forcefully
// renewed even if it is not expiring.
//
// This function is safe for use by multiple concurrent goroutines.
func (cfg *Config) renewDynamicCertificate(ctx context.Context, hello *tls.ClientHelloInfo, currentCert Certificate) (Certificate, error) {
log := loggerNamed(cfg.Logger, "on_demand")
name := cfg.getNameFromClientHello(hello)
timeLeft := time.Until(currentCert.Leaf.NotAfter)
revoked := currentCert.ocsp != nil && currentCert.ocsp.Status == ocsp.Revoked
getCertWithoutReobtaining := func() (Certificate, error) {
// very important to set the obtainIfNecessary argument to false, so we don't repeat this infinitely
return cfg.getCertDuringHandshake(hello, true, false)
}
// see if another goroutine is already working on this certificate
obtainCertWaitChansMu.Lock()
wait, ok := obtainCertWaitChans[name]
if ok {
// lucky us -- another goroutine is already renewing the certificate
obtainCertWaitChansMu.Unlock()
// the current certificate hasn't expired, and another goroutine is already
// renewing it, so we might as well serve what we have without blocking, UNLESS
// we're forcing renewal, in which case the current certificate is not usable
if timeLeft > 0 && !revoked {
if log != nil {
log.Debug("certificate expires soon but is already being renewed; serving current certificate",
zap.Strings("subjects", currentCert.Names),
zap.Duration("remaining", timeLeft))
}
return currentCert, nil
}
// otherwise, we'll have to wait for the renewal to finish so we don't serve
// a revoked or expired certificate
if log != nil {
log.Debug("certificate has expired, but is already being renewed; waiting for renewal to complete",
zap.Strings("subjects", currentCert.Names),
zap.Time("expired", currentCert.Leaf.NotAfter),
zap.Bool("revoked", revoked))
}
// TODO: see if we can get a proper context in here, for true cancellation
timeout := time.NewTimer(2 * time.Minute)
select {
case <-timeout.C:
return Certificate{}, fmt.Errorf("timed out waiting for certificate renewal of %s", name)
case <-wait:
timeout.Stop()
}
return getCertWithoutReobtaining()
}
// looks like it's up to us to do all the work and renew the cert
wait = make(chan struct{})
obtainCertWaitChans[name] = wait
obtainCertWaitChansMu.Unlock()
unblockWaiters := func() {
obtainCertWaitChansMu.Lock()
close(wait)
delete(obtainCertWaitChans, name)
obtainCertWaitChansMu.Unlock()
}
if log != nil {
log.Info("attempting certificate renewal",
zap.String("server_name", name),
zap.Strings("subjects", currentCert.Names),
zap.Time("expiration", currentCert.Leaf.NotAfter),
zap.Duration("remaining", timeLeft),
zap.Bool("revoked", revoked))
}
// Make sure a certificate for this name should be obtained on-demand
err := cfg.checkIfCertShouldBeObtained(name)
if err != nil {
// if not, remove from cache (it will be deleted from storage later)
cfg.certCache.mu.Lock()
cfg.certCache.removeCertificate(currentCert)
cfg.certCache.mu.Unlock()
unblockWaiters()
return Certificate{}, err
}
// Renew and reload the certificate
renewAndReload := func(ctx context.Context, cancel context.CancelFunc) (Certificate, error) {
defer cancel()
// otherwise, renew with issuer, etc.
var newCert Certificate
if revoked {
newCert, err = cfg.forceRenew(ctx, log, currentCert)
} else {
err = cfg.RenewCertAsync(ctx, name, false)
if err == nil {
// even though the recursive nature of the dynamic cert loading
// would just call this function anyway, we do it here to
// make the replacement as atomic as possible.
newCert, err = cfg.CacheManagedCertificate(ctx, name)
if err != nil {
if log != nil {
log.Error("loading renewed certificate", zap.String("server_name", name), zap.Error(err))
}
} else {
// replace the old certificate with the new one
cfg.certCache.replaceCertificate(currentCert, newCert)
}
}
}
// immediately unblock anyone waiting for it; doing this in
// a defer would risk deadlock because of the recursive call
// to getCertDuringHandshake below when we return!
unblockWaiters()
if err != nil {
if log != nil {
log.Error("renewing and reloading certificate",
zap.String("server_name", name),
zap.Error(err),
zap.Bool("forced", revoked))
}
return newCert, err
}
return getCertWithoutReobtaining()
}
// if the certificate hasn't expired, we can serve what we have and renew in the background
if timeLeft > 0 {
ctx, cancel := context.WithTimeout(ctx, 5*time.Minute)
go renewAndReload(ctx, cancel)
return currentCert, nil
}
// otherwise, we have to block while we renew an expired certificate
ctx, cancel := context.WithTimeout(ctx, 90*time.Second)
return renewAndReload(ctx, cancel)
}
// getCertFromAnyCertManager gets a certificate from cfg's Managers. If there are no Managers defined, this is
// a no-op that returns empty values. Otherwise, it gets a certificate for hello from the first Manager that
// returns a certificate and no error.
func (cfg *Config) getCertFromAnyCertManager(ctx context.Context, hello *tls.ClientHelloInfo, log *zap.Logger) (Certificate, error) {
// fast path if nothing to do
if len(cfg.Managers) == 0 {
return Certificate{}, nil
}
var upstreamCert *tls.Certificate
// try all the GetCertificate methods on external managers; use first one that returns a certificate
for i, certManager := range cfg.Managers {
var err error
upstreamCert, err = certManager.GetCertificate(ctx, hello)
if err != nil {
if log != nil {
log.Error("getting certificate from external certificate manager",
zap.String("sni", hello.ServerName),
zap.Int("cert_manager", i),
zap.Error(err))
}
continue
}
if upstreamCert != nil {
break
}
}
if upstreamCert == nil {
if log != nil {
log.Debug("all external certificate managers yielded no certificates and no errors", zap.String("sni", hello.ServerName))
}
return Certificate{}, nil
}
var cert Certificate
err := fillCertFromLeaf(&cert, *upstreamCert)
if err != nil {
return Certificate{}, fmt.Errorf("external certificate manager: %s: filling cert from leaf: %v", hello.ServerName, err)
}
if log != nil {
log.Debug("using externally-managed certificate",
zap.String("sni", hello.ServerName),
zap.Strings("names", cert.Names),
zap.Time("expiration", cert.Leaf.NotAfter))
}
return cert, nil
}
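
A hedged sketch of an external manager as consumed by the loop above; the element type of cfg.Managers is assumed to be an interface with exactly the GetCertificate shape called here, and the file names are illustrative.

package main

import (
	"context"
	"crypto/tls"
	"log"

	"github.com/caddyserver/certmagic"
)

// staticManager serves one pre-loaded certificate for a single name and
// returns (nil, nil) for everything else, letting certmagic fall back to
// its cache, storage and issuers.
type staticManager struct {
	name string
	cert *tls.Certificate
}

func (m staticManager) GetCertificate(_ context.Context, hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
	if hello.ServerName == m.name {
		return m.cert, nil
	}
	return nil, nil // empty + nil means "not mine", not an error
}

func main() {
	pair, err := tls.LoadX509KeyPair("example.com.crt", "example.com.key") // illustrative file names
	if err != nil {
		log.Fatal(err)
	}
	cfg := certmagic.NewDefault() // assumed constructor
	cfg.Managers = append(cfg.Managers, staticManager{name: "example.com", cert: &pair})
	_ = cfg
}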
// getTLSALPNChallengeCert is to be called when the clientHello pertains to
// a TLS-ALPN challenge and a certificate is required to solve it. This method gets
// the relevant challenge info and then returns the associated certificate (if any)
// or generates it anew if it's not available (as is the case with distributed
// solving). True is returned if the challenge is being solved in a distributed
// fashion (there is no semantic difference; the flag is mainly for logging).
func (cfg *Config) getTLSALPNChallengeCert(clientHello *tls.ClientHelloInfo) (*tls.Certificate, bool, error) {
chalData, distributed, err := cfg.getChallengeInfo(clientHello.Context(), clientHello.ServerName)
if err != nil {
return nil, distributed, err
}
// fast path: we already created the certificate (this avoids having to re-create
// it at every handshake that tries to verify, e.g. multi-perspective validation)
if chalData.data != nil {
return chalData.data.(*tls.Certificate), distributed, nil
}
// otherwise, we can re-create the solution certificate, but it takes a few cycles
cert, err := acmez.TLSALPN01ChallengeCert(chalData.Challenge)
if err != nil {
return nil, distributed, fmt.Errorf("making TLS-ALPN challenge certificate: %v", err)
}
if cert == nil {
return nil, distributed, fmt.Errorf("got nil TLS-ALPN challenge certificate but no error")
}
return cert, distributed, nil
}
// getNameFromClientHello returns a normalized form of hello.ServerName.
// If hello.ServerName is empty (i.e. client did not use SNI), then the
// associated connection's local address is used to extract an IP address.
func (*Config) getNameFromClientHello(hello *tls.ClientHelloInfo) string {
if name := normalizedName(hello.ServerName); name != "" {
return name
}
return localIPFromConn(hello.Conn)
}
// localIPFromConn returns the host portion of c's local address
// and strips the scope ID if one exists (see RFC 4007).
func localIPFromConn(c net.Conn) string {
if c == nil {
return ""
}
localAddr := c.LocalAddr().String()
ip, _, err := net.SplitHostPort(localAddr)
if err != nil {
// OK; assume there was no port
ip = localAddr
}
// IPv6 addresses can have scope IDs, e.g. "fe80::4c3:3cff:fe4f:7e0b%eth0",
// but for our purposes, these are useless (unless a valid use case proves
// otherwise; see issue #3911)
if scopeIDStart := strings.Index(ip, "%"); scopeIDStart > -1 {
ip = ip[:scopeIDStart]
}
return ip
}
// normalizedName returns a cleaned form of serverName that is
// used for consistency when referring to a SNI value.
func normalizedName(serverName string) string {
return strings.ToLower(strings.TrimSpace(serverName))
}
// obtainCertWaitChans is used to coordinate obtaining certs for each hostname.
var obtainCertWaitChans = make(map[string]chan struct{})
var obtainCertWaitChansMu sync.Mutex

124
vendor/github.com/caddyserver/certmagic/httphandler.go generated vendored Normal file

@@ -0,0 +1,124 @@
// Copyright 2015 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package certmagic
import (
"net/http"
"strings"
"github.com/mholt/acmez/acme"
"go.uber.org/zap"
)
// HTTPChallengeHandler wraps h in a handler that can solve the ACME
// HTTP challenge. am is required, and its config must have a certificate
// cache backed by a functional storage facility, since that is where
// the challenge state is stored between initiation and solution.
//
// If a request is not an ACME HTTP challenge, h will be invoked.
func (am *ACMEIssuer) HTTPChallengeHandler(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if am.HandleHTTPChallenge(w, r) {
return
}
h.ServeHTTP(w, r)
})
}
// HandleHTTPChallenge uses am to solve challenge requests from an ACME
// server that were initiated by this instance or any other instance in
// this cluster (that is, any instance using the same storage that am does).
//
// If the HTTP challenge is disabled, this function is a no-op.
//
// If am is nil or if am does not have a certificate cache backed by
// usable storage, solving the HTTP challenge will fail.
//
// It returns true if it handled the request; if so, the response has
// already been written. If false is returned, this call was a no-op and
// the request has not been handled.
func (am *ACMEIssuer) HandleHTTPChallenge(w http.ResponseWriter, r *http.Request) bool {
if am == nil {
return false
}
if am.DisableHTTPChallenge {
return false
}
if !LooksLikeHTTPChallenge(r) {
return false
}
return am.distributedHTTPChallengeSolver(w, r)
}
// distributedHTTPChallengeSolver checks to see if this challenge
// request was initiated by this or another instance which uses the
// same storage as am does, and attempts to complete the challenge for
// it. It returns true if the request was handled; false otherwise.
func (am *ACMEIssuer) distributedHTTPChallengeSolver(w http.ResponseWriter, r *http.Request) bool {
if am == nil {
return false
}
host := hostOnly(r.Host)
chalInfo, distributed, err := am.config.getChallengeInfo(r.Context(), host)
if err != nil {
if am.Logger != nil {
am.Logger.Error("looking up info for HTTP challenge",
zap.String("host", host),
zap.Error(err))
}
return false
}
return solveHTTPChallenge(am.Logger, w, r, chalInfo.Challenge, distributed)
}
// solveHTTPChallenge solves the HTTP challenge using the given challenge information.
// If the challenge is being solved in a distributed fashion, set distributed to true for logging purposes.
// It returns true if the properties of the request check out in relation to the HTTP challenge.
// Most of this code borrowed from xenolf's built-in HTTP-01 challenge solver in March 2018.
func solveHTTPChallenge(logger *zap.Logger, w http.ResponseWriter, r *http.Request, challenge acme.Challenge, distributed bool) bool {
challengeReqPath := challenge.HTTP01ResourcePath()
if r.URL.Path == challengeReqPath &&
strings.EqualFold(hostOnly(r.Host), challenge.Identifier.Value) && // mitigate DNS rebinding attacks
r.Method == "GET" {
w.Header().Add("Content-Type", "text/plain")
w.Write([]byte(challenge.KeyAuthorization))
r.Close = true
if logger != nil {
logger.Info("served key authentication",
zap.String("identifier", challenge.Identifier.Value),
zap.String("challenge", "http-01"),
zap.String("remote", r.RemoteAddr),
zap.Bool("distributed", distributed))
}
return true
}
return false
}
// SolveHTTPChallenge solves the HTTP challenge. It should be used only on HTTP requests that are
// from ACME servers trying to validate an identifier (i.e. LooksLikeHTTPChallenge() == true). It
// returns true if the request criteria check out and it answered with key authentication, in which
// case no further handling of the request is necessary.
func SolveHTTPChallenge(logger *zap.Logger, w http.ResponseWriter, r *http.Request, challenge acme.Challenge) bool {
return solveHTTPChallenge(logger, w, r, challenge, false)
}
// LooksLikeHTTPChallenge returns true if r looks like an ACME
// HTTP challenge request from an ACME server.
func LooksLikeHTTPChallenge(r *http.Request) bool {
return r.Method == "GET" && strings.HasPrefix(r.URL.Path, challengeBasePath)
}
const challengeBasePath = "/.well-known/acme-challenge"
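
A sketch of putting HTTPChallengeHandler in front of an ordinary mux so plain HTTP on port 80 can answer HTTP-01 challenges; NewACMEIssuer, DefaultACME and the Issuer interface are assumed from elsewhere in this vendored package.

package main

import (
	"log"
	"net/http"

	"github.com/caddyserver/certmagic"
)

func main() {
	magic := certmagic.NewDefault()                                 // assumed constructor
	issuer := certmagic.NewACMEIssuer(magic, certmagic.DefaultACME) // assumed constructor
	magic.Issuers = []certmagic.Issuer{issuer}

	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello"))
	})

	// Requests under /.well-known/acme-challenge/ are answered with the key
	// authorization; everything else falls through to mux.
	log.Fatal(http.ListenAndServe(":80", issuer.HTTPChallengeHandler(mux)))
}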

678
vendor/github.com/caddyserver/certmagic/maintain.go generated vendored Normal file

@@ -0,0 +1,678 @@
// Copyright 2015 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package certmagic
import (
"context"
"crypto/x509"
"encoding/pem"
"fmt"
"log"
"path"
"runtime"
"strings"
"time"
"github.com/mholt/acmez/acme"
"go.uber.org/zap"
"golang.org/x/crypto/ocsp"
)
// maintainAssets is a permanently-blocking function
// that loops indefinitely and, on a regular schedule, checks
// certificates for expiration and initiates a renewal of certs
// that are expiring soon. It also updates OCSP stapling. It
// should only be called once per cache. Panics are recovered,
// and if panicCount < 10, the function is called recursively,
// incrementing panicCount each time. Initial invocation should
// start panicCount at 0.
func (certCache *Cache) maintainAssets(panicCount int) {
log := loggerNamed(certCache.logger, "maintenance")
if log != nil {
log = log.With(zap.String("cache", fmt.Sprintf("%p", certCache)))
}
defer func() {
if err := recover(); err != nil {
buf := make([]byte, stackTraceBufferSize)
buf = buf[:runtime.Stack(buf, false)]
if log != nil {
log.Error("panic", zap.Any("error", err), zap.ByteString("stack", buf))
}
if panicCount < 10 {
certCache.maintainAssets(panicCount + 1)
}
}
}()
renewalTicker := time.NewTicker(certCache.options.RenewCheckInterval)
ocspTicker := time.NewTicker(certCache.options.OCSPCheckInterval)
if log != nil {
log.Info("started background certificate maintenance")
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
for {
select {
case <-renewalTicker.C:
err := certCache.RenewManagedCertificates(ctx)
if err != nil && log != nil {
log.Error("renewing managed certificates", zap.Error(err))
}
case <-ocspTicker.C:
certCache.updateOCSPStaples(ctx)
case <-certCache.stopChan:
renewalTicker.Stop()
ocspTicker.Stop()
if log != nil {
log.Info("stopped background certificate maintenance")
}
close(certCache.doneChan)
return
}
}
}
// RenewManagedCertificates renews managed certificates,
// including ones loaded on-demand. Note that this is done
// automatically on a regular basis; normally you will not
// need to call this. This method assumes non-interactive
// mode (i.e. operating in the background).
func (certCache *Cache) RenewManagedCertificates(ctx context.Context) error {
log := loggerNamed(certCache.logger, "maintenance")
// configs will hold a map of certificate name to the config
// to use when managing that certificate
configs := make(map[string]*Config)
// we use the queues for a very important reason: to do any and all
// operations that could require an exclusive write lock outside
// of the read lock! otherwise we get a deadlock, yikes. in other
// words, our first iteration through the certificate cache does NOT
// perform any operations--only queues them--so that more fine-grained
// write locks may be obtained during the actual operations.
var renewQueue, reloadQueue, deleteQueue []Certificate
certCache.mu.RLock()
for certKey, cert := range certCache.cache {
if !cert.managed {
continue
}
// the list of names on this cert should never be empty... programmer error?
if len(cert.Names) == 0 {
if log != nil {
log.Warn("certificate has no names; removing from cache", zap.String("cert_key", certKey))
}
deleteQueue = append(deleteQueue, cert)
continue
}
// get the config associated with this certificate
cfg, err := certCache.getConfig(cert)
if err != nil {
if log != nil {
log.Error("unable to get configuration to manage certificate; unable to renew",
zap.Strings("identifiers", cert.Names),
zap.Error(err))
}
continue
}
if cfg == nil {
// this is bad if this happens, probably a programmer error (oops)
if log != nil {
log.Error("no configuration associated with certificate; unable to manage",
zap.Strings("identifiers", cert.Names))
}
continue
}
if cfg.OnDemand != nil {
continue
}
// if time is up or expires soon, we need to try to renew it
if cert.NeedsRenewal(cfg) {
configs[cert.Names[0]] = cfg
// see if the certificate in storage has already been renewed, possibly by another
// instance that didn't coordinate with this one; if so, just load it (this
// might happen if another instance already renewed it - kinda sloppy but checking disk
// first is a simple way to possibly drastically reduce rate limit problems)
storedCertExpiring, err := cfg.managedCertInStorageExpiresSoon(ctx, cert)
if err != nil {
// hmm, weird, but not a big deal, maybe it was deleted or something
if log != nil {
log.Warn("error while checking if stored certificate is also expiring soon",
zap.Strings("identifiers", cert.Names),
zap.Error(err))
}
} else if !storedCertExpiring {
// if the certificate is NOT expiring soon and there was no error, then we
// are good to just reload the certificate from storage instead of repeating
// a likely-unnecessary renewal procedure
reloadQueue = append(reloadQueue, cert)
continue
}
// the certificate in storage has not been renewed yet, so we will do it
// NOTE: It is super-important to note that the TLS-ALPN challenge requires
// a write lock on the cache in order to complete its challenge, so it is extra
// vital that this renew operation does not happen inside our read lock!
renewQueue = append(renewQueue, cert)
}
}
certCache.mu.RUnlock()
// Reload certificates that merely need to be updated in memory
for _, oldCert := range reloadQueue {
timeLeft := oldCert.Leaf.NotAfter.Sub(time.Now().UTC())
if log != nil {
log.Info("certificate expires soon, but is already renewed in storage; reloading stored certificate",
zap.Strings("identifiers", oldCert.Names),
zap.Duration("remaining", timeLeft))
}
cfg := configs[oldCert.Names[0]]
// crucially, this happens OUTSIDE a lock on the certCache
_, err := cfg.reloadManagedCertificate(ctx, oldCert)
if err != nil {
if log != nil {
log.Error("loading renewed certificate",
zap.Strings("identifiers", oldCert.Names),
zap.Error(err))
}
continue
}
}
// Renewal queue
for _, oldCert := range renewQueue {
cfg := configs[oldCert.Names[0]]
err := certCache.queueRenewalTask(ctx, oldCert, cfg)
if err != nil {
if log != nil {
log.Error("queueing renewal task",
zap.Strings("identifiers", oldCert.Names),
zap.Error(err))
}
continue
}
}
// Deletion queue
certCache.mu.Lock()
for _, cert := range deleteQueue {
certCache.removeCertificate(cert)
}
certCache.mu.Unlock()
return nil
}
func (certCache *Cache) queueRenewalTask(ctx context.Context, oldCert Certificate, cfg *Config) error {
log := loggerNamed(certCache.logger, "maintenance")
timeLeft := oldCert.Leaf.NotAfter.Sub(time.Now().UTC())
if log != nil {
log.Info("certificate expires soon; queuing for renewal",
zap.Strings("identifiers", oldCert.Names),
zap.Duration("remaining", timeLeft))
}
// Get the name which we should use to renew this certificate;
// we only support managing certificates with one name per cert,
// so this should be easy.
renewName := oldCert.Names[0]
// queue up this renewal job (is a no-op if already active or queued)
jm.Submit(cfg.Logger, "renew_"+renewName, func() error {
timeLeft := oldCert.Leaf.NotAfter.Sub(time.Now().UTC())
if log != nil {
log.Info("attempting certificate renewal",
zap.Strings("identifiers", oldCert.Names),
zap.Duration("remaining", timeLeft))
}
// perform renewal - crucially, this happens OUTSIDE a lock on certCache
err := cfg.RenewCertAsync(ctx, renewName, false)
if err != nil {
if cfg.OnDemand != nil {
// loaded dynamically, remove dynamically
certCache.mu.Lock()
certCache.removeCertificate(oldCert)
certCache.mu.Unlock()
}
return fmt.Errorf("%v %v", oldCert.Names, err)
}
// successful renewal, so update in-memory cache by loading
// renewed certificate so it will be used with handshakes
_, err = cfg.reloadManagedCertificate(ctx, oldCert)
if err != nil {
return ErrNoRetry{fmt.Errorf("%v %v", oldCert.Names, err)}
}
return nil
})
return nil
}
// updateOCSPStaples updates the OCSP stapling in all
// eligible, cached certificates.
//
// OCSP maintenance strives to abide the relevant points on
// Ryan Sleevi's recommendations for good OCSP support:
// https://gist.github.com/sleevi/5efe9ef98961ecfb4da8
func (certCache *Cache) updateOCSPStaples(ctx context.Context) {
logger := loggerNamed(certCache.logger, "maintenance")
// temporary structures to store updates or tasks
// so that we can keep our locks short-lived
type ocspUpdate struct {
rawBytes []byte
parsed *ocsp.Response
}
type updateQueueEntry struct {
cert Certificate
certHash string
lastNextUpdate time.Time
cfg *Config
}
type renewQueueEntry struct {
oldCert Certificate
cfg *Config
}
updated := make(map[string]ocspUpdate)
var updateQueue []updateQueueEntry // certs that need a refreshed staple
var renewQueue []renewQueueEntry // certs that need to be renewed (due to revocation)
// obtain brief read lock during our scan to see which staples need updating
certCache.mu.RLock()
for certHash, cert := range certCache.cache {
// no point in updating OCSP for expired or "synthetic" certificates
if cert.Leaf == nil || cert.Expired() {
continue
}
cfg, err := certCache.getConfig(cert)
if err != nil {
if logger != nil {
logger.Error("unable to get automation config for certificate; maintenance for this certificate will likely fail",
zap.Strings("identifiers", cert.Names),
zap.Error(err))
}
continue
}
// always try to replace revoked certificates, even if OCSP response is still fresh
if certShouldBeForceRenewed(cert) {
renewQueue = append(renewQueue, renewQueueEntry{
oldCert: cert,
cfg: cfg,
})
continue
}
// if the status is not fresh, get a new one
var lastNextUpdate time.Time
if cert.ocsp != nil {
lastNextUpdate = cert.ocsp.NextUpdate
if cert.ocsp.Status != ocsp.Unknown && freshOCSP(cert.ocsp) {
// no need to update our staple if still fresh and not Unknown
continue
}
}
updateQueue = append(updateQueue, updateQueueEntry{cert, certHash, lastNextUpdate, cfg})
}
certCache.mu.RUnlock()
// perform updates outside of any lock on certCache
for _, qe := range updateQueue {
cert := qe.cert
certHash := qe.certHash
lastNextUpdate := qe.lastNextUpdate
if qe.cfg == nil {
// this is bad if this happens, probably a programmer error (oops)
if logger != nil {
logger.Error("no configuration associated with certificate; unable to manage OCSP staples",
zap.Strings("identifiers", cert.Names))
}
continue
}
err := stapleOCSP(ctx, qe.cfg.OCSP, qe.cfg.Storage, &cert, nil)
if err != nil {
if cert.ocsp != nil {
// if there was no staple before, that's fine; otherwise we should log the error
if logger != nil {
logger.Error("stapling OCSP",
zap.Strings("identifiers", cert.Names),
zap.Error(err))
}
}
continue
}
// By this point, we've obtained the latest OCSP response.
// If there was no staple before, or if the response is updated, make
// sure we apply the update to all names on the certificate if
// the status is still Good.
if cert.ocsp != nil && cert.ocsp.Status == ocsp.Good && (lastNextUpdate.IsZero() || lastNextUpdate != cert.ocsp.NextUpdate) {
if logger != nil {
logger.Info("advancing OCSP staple",
zap.Strings("identifiers", cert.Names),
zap.Time("from", lastNextUpdate),
zap.Time("to", cert.ocsp.NextUpdate))
}
updated[certHash] = ocspUpdate{rawBytes: cert.Certificate.OCSPStaple, parsed: cert.ocsp}
}
// If the updated staple shows that the certificate was revoked, we should immediately renew it
if certShouldBeForceRenewed(cert) {
renewQueue = append(renewQueue, renewQueueEntry{
oldCert: cert,
cfg: qe.cfg,
})
}
}
// These write locks should be brief since we have all the info we need now.
for certKey, update := range updated {
certCache.mu.Lock()
if cert, ok := certCache.cache[certKey]; ok {
cert.ocsp = update.parsed
cert.Certificate.OCSPStaple = update.rawBytes
certCache.cache[certKey] = cert
}
certCache.mu.Unlock()
}
// We attempt to replace any certificates that were revoked.
// Crucially, this happens OUTSIDE a lock on the certCache.
for _, renew := range renewQueue {
_, err := renew.cfg.forceRenew(ctx, logger, renew.oldCert)
if err != nil && logger != nil {
logger.Info("forcefully renewing certificate due to REVOKED status",
zap.Strings("identifiers", renew.oldCert.Names),
zap.Error(err))
}
}
}
// CleanStorageOptions specifies how to clean up a storage unit.
type CleanStorageOptions struct {
OCSPStaples bool
ExpiredCerts bool
ExpiredCertGracePeriod time.Duration
}
// CleanStorage removes assets which are no longer useful,
// according to opts.
func CleanStorage(ctx context.Context, storage Storage, opts CleanStorageOptions) {
if opts.OCSPStaples {
err := deleteOldOCSPStaples(ctx, storage)
if err != nil {
log.Printf("[ERROR] Deleting old OCSP staples: %v", err)
}
}
if opts.ExpiredCerts {
err := deleteExpiredCerts(ctx, storage, opts.ExpiredCertGracePeriod)
if err != nil {
log.Printf("[ERROR] Deleting expired certificates: %v", err)
}
}
// TODO: delete stale locks?
}
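
A short sketch of invoking this cleanup against the file-backed storage from earlier in this diff; the path and grace period are illustrative.

package main

import (
	"context"
	"time"

	"github.com/caddyserver/certmagic"
)

func main() {
	storage := &certmagic.FileStorage{Path: "/tmp/certmagic-example"}

	// Remove stale OCSP staples and certificates that expired more than a
	// day ago; errors are logged by CleanStorage itself, nothing is returned.
	certmagic.CleanStorage(context.Background(), storage, certmagic.CleanStorageOptions{
		OCSPStaples:            true,
		ExpiredCerts:           true,
		ExpiredCertGracePeriod: 24 * time.Hour,
	})
}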
func deleteOldOCSPStaples(ctx context.Context, storage Storage) error {
ocspKeys, err := storage.List(ctx, prefixOCSP, false)
if err != nil {
// maybe just hasn't been created yet; no big deal
return nil
}
for _, key := range ocspKeys {
// if context was cancelled, quit early; otherwise proceed
select {
case <-ctx.Done():
return ctx.Err()
default:
}
ocspBytes, err := storage.Load(ctx, key)
if err != nil {
log.Printf("[ERROR] While deleting old OCSP staples, unable to load staple file: %v", err)
continue
}
resp, err := ocsp.ParseResponse(ocspBytes, nil)
if err != nil {
// contents are invalid; delete it
err = storage.Delete(ctx, key)
if err != nil {
log.Printf("[ERROR] Purging corrupt staple file %s: %v", key, err)
}
continue
}
if time.Now().After(resp.NextUpdate) {
// response has expired; delete it
err = storage.Delete(ctx, key)
if err != nil {
log.Printf("[ERROR] Purging expired staple file %s: %v", key, err)
}
}
}
return nil
}
func deleteExpiredCerts(ctx context.Context, storage Storage, gracePeriod time.Duration) error {
issuerKeys, err := storage.List(ctx, prefixCerts, false)
if err != nil {
// maybe just hasn't been created yet; no big deal
return nil
}
for _, issuerKey := range issuerKeys {
siteKeys, err := storage.List(ctx, issuerKey, false)
if err != nil {
log.Printf("[ERROR] Listing contents of %s: %v", issuerKey, err)
continue
}
for _, siteKey := range siteKeys {
// if context was cancelled, quit early; otherwise proceed
select {
case <-ctx.Done():
return ctx.Err()
default:
}
siteAssets, err := storage.List(ctx, siteKey, false)
if err != nil {
log.Printf("[ERROR] Listing contents of %s: %v", siteKey, err)
continue
}
for _, assetKey := range siteAssets {
if path.Ext(assetKey) != ".crt" {
continue
}
certFile, err := storage.Load(ctx, assetKey)
if err != nil {
return fmt.Errorf("loading certificate file %s: %v", assetKey, err)
}
block, _ := pem.Decode(certFile)
if block == nil || block.Type != "CERTIFICATE" {
return fmt.Errorf("certificate file %s does not contain PEM-encoded certificate", assetKey)
}
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
return fmt.Errorf("certificate file %s is malformed; error parsing PEM: %v", assetKey, err)
}
if expiredTime := time.Since(cert.NotAfter); expiredTime >= gracePeriod {
log.Printf("[INFO] Certificate %s expired %s ago; cleaning up", assetKey, expiredTime)
baseName := strings.TrimSuffix(assetKey, ".crt")
for _, relatedAsset := range []string{
assetKey,
baseName + ".key",
baseName + ".json",
} {
log.Printf("[INFO] Deleting %s because resource expired", relatedAsset)
err := storage.Delete(ctx, relatedAsset)
if err != nil {
log.Printf("[ERROR] Cleaning up asset related to expired certificate for %s: %s: %v",
baseName, relatedAsset, err)
}
}
}
}
// update listing; if folder is empty, delete it
siteAssets, err = storage.List(ctx, siteKey, false)
if err != nil {
continue
}
if len(siteAssets) == 0 {
log.Printf("[INFO] Deleting %s because key is empty", siteKey)
err := storage.Delete(ctx, siteKey)
if err != nil {
return fmt.Errorf("deleting empty site folder %s: %v", siteKey, err)
}
}
}
}
return nil
}
// forceRenew forcefully renews cert and replaces it in the cache, and returns the new certificate. It is intended
// for use primarily in the case of cert revocation. This MUST NOT be called within a lock on cfg.certCacheMu.
func (cfg *Config) forceRenew(ctx context.Context, logger *zap.Logger, cert Certificate) (Certificate, error) {
if logger != nil {
if cert.ocsp != nil && cert.ocsp.Status == ocsp.Revoked {
logger.Warn("OCSP status for managed certificate is REVOKED; attempting to replace with new certificate",
zap.Strings("identifiers", cert.Names),
zap.Time("expiration", cert.Leaf.NotAfter))
} else {
logger.Warn("forcefully renewing certificate",
zap.Strings("identifiers", cert.Names),
zap.Time("expiration", cert.Leaf.NotAfter))
}
}
renewName := cert.Names[0]
// if revoked for key compromise, we can't be sure whether the storage of
// the key is still safe; however, we KNOW the old key is not safe, and we
// can only hope by the time of revocation that storage has been secured;
// key management is not something we want to get into, but in this case
// it seems prudent to replace the key - and since renewal requires reuse
// of a prior key, we can't do a "renew" to replace the cert if we need a
// new key, so we'll have to do an obtain instead
var obtainInsteadOfRenew bool
if cert.ocsp != nil && cert.ocsp.RevocationReason == acme.ReasonKeyCompromise {
err := cfg.moveCompromisedPrivateKey(ctx, cert, logger)
if err != nil && logger != nil {
logger.Error("could not remove compromised private key from use",
zap.Strings("identifiers", cert.Names),
zap.String("issuer", cert.issuerKey),
zap.Error(err))
}
obtainInsteadOfRenew = true
}
var err error
if obtainInsteadOfRenew {
err = cfg.ObtainCertAsync(ctx, renewName)
} else {
// notice that we force renewal; otherwise, it might see that the
// certificate isn't close to expiring and return, but we really
// need a replacement certificate! see issue #4191
err = cfg.RenewCertAsync(ctx, renewName, true)
}
if err != nil {
if cert.ocsp != nil && cert.ocsp.Status == ocsp.Revoked {
// probably better to not serve a revoked certificate at all
if logger != nil {
logger.Error("unable to obtain new to certificate after OCSP status of REVOKED; removing from cache",
zap.Strings("identifiers", cert.Names),
zap.Error(err))
}
cfg.certCache.mu.Lock()
cfg.certCache.removeCertificate(cert)
cfg.certCache.mu.Unlock()
}
return cert, fmt.Errorf("unable to forcefully get new certificate for %v: %w", cert.Names, err)
}
return cfg.reloadManagedCertificate(ctx, cert)
}
// moveCompromisedPrivateKey moves the private key for cert to a ".compromised" file
// by copying the data to the new file, then deleting the old one.
func (cfg *Config) moveCompromisedPrivateKey(ctx context.Context, cert Certificate, logger *zap.Logger) error {
privKeyStorageKey := StorageKeys.SitePrivateKey(cert.issuerKey, cert.Names[0])
privKeyPEM, err := cfg.Storage.Load(ctx, privKeyStorageKey)
if err != nil {
return err
}
compromisedPrivKeyStorageKey := privKeyStorageKey + ".compromised"
err = cfg.Storage.Store(ctx, compromisedPrivKeyStorageKey, privKeyPEM)
if err != nil {
// better safe than sorry: as a last resort, try deleting the key so it won't be reused
cfg.Storage.Delete(ctx, privKeyStorageKey)
return err
}
err = cfg.Storage.Delete(ctx, privKeyStorageKey)
if err != nil {
return err
}
logger.Info("removed certificate's compromised private key from use",
zap.String("storage_path", compromisedPrivKeyStorageKey),
zap.Strings("identifiers", cert.Names),
zap.String("issuer", cert.issuerKey))
return nil
}
// certShouldBeForceRenewed returns true if cert should be forcefully renewed
// (like if it is revoked according to its OCSP response).
func certShouldBeForceRenewed(cert Certificate) bool {
return cert.managed &&
len(cert.Names) > 0 &&
cert.ocsp != nil &&
cert.ocsp.Status == ocsp.Revoked
}
const (
// DefaultRenewCheckInterval is how often to check certificates for expiration.
// Scans are very lightweight, so this can be semi-frequent. This default should
// be smaller than <Minimum Cert Lifetime>*DefaultRenewalWindowRatio/3, which
// gives certificates plenty of chance to be renewed on time.
DefaultRenewCheckInterval = 10 * time.Minute
// DefaultRenewalWindowRatio is how much of a certificate's lifetime becomes the
// renewal window. The renewal window is the span of time at the end of the
// certificate's validity period in which it should be renewed. A default value
// of ~1/3 is pretty safe and recommended for most certificates.
DefaultRenewalWindowRatio = 1.0 / 3.0
// DefaultOCSPCheckInterval is how often to check if OCSP stapling needs updating.
DefaultOCSPCheckInterval = 1 * time.Hour
)
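// exampleRenewalCutoff is an illustrative sketch added for this review, not part
// of upstream certmagic. It shows how the constants above combine for a
// hypothetical 90-day certificate: with DefaultRenewalWindowRatio of 1/3 the
// renewal window opens 30 days before expiry, and the 10-minute check interval
// gives the maintenance loop thousands of chances to renew inside it. The
// notAfter parameter is an assumed example value.
func exampleRenewalCutoff(notAfter time.Time) time.Time {
    lifetime := 90 * 24 * time.Hour
    window := time.Duration(float64(lifetime) * DefaultRenewalWindowRatio) // 30 days
    return notAfter.Add(-window) // renewal becomes due from this point on
}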

233
vendor/github.com/caddyserver/certmagic/ocsp.go generated vendored Normal file
View File

@@ -0,0 +1,233 @@
// Copyright 2015 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package certmagic
import (
"bytes"
"context"
"crypto/x509"
"encoding/pem"
"fmt"
"io"
"log"
"net/http"
"time"
"golang.org/x/crypto/ocsp"
)
// stapleOCSP staples OCSP information to cert for hostname name.
// If you have it handy, you should pass in the PEM-encoded certificate
// bundle; otherwise the DER-encoded cert will have to be PEM-encoded.
// If you don't have the PEM blocks already, just pass in nil.
//
// If successful, the OCSP response will be set to cert's ocsp field,
// regardless of the OCSP status. It is only stapled, however, if the
// status is Good.
//
// Errors here are not necessarily fatal; it could just be that the
// certificate doesn't have an issuer URL.
func stapleOCSP(ctx context.Context, ocspConfig OCSPConfig, storage Storage, cert *Certificate, pemBundle []byte) error {
if ocspConfig.DisableStapling {
return nil
}
if pemBundle == nil {
// we need a PEM encoding only for some function calls below
bundle := new(bytes.Buffer)
for _, derBytes := range cert.Certificate.Certificate {
pem.Encode(bundle, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
}
pemBundle = bundle.Bytes()
}
var ocspBytes []byte
var ocspResp *ocsp.Response
var ocspErr error
var gotNewOCSP bool
// First try to load OCSP staple from storage and see if
// we can still use it.
ocspStapleKey := StorageKeys.OCSPStaple(cert, pemBundle)
cachedOCSP, err := storage.Load(ctx, ocspStapleKey)
if err == nil {
resp, err := ocsp.ParseResponse(cachedOCSP, nil)
if err == nil {
if freshOCSP(resp) {
// staple is still fresh; use it
ocspBytes = cachedOCSP
ocspResp = resp
}
} else {
// invalid contents; delete the file
// (we do this independently of the maintenance routine because
// in this case we know for sure this should be a staple file
// because we loaded it by name, whereas the maintenance routine
// just iterates the list of files, even if somehow a non-staple
// file gets in the folder. in this case we are sure it is corrupt.)
err := storage.Delete(ctx, ocspStapleKey)
if err != nil {
log.Printf("[WARNING] Unable to delete invalid OCSP staple file: %v", err)
}
}
}
// If we couldn't get a fresh staple by reading the cache,
// then we need to request it from the OCSP responder
if ocspResp == nil || len(ocspBytes) == 0 {
ocspBytes, ocspResp, ocspErr = getOCSPForCert(ocspConfig, pemBundle)
if ocspErr != nil {
// An error here is not a problem because a certificate may simply
// not contain a link to an OCSP server. But we should log it anyway.
// There's nothing else we can do to get OCSP for this certificate,
// so we can return here with the error.
return fmt.Errorf("no OCSP stapling for %v: %v", cert.Names, ocspErr)
}
gotNewOCSP = true
}
if ocspResp.NextUpdate.After(cert.Leaf.NotAfter) {
// uh oh, this OCSP response expires AFTER the certificate does, that's kinda bogus.
// it was the reason a lot of Symantec-validated sites (not Caddy) went down
// in October 2017. https://twitter.com/mattiasgeniar/status/919432824708648961
return fmt.Errorf("invalid: OCSP response for %v valid after certificate expiration (%s)",
cert.Names, cert.Leaf.NotAfter.Sub(ocspResp.NextUpdate))
}
// Attach the latest OCSP response to the certificate; this is NOT the same
// as stapling it, which we do below only if the status is Good, but it is
// useful to keep with the cert in order to act on it later (like if Revoked).
cert.ocsp = ocspResp
// If the response is good, staple it to the certificate. If the OCSP
// response was not loaded from storage, we persist it for next time.
if ocspResp.Status == ocsp.Good {
cert.Certificate.OCSPStaple = ocspBytes
if gotNewOCSP {
err := storage.Store(ctx, ocspStapleKey, ocspBytes)
if err != nil {
return fmt.Errorf("unable to write OCSP staple file for %v: %v", cert.Names, err)
}
}
}
return nil
}
// getOCSPForCert takes a PEM encoded cert or cert bundle returning the raw OCSP response,
// the parsed response, and an error, if any. The returned []byte can be passed directly
// into the OCSPStaple property of a tls.Certificate. If the bundle only contains the
// issued certificate, this function will try to get the issuer certificate from the
// IssuingCertificateURL in the certificate. If the []byte and/or ocsp.Response return
// values are nil, the OCSP status may be assumed OCSPUnknown.
//
// Borrowed from xenolf.
func getOCSPForCert(ocspConfig OCSPConfig, bundle []byte) ([]byte, *ocsp.Response, error) {
// TODO: Perhaps this should be synchronized too, with a Locker?
certificates, err := parseCertsFromPEMBundle(bundle)
if err != nil {
return nil, nil, err
}
// We expect the certificate slice to be ordered downwards the chain.
// SRV CRT -> CA. We need to pull the leaf and issuer certs out of it,
// which should always be the first two certificates. If there's no
// OCSP server listed in the leaf cert, there's nothing to do. And if
// we have only one certificate so far, we need to get the issuer cert.
issuedCert := certificates[0]
if len(issuedCert.OCSPServer) == 0 {
return nil, nil, fmt.Errorf("no OCSP server specified in certificate")
}
// apply override for responder URL
respURL := issuedCert.OCSPServer[0]
if len(ocspConfig.ResponderOverrides) > 0 {
if override, ok := ocspConfig.ResponderOverrides[respURL]; ok {
respURL = override
}
}
if respURL == "" {
return nil, nil, fmt.Errorf("override disables querying OCSP responder: %v", issuedCert.OCSPServer[0])
}
if len(certificates) == 1 {
if len(issuedCert.IssuingCertificateURL) == 0 {
return nil, nil, fmt.Errorf("no URL to issuing certificate")
}
resp, err := http.Get(issuedCert.IssuingCertificateURL[0])
if err != nil {
return nil, nil, fmt.Errorf("getting issuer certificate: %v", err)
}
defer resp.Body.Close()
issuerBytes, err := io.ReadAll(io.LimitReader(resp.Body, 1024*1024))
if err != nil {
return nil, nil, fmt.Errorf("reading issuer certificate: %v", err)
}
issuerCert, err := x509.ParseCertificate(issuerBytes)
if err != nil {
return nil, nil, fmt.Errorf("parsing issuer certificate: %v", err)
}
// append the issuer cert to the chain so it stays
// ordered right: leaf (SRV CRT) -> CA
certificates = append(certificates, issuerCert)
}
issuerCert := certificates[1]
ocspReq, err := ocsp.CreateRequest(issuedCert, issuerCert, nil)
if err != nil {
return nil, nil, fmt.Errorf("creating OCSP request: %v", err)
}
reader := bytes.NewReader(ocspReq)
resp, err := http.Post(respURL, "application/ocsp-request", reader)
if err != nil {
return nil, nil, fmt.Errorf("making OCSP request: %v", err)
}
defer resp.Body.Close()
ocspResBytes, err := io.ReadAll(io.LimitReader(resp.Body, 1024*1024))
if err != nil {
return nil, nil, fmt.Errorf("reading OCSP response: %v", err)
}
ocspRes, err := ocsp.ParseResponse(ocspResBytes, issuerCert)
if err != nil {
return nil, nil, fmt.Errorf("parsing OCSP response: %v", err)
}
return ocspResBytes, ocspRes, nil
}
// freshOCSP returns true if resp is still fresh,
// meaning that it is not expedient to get an
// updated response from the OCSP server.
func freshOCSP(resp *ocsp.Response) bool {
nextUpdate := resp.NextUpdate
// If there is an OCSP responder certificate, and it expires before the
// OCSP response, use its expiration date as the end of the OCSP
// response's validity period.
if resp.Certificate != nil && resp.Certificate.NotAfter.Before(nextUpdate) {
nextUpdate = resp.Certificate.NotAfter
}
// start checking OCSP staple about halfway through validity period for good measure
refreshTime := resp.ThisUpdate.Add(nextUpdate.Sub(resp.ThisUpdate) / 2)
return time.Now().Before(refreshTime)
}
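// exampleStapleRefreshPoint is an illustrative sketch added for this review, not
// part of upstream certmagic. It spells out the midpoint rule freshOCSP applies:
// a cached staple valid from thisUpdate to nextUpdate stops being considered
// fresh halfway through that span, so e.g. a 7-day staple is refreshed after
// about 3.5 days. Both parameters are assumed example values.
func exampleStapleRefreshPoint(thisUpdate, nextUpdate time.Time) time.Time {
    return thisUpdate.Add(nextUpdate.Sub(thisUpdate) / 2)
}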

243
vendor/github.com/caddyserver/certmagic/ratelimiter.go generated vendored Normal file
View File

@@ -0,0 +1,243 @@
// Copyright 2015 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package certmagic
import (
"context"
"log"
"runtime"
"sync"
"time"
)
// NewRateLimiter returns a rate limiter that allows up to maxEvents
// in a sliding window of size window. If maxEvents and window are
// both 0, or if maxEvents is non-zero and window is 0, rate limiting
// is disabled. This function panics if maxEvents is less than 0 or
// if maxEvents is 0 and window is non-zero, which is considered to be
// an invalid configuration, as it would never allow events.
func NewRateLimiter(maxEvents int, window time.Duration) *RingBufferRateLimiter {
if maxEvents < 0 {
panic("maxEvents cannot be less than zero")
}
if maxEvents == 0 && window != 0 {
panic("invalid configuration: maxEvents = 0 and window != 0 would not allow any events")
}
rbrl := &RingBufferRateLimiter{
window: window,
ring: make([]time.Time, maxEvents),
started: make(chan struct{}),
stopped: make(chan struct{}),
ticket: make(chan struct{}),
}
go rbrl.loop()
<-rbrl.started // make sure loop is ready to receive before we return
return rbrl
}
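// exampleThrottledLoop is an illustrative sketch added for this review, not part
// of upstream certmagic. It shows the intended use of the limiter documented
// above: allow at most 10 events per sliding minute, block in Wait until a slot
// frees up, and Stop the limiter when finished. The limits and the loop count
// are assumed example values.
func exampleThrottledLoop(ctx context.Context) error {
    limiter := NewRateLimiter(10, 1*time.Minute)
    defer limiter.Stop()
    for i := 0; i < 3; i++ {
        if err := limiter.Wait(ctx); err != nil {
            return err // context ended while waiting for a ticket
        }
        // ... perform the rate-limited work here ...
    }
    return nil
}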
// RingBufferRateLimiter uses a ring to enforce rate limits
// consisting of a maximum number of events within a single
// sliding window of a given duration. An empty value is
// not valid; use NewRateLimiter to get one.
type RingBufferRateLimiter struct {
window time.Duration
ring []time.Time // maxEvents == len(ring)
cursor int // always points to the oldest timestamp
mu sync.Mutex // protects ring, cursor, and window
started chan struct{}
stopped chan struct{}
ticket chan struct{}
}
// Stop cleans up r's scheduling goroutine.
func (r *RingBufferRateLimiter) Stop() {
close(r.stopped)
}
func (r *RingBufferRateLimiter) loop() {
defer func() {
if err := recover(); err != nil {
buf := make([]byte, stackTraceBufferSize)
buf = buf[:runtime.Stack(buf, false)]
log.Printf("panic: ring buffer rate limiter: %v\n%s", err, buf)
}
}()
for {
// if we've been stopped, return
select {
case <-r.stopped:
return
default:
}
if len(r.ring) == 0 {
if r.window == 0 {
// rate limiting is disabled; always allow immediately
r.permit()
continue
}
panic("invalid configuration: maxEvents = 0 and window != 0 does not allow any events")
}
// wait until next slot is available or until we've been stopped
r.mu.Lock()
then := r.ring[r.cursor].Add(r.window)
r.mu.Unlock()
waitDuration := time.Until(then)
waitTimer := time.NewTimer(waitDuration)
select {
case <-waitTimer.C:
r.permit()
case <-r.stopped:
waitTimer.Stop()
return
}
}
}
// Allow returns true if the event is allowed to
// happen right now. It does not wait. If the event
// is allowed, a ticket is claimed.
func (r *RingBufferRateLimiter) Allow() bool {
select {
case <-r.ticket:
return true
default:
return false
}
}
// Wait blocks until the event is allowed to occur. It returns an
// error if the context is cancelled.
func (r *RingBufferRateLimiter) Wait(ctx context.Context) error {
select {
case <-ctx.Done():
return ctx.Err()
case <-r.ticket:
return nil
}
}
// MaxEvents returns the maximum number of events that
// are allowed within the sliding window.
func (r *RingBufferRateLimiter) MaxEvents() int {
r.mu.Lock()
defer r.mu.Unlock()
return len(r.ring)
}
// SetMaxEvents changes the maximum number of events that are
// allowed in the sliding window. If the new limit is lower,
// the oldest events will be forgotten. If the new limit is
// higher, the window will suddenly have capacity for new
// reservations. It panics if maxEvents is 0 and window size
// is not zero.
func (r *RingBufferRateLimiter) SetMaxEvents(maxEvents int) {
newRing := make([]time.Time, maxEvents)
r.mu.Lock()
defer r.mu.Unlock()
if r.window != 0 && maxEvents == 0 {
panic("invalid configuration: maxEvents = 0 and window != 0 would not allow any events")
}
// only make the change if the new limit is different
if maxEvents == len(r.ring) {
return
}
// the new ring may be smaller; fast-forward to the
// oldest timestamp that will be kept in the new
// ring so the oldest ones are forgotten and the
// newest ones will be remembered
sizeDiff := len(r.ring) - maxEvents
for i := 0; i < sizeDiff; i++ {
r.advance()
}
if len(r.ring) > 0 {
// copy timestamps into the new ring until we
// have either copied all of them or have reached
// the capacity of the new ring
startCursor := r.cursor
for i := 0; i < len(newRing); i++ {
newRing[i] = r.ring[r.cursor]
r.advance()
if r.cursor == startCursor {
// new ring is larger than old one;
// "we've come full circle"
break
}
}
}
r.ring = newRing
r.cursor = 0
}
// Window returns the size of the sliding window.
func (r *RingBufferRateLimiter) Window() time.Duration {
r.mu.Lock()
defer r.mu.Unlock()
return r.window
}
// SetWindow changes r's sliding window duration to window.
// Goroutines that are already blocked on a call to Wait()
// will not be affected. It panics if window is non-zero
// but the max event limit is 0.
func (r *RingBufferRateLimiter) SetWindow(window time.Duration) {
r.mu.Lock()
defer r.mu.Unlock()
if window != 0 && len(r.ring) == 0 {
panic("invalid configuration: maxEvents = 0 and window != 0 would not allow any events")
}
r.window = window
}
// permit allows one event through the throttle. This method
// blocks until a goroutine is waiting for a ticket or until
// the rate limiter is stopped.
func (r *RingBufferRateLimiter) permit() {
for {
select {
case r.started <- struct{}{}:
// notify parent goroutine that we've started; should
// only happen once, before constructor returns
continue
case <-r.stopped:
return
case r.ticket <- struct{}{}:
r.mu.Lock()
defer r.mu.Unlock()
if len(r.ring) > 0 {
r.ring[r.cursor] = time.Now()
r.advance()
}
return
}
}
}
// advance moves the cursor to the next position.
// It is NOT safe for concurrent use, so it must
// be called inside a lock on r.mu.
func (r *RingBufferRateLimiter) advance() {
r.cursor++
if r.cursor >= len(r.ring) {
r.cursor = 0
}
}

730
vendor/github.com/caddyserver/certmagic/solvers.go generated vendored Normal file
View File

@@ -0,0 +1,730 @@
// Copyright 2015 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package certmagic
import (
"context"
"crypto/tls"
"encoding/json"
"fmt"
"log"
"net"
"net/http"
"path"
"runtime"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/libdns/libdns"
"github.com/mholt/acmez"
"github.com/mholt/acmez/acme"
"github.com/miekg/dns"
)
// httpSolver solves the HTTP challenge. It must be
// associated with a config and an address to use
// for solving the challenge. If multiple httpSolvers
// are initialized concurrently, the first one to
// begin will start the server, and the last one to
// finish will stop the server. This solver must be
// wrapped by a distributedSolver to work properly,
// because the only way the HTTP challenge handler
// can access the keyAuth material is by loading it
// from storage, which is done by distributedSolver.
type httpSolver struct {
closed int32 // accessed atomically
acmeIssuer *ACMEIssuer
address string
}
// Present starts an HTTP server if none is already listening on s.address.
func (s *httpSolver) Present(ctx context.Context, _ acme.Challenge) error {
solversMu.Lock()
defer solversMu.Unlock()
si := getSolverInfo(s.address)
si.count++
if si.listener != nil {
return nil // already being served by us
}
// notice the unusual error handling here; we
// only continue to start a challenge server if
// we got a listener; in all other cases return
ln, err := robustTryListen(s.address)
if ln == nil {
return err
}
// successfully bound socket, so save listener and start key auth HTTP server
si.listener = ln
go s.serve(ctx, si)
return nil
}
// serve is an HTTP server that serves only HTTP challenge responses.
func (s *httpSolver) serve(ctx context.Context, si *solverInfo) {
defer func() {
if err := recover(); err != nil {
buf := make([]byte, stackTraceBufferSize)
buf = buf[:runtime.Stack(buf, false)]
log.Printf("panic: http solver server: %v\n%s", err, buf)
}
}()
defer close(si.done)
httpServer := &http.Server{
Handler: s.acmeIssuer.HTTPChallengeHandler(http.NewServeMux()),
BaseContext: func(listener net.Listener) context.Context { return ctx },
}
httpServer.SetKeepAlivesEnabled(false)
err := httpServer.Serve(si.listener)
if err != nil && atomic.LoadInt32(&s.closed) != 1 {
log.Printf("[ERROR] key auth HTTP server: %v", err)
}
}
// CleanUp cleans up the HTTP server if it is the last one to finish.
func (s *httpSolver) CleanUp(ctx context.Context, _ acme.Challenge) error {
solversMu.Lock()
defer solversMu.Unlock()
si := getSolverInfo(s.address)
si.count--
if si.count == 0 {
// last one out turns off the lights
atomic.StoreInt32(&s.closed, 1)
if si.listener != nil {
si.listener.Close()
<-si.done
}
delete(solvers, s.address)
}
return nil
}
// tlsALPNSolver is a type that can solve TLS-ALPN challenges.
// It must have an associated config and address on which to
// serve the challenge.
type tlsALPNSolver struct {
config *Config
address string
}
// Present adds the certificate to the certificate cache and, if
// needed, starts a TLS server for answering TLS-ALPN challenges.
func (s *tlsALPNSolver) Present(ctx context.Context, chal acme.Challenge) error {
// we pre-generate the certificate for efficiency with multi-perspective
// validation, so it only has to be done once (at least, by this instance;
// distributed solving does not have that luxury, oh well) - update the
// challenge data in memory to be the generated certificate
cert, err := acmez.TLSALPN01ChallengeCert(chal)
if err != nil {
return err
}
key := challengeKey(chal)
activeChallengesMu.Lock()
chalData := activeChallenges[key]
chalData.data = cert
activeChallenges[key] = chalData
activeChallengesMu.Unlock()
// the rest of this function increments the
// challenge count for the solver at this
// listener address, and if necessary, starts
// a simple TLS server
solversMu.Lock()
defer solversMu.Unlock()
si := getSolverInfo(s.address)
si.count++
if si.listener != nil {
return nil // already being served by us
}
// notice the unusual error handling here; we
// only continue to start a challenge server if
// we got a listener; in all other cases return
ln, err := robustTryListen(s.address)
if ln == nil {
return err
}
// we were able to bind the socket, so make it into a TLS
// listener, store it with the solverInfo, and start the
// challenge server
si.listener = tls.NewListener(ln, s.config.TLSConfig())
go func() {
defer func() {
if err := recover(); err != nil {
buf := make([]byte, stackTraceBufferSize)
buf = buf[:runtime.Stack(buf, false)]
log.Printf("panic: tls-alpn solver server: %v\n%s", err, buf)
}
}()
defer close(si.done)
for {
conn, err := si.listener.Accept()
if err != nil {
if atomic.LoadInt32(&si.closed) == 1 {
return
}
log.Printf("[ERROR] TLS-ALPN challenge server: accept: %v", err)
continue
}
go s.handleConn(conn)
}
}()
return nil
}
// handleConn completes the TLS handshake and then closes conn.
func (*tlsALPNSolver) handleConn(conn net.Conn) {
defer func() {
if err := recover(); err != nil {
buf := make([]byte, stackTraceBufferSize)
buf = buf[:runtime.Stack(buf, false)]
log.Printf("panic: tls-alpn solver handler: %v\n%s", err, buf)
}
}()
defer conn.Close()
tlsConn, ok := conn.(*tls.Conn)
if !ok {
log.Printf("[ERROR] TLS-ALPN challenge server: expected tls.Conn but got %T: %#v", conn, conn)
return
}
err := tlsConn.Handshake()
if err != nil {
log.Printf("[ERROR] TLS-ALPN challenge server: handshake: %v", err)
return
}
}
// CleanUp removes the challenge certificate from the cache, and if
// it is the last one to finish, stops the TLS server.
func (s *tlsALPNSolver) CleanUp(ctx context.Context, chal acme.Challenge) error {
solversMu.Lock()
defer solversMu.Unlock()
si := getSolverInfo(s.address)
si.count--
if si.count == 0 {
// last one out turns off the lights
atomic.StoreInt32(&si.closed, 1)
if si.listener != nil {
si.listener.Close()
<-si.done
}
delete(solvers, s.address)
}
return nil
}
// DNS01Solver is a type that makes libdns providers usable as ACME dns-01
// challenge solvers. See https://github.com/libdns/libdns
//
// Note that challenges may be solved concurrently by some clients (such as
// acmez, which CertMagic uses), meaning that multiple TXT records may be
// created in a DNS zone simultaneously, and in some cases distinct TXT records
// may have the same name. For example, solving challenges for both example.com
// and *.example.com creates two TXT records named _acme-challenge.example.com,
// but with different tokens as their values. This solver distinguishes
// between different records with the same name by looking at their values.
// DNS provider APIs and implementations of the libdns interfaces must also
// support multiple same-named TXT records.
type DNS01Solver struct {
// The implementation that interacts with the DNS
// provider to set or delete records. (REQUIRED)
DNSProvider ACMEDNSProvider
// The TTL for the temporary challenge records.
TTL time.Duration
// How long to wait before starting propagation checks.
// Default: 0 (no wait).
PropagationDelay time.Duration
// Maximum time to wait for temporary DNS record to appear.
// Set to -1 to disable propagation checks.
// Default: 2 minutes.
PropagationTimeout time.Duration
// Preferred DNS resolver(s) to use when doing DNS lookups.
Resolvers []string
// Override the domain to set the TXT record on. This is
// to delegate the challenge to a different domain. Note
// that the solver doesn't follow CNAME/NS records.
OverrideDomain string
// Remember DNS records while challenges are active; i.e.
// records we have presented and not yet cleaned up.
// This lets us clean them up quickly and efficiently.
// Keyed by domain name (specifically the ACME DNS name).
// The map value is a slice because there can be multiple
// concurrent challenges for different domains that have
// the same ACME DNS name, for example: example.com and
// *.example.com. We distinguish individual memories by
// the value of their TXT records, which should contain
// unique challenge tokens.
// See https://github.com/caddyserver/caddy/issues/3474.
txtRecords map[string][]dnsPresentMemory
txtRecordsMu sync.Mutex
}
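// exampleDNS01Config is an illustrative sketch added for this review, not part
// of upstream certmagic. It shows how a libdns-backed provider would be plugged
// into the solver described above; the provider argument is assumed to be any
// implementation from https://github.com/libdns, and the timing values are
// assumed examples.
func exampleDNS01Config(provider ACMEDNSProvider) *DNS01Solver {
    return &DNS01Solver{
        DNSProvider:        provider,
        TTL:                5 * time.Minute,  // short-lived challenge records
        PropagationDelay:   30 * time.Second, // give secondary nameservers a head start
        PropagationTimeout: 2 * time.Minute,  // how long Wait will poll for the TXT record
    }
}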
// Present creates the DNS TXT record for the given ACME challenge.
func (s *DNS01Solver) Present(ctx context.Context, challenge acme.Challenge) error {
dnsName := challenge.DNS01TXTRecordName()
if s.OverrideDomain != "" {
dnsName = s.OverrideDomain
}
keyAuth := challenge.DNS01KeyAuthorization()
zone, err := findZoneByFQDN(dnsName, recursiveNameservers(s.Resolvers))
if err != nil {
return fmt.Errorf("could not determine zone for domain %q: %v", dnsName, err)
}
rec := libdns.Record{
Type: "TXT",
Name: libdns.RelativeName(dnsName+".", zone),
Value: keyAuth,
TTL: s.TTL,
}
results, err := s.DNSProvider.AppendRecords(ctx, zone, []libdns.Record{rec})
if err != nil {
return fmt.Errorf("adding temporary record for zone %q: %w", zone, err)
}
if len(results) != 1 {
return fmt.Errorf("expected one record, got %d: %v", len(results), results)
}
// remember the record and zone we got so we can clean up more efficiently
s.saveDNSPresentMemory(dnsPresentMemory{
dnsZone: zone,
dnsName: dnsName,
rec: results[0],
})
return nil
}
// Wait blocks until the TXT record created in Present() appears in
// authoritative lookups, i.e. until it has propagated, or until
// timeout, whichever is first.
func (s *DNS01Solver) Wait(ctx context.Context, challenge acme.Challenge) error {
// if configured to, pause before doing propagation checks
// (even if they are disabled, the wait might be desirable on its own)
if s.PropagationDelay > 0 {
select {
case <-time.After(s.PropagationDelay):
case <-ctx.Done():
return ctx.Err()
}
}
// skip propagation checks if configured to do so
if s.PropagationTimeout == -1 {
return nil
}
// prepare for the checks by determining what to look for
dnsName := challenge.DNS01TXTRecordName()
if s.OverrideDomain != "" {
dnsName = s.OverrideDomain
}
keyAuth := challenge.DNS01KeyAuthorization()
// timings
timeout := s.PropagationTimeout
if timeout == 0 {
timeout = 2 * time.Minute
}
const interval = 2 * time.Second
// how we'll do the checks
resolvers := recursiveNameservers(s.Resolvers)
var err error
start := time.Now()
for time.Since(start) < timeout {
select {
case <-time.After(interval):
case <-ctx.Done():
return ctx.Err()
}
var ready bool
ready, err = checkDNSPropagation(dnsName, keyAuth, resolvers)
if err != nil {
return fmt.Errorf("checking DNS propagation of %q: %w", dnsName, err)
}
if ready {
return nil
}
}
return fmt.Errorf("timed out waiting for record to fully propagate; verify DNS provider configuration is correct - last error: %v", err)
}
// CleanUp deletes the DNS TXT record created in Present().
func (s *DNS01Solver) CleanUp(ctx context.Context, challenge acme.Challenge) error {
dnsName := challenge.DNS01TXTRecordName()
if s.OverrideDomain != "" {
dnsName = s.OverrideDomain
}
keyAuth := challenge.DNS01KeyAuthorization()
// always forget about the record so we don't leak memory
defer s.deleteDNSPresentMemory(dnsName, keyAuth)
// recall the record we created and zone we looked up
memory, err := s.getDNSPresentMemory(dnsName, keyAuth)
if err != nil {
return err
}
// clean up the record
_, err = s.DNSProvider.DeleteRecords(ctx, memory.dnsZone, []libdns.Record{memory.rec})
if err != nil {
return fmt.Errorf("deleting temporary record for name %q in zone %q: %w", memory.dnsName, memory.dnsZone, err)
}
return nil
}
type dnsPresentMemory struct {
dnsZone string
dnsName string
rec libdns.Record
}
func (s *DNS01Solver) saveDNSPresentMemory(mem dnsPresentMemory) {
s.txtRecordsMu.Lock()
if s.txtRecords == nil {
s.txtRecords = make(map[string][]dnsPresentMemory)
}
s.txtRecords[mem.dnsName] = append(s.txtRecords[mem.dnsName], mem)
s.txtRecordsMu.Unlock()
}
func (s *DNS01Solver) getDNSPresentMemory(dnsName, keyAuth string) (dnsPresentMemory, error) {
s.txtRecordsMu.Lock()
defer s.txtRecordsMu.Unlock()
var memory dnsPresentMemory
for _, mem := range s.txtRecords[dnsName] {
if mem.rec.Value == keyAuth {
memory = mem
break
}
}
if memory.rec.Name == "" {
return dnsPresentMemory{}, fmt.Errorf("no memory of presenting a DNS record for %q (usually OK if presenting also failed)", dnsName)
}
return memory, nil
}
func (s *DNS01Solver) deleteDNSPresentMemory(dnsName, keyAuth string) {
s.txtRecordsMu.Lock()
defer s.txtRecordsMu.Unlock()
for i, mem := range s.txtRecords[dnsName] {
if mem.rec.Value == keyAuth {
s.txtRecords[dnsName] = append(s.txtRecords[dnsName][:i], s.txtRecords[dnsName][i+1:]...)
return
}
}
}
// ACMEDNSProvider defines the set of operations required for
// ACME challenges. A DNS provider must be able to append and
// delete records in order to solve ACME challenges. Find one
// you can use at https://github.com/libdns. If your provider
// isn't implemented yet, feel free to contribute!
type ACMEDNSProvider interface {
libdns.RecordAppender
libdns.RecordDeleter
}
// distributedSolver allows the ACME HTTP-01 and TLS-ALPN challenges
// to be solved by an instance other than the one which initiated it.
// This is useful behind load balancers or in other cluster/fleet
// configurations. The only requirement is that the instance which
// initiates the challenge shares the same storage and locker with
// the others in the cluster. The storage backing the certificate
// cache in distributedSolver.config is crucial.
//
// Obviously, the instance which completes the challenge must be
// serving on the HTTPChallengePort for the HTTP-01 challenge or the
// TLSALPNChallengePort for the TLS-ALPN-01 challenge (or have all
// the packets port-forwarded) to receive and handle the request. The
// server which receives the challenge must handle it by checking to
// see if the challenge token exists in storage, and if so, decode it
// and use it to serve up the correct response. HTTPChallengeHandler
// in this package as well as the GetCertificate method implemented
// by a Config support and even require this behavior.
//
// In short: the only two requirements for cluster operation are
// sharing sync and storage, and using the facilities provided by
// this package for solving the challenges.
type distributedSolver struct {
// The storage backing the distributed solver. It must be
// the same storage configuration as what is solving the
// challenge in order to be effective.
storage Storage
// The storage key prefix, associated with the issuer
// that is solving the challenge.
storageKeyIssuerPrefix string
// Since the distributedSolver is only a
// wrapper over an actual solver, place
// the actual solver here.
solver acmez.Solver
}
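// exampleDistributed is an illustrative sketch added for this review, not part
// of upstream certmagic and not how the issuer actually constructs its solvers.
// It only shows the wiring the comment above describes: an in-process solver is
// wrapped so that any cluster member sharing the same storage can answer the
// challenge. The storage, issuerPrefix, and inner solver are assumed inputs.
func exampleDistributed(storage Storage, issuerPrefix string, inner acmez.Solver) acmez.Solver {
    return distributedSolver{
        storage:                storage,
        storageKeyIssuerPrefix: issuerPrefix, // the issuer's storage key prefix
        solver:                 inner,
    }
}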
// Present invokes the underlying solver's Present method
// and also stores the challenge information to dhs.storage
// so that other instances in the cluster can solve it.
func (dhs distributedSolver) Present(ctx context.Context, chal acme.Challenge) error {
infoBytes, err := json.Marshal(chal)
if err != nil {
return err
}
err = dhs.storage.Store(ctx, dhs.challengeTokensKey(challengeKey(chal)), infoBytes)
if err != nil {
return err
}
err = dhs.solver.Present(ctx, chal)
if err != nil {
return fmt.Errorf("presenting with embedded solver: %v", err)
}
return nil
}
// Wait wraps the underlying solver's Wait() method, if any. Implements acmez.Waiter.
func (dhs distributedSolver) Wait(ctx context.Context, challenge acme.Challenge) error {
if waiter, ok := dhs.solver.(acmez.Waiter); ok {
return waiter.Wait(ctx, challenge)
}
return nil
}
// CleanUp invokes the underlying solver's CleanUp method
// and also cleans up any assets saved to storage.
func (dhs distributedSolver) CleanUp(ctx context.Context, chal acme.Challenge) error {
err := dhs.storage.Delete(ctx, dhs.challengeTokensKey(challengeKey(chal)))
if err != nil {
return err
}
err = dhs.solver.CleanUp(ctx, chal)
if err != nil {
return fmt.Errorf("cleaning up embedded provider: %v", err)
}
return nil
}
// challengeTokensPrefix returns the key prefix for challenge info.
func (dhs distributedSolver) challengeTokensPrefix() string {
return path.Join(dhs.storageKeyIssuerPrefix, "challenge_tokens")
}
// challengeTokensKey returns the key to use to store and access
// challenge info for domain.
func (dhs distributedSolver) challengeTokensKey(domain string) string {
return path.Join(dhs.challengeTokensPrefix(), StorageKeys.Safe(domain)+".json")
}
// solverInfo associates a listener with the
// number of challenges currently using it.
type solverInfo struct {
closed int32 // accessed atomically
count int
listener net.Listener
done chan struct{} // used to signal when our own solver server is done
}
// getSolverInfo gets a valid solverInfo struct for address.
func getSolverInfo(address string) *solverInfo {
si, ok := solvers[address]
if !ok {
si = &solverInfo{done: make(chan struct{})}
solvers[address] = si
}
return si
}
// robustTryListen calls net.Listen for a TCP socket at addr.
// This function may return both a nil listener and a nil error!
// If it was able to bind the socket, it returns the listener
// and no error. If it wasn't able to bind the socket because
// the socket is already in use, then it returns a nil listener
// and nil error. If it had any other error, it returns the
// error. The intended error handling logic for this function
// is to proceed if the returned listener is not nil; otherwise
// return err (which may also be nil). In other words, this
// function ignores errors if the socket is already in use,
// which is useful for our challenge servers, where we assume
// that whatever is already listening can solve the challenges.
func robustTryListen(addr string) (net.Listener, error) {
var listenErr error
for i := 0; i < 2; i++ {
// doesn't hurt to sleep briefly before the second
// attempt in case the OS has timing issues
if i > 0 {
time.Sleep(100 * time.Millisecond)
}
// if we can bind the socket right away, great!
var ln net.Listener
ln, listenErr = net.Listen("tcp", addr)
if listenErr == nil {
return ln, nil
}
// if it failed just because the socket is already in use, we
// have no choice but to assume that whatever is using the socket
// can answer the challenge already, so we ignore the error
connectErr := dialTCPSocket(addr)
if connectErr == nil {
return nil, nil
}
// hmm, we couldn't connect to the socket, so something else must
// be wrong, right? wrong!! we've had reports across multiple OSes
// now that sometimes connections fail even though the OS told us
// that the address was already in use; either the listener is
// fluctuating between open and closed very, very quickly, or the
// OS is inconsistent and contradicting itself; I have been unable
// to reproduce this, so I'm now resorting to hard-coding substring
// matching in error messages as a really hacky and unreliable
// safeguard against this, until we can identify exactly what was
// happening; see the following threads for more info:
// https://caddy.community/t/caddy-retry-error/7317
// https://caddy.community/t/v2-upgrade-to-caddy2-failing-with-errors/7423
if strings.Contains(listenErr.Error(), "address already in use") ||
strings.Contains(listenErr.Error(), "one usage of each socket address") {
log.Printf("[WARNING] OS reports a contradiction: %v - but we cannot connect to it, with this error: %v; continuing anyway 🤞 (I don't know what causes this... if you do, please help?)", listenErr, connectErr)
return nil, nil
}
}
return nil, fmt.Errorf("could not start listener for challenge server at %s: %v", addr, listenErr)
}
// dialTCPSocket connects to a TCP address just for the sake of
// seeing if it is open. It returns a nil error if a TCP connection
// can successfully be made to addr within a short timeout.
func dialTCPSocket(addr string) error {
conn, err := net.DialTimeout("tcp", addr, 250*time.Millisecond)
if err == nil {
conn.Close()
}
return err
}
// GetACMEChallenge returns an active ACME challenge for the given identifier,
// or false if no active challenge for that identifier is known.
func GetACMEChallenge(identifier string) (Challenge, bool) {
activeChallengesMu.Lock()
chalData, ok := activeChallenges[identifier]
activeChallengesMu.Unlock()
return chalData, ok
}
// The active challenge solvers, keyed by listener address,
// and protected by a mutex. Note that the creation of
// solver listeners and the incrementing of their counts
// are atomic operations guarded by this mutex.
var (
solvers = make(map[string]*solverInfo)
solversMu sync.Mutex
)
// activeChallenges holds information about all known, currently-active
// ACME challenges, keyed by identifier. CertMagic guarantees that
// challenges for the same identifier do not overlap, by its locking
// mechanisms; thus if a challenge comes in for a certain identifier,
// we can be confident that if this process initiated the challenge,
// the correct information to solve it is in this map. (It may have
// alternatively been initiated by another instance in a cluster, in
// which case the distributed solver will take care of that.)
var (
activeChallenges = make(map[string]Challenge)
activeChallengesMu sync.Mutex
)
// Challenge is an ACME challenge, but optionally paired with
// data that can make it easier or more efficient to solve.
type Challenge struct {
acme.Challenge
data interface{}
}
// challengeKey returns the map key for a given challenge; it is the identifier
// unless it is an IP address using the TLS-ALPN challenge.
func challengeKey(chal acme.Challenge) string {
if chal.Type == acme.ChallengeTypeTLSALPN01 && chal.Identifier.Type == "ip" {
reversed, err := dns.ReverseAddr(chal.Identifier.Value)
if err == nil {
return reversed[:len(reversed)-1] // strip off '.'
}
}
return chal.Identifier.Value
}
// solverWrapper should be used to wrap all challenge solvers so that
// we can add the challenge info to memory; this makes challenges globally
// solvable by a single HTTP or TLS server even if multiple servers with
// different configurations/scopes need to get certificates.
type solverWrapper struct{ acmez.Solver }
func (sw solverWrapper) Present(ctx context.Context, chal acme.Challenge) error {
activeChallengesMu.Lock()
activeChallenges[challengeKey(chal)] = Challenge{Challenge: chal}
activeChallengesMu.Unlock()
return sw.Solver.Present(ctx, chal)
}
func (sw solverWrapper) Wait(ctx context.Context, chal acme.Challenge) error {
if waiter, ok := sw.Solver.(acmez.Waiter); ok {
return waiter.Wait(ctx, chal)
}
return nil
}
func (sw solverWrapper) CleanUp(ctx context.Context, chal acme.Challenge) error {
activeChallengesMu.Lock()
delete(activeChallenges, challengeKey(chal))
activeChallengesMu.Unlock()
return sw.Solver.CleanUp(ctx, chal)
}
// Interface guards
var (
_ acmez.Solver = (*solverWrapper)(nil)
_ acmez.Waiter = (*solverWrapper)(nil)
_ acmez.Waiter = (*distributedSolver)(nil)
)

280
vendor/github.com/caddyserver/certmagic/storage.go generated vendored Normal file
View File

@@ -0,0 +1,280 @@
// Copyright 2015 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package certmagic
import (
"context"
"path"
"regexp"
"strings"
"sync"
"time"
"go.uber.org/zap"
)
// Storage is a type that implements a key-value store.
// Keys are prefix-based, with forward slash '/' as separators
// and without a leading slash.
//
// Processes running in a cluster will wish to use the
// same Storage value (its implementation and configuration)
// in order to share certificates and other TLS resources
// with the cluster.
//
// The Load, Delete, List, and Stat methods should return
// fs.ErrNotExist if the key does not exist.
//
// Implementations of Storage must be safe for concurrent use
// and honor context cancellations.
type Storage interface {
// Locker provides atomic synchronization
// operations, making Storage safe to share.
Locker
// Store puts value at key.
Store(ctx context.Context, key string, value []byte) error
// Load retrieves the value at key.
Load(ctx context.Context, key string) ([]byte, error)
// Delete deletes key. An error should be
// returned only if the key still exists
// when the method returns.
Delete(ctx context.Context, key string) error
// Exists returns true if the key exists
// and there was no error checking.
Exists(ctx context.Context, key string) bool
// List returns all keys that match prefix.
// If recursive is true, non-terminal keys
// will be enumerated (i.e. "directories"
// should be walked); otherwise, only keys
// prefixed exactly by prefix will be listed.
List(ctx context.Context, prefix string, recursive bool) ([]string, error)
// Stat returns information about key.
Stat(ctx context.Context, key string) (KeyInfo, error)
}
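// exampleSharedStorage is an illustrative sketch added for this review, not part
// of upstream certmagic. As the comment above notes, cluster members share TLS
// assets by pointing at the same Storage; with the bundled FileStorage that
// simply means the same path on every node. The path is an assumed example value.
func exampleSharedStorage() Storage {
    return &FileStorage{Path: "/var/lib/certmagic"} // identical on every instance
}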
// Locker facilitates synchronization of certificate tasks across
// machines and networks.
type Locker interface {
// Lock acquires the lock for key, blocking until the lock
// can be obtained or an error is returned. Note that, even
// after acquiring a lock, an idempotent operation may have
// already been performed by another process that acquired
// the lock before - so always check to make sure idempotent
// operations still need to be performed after acquiring the
// lock.
//
// The actual implementation of obtaining a lock must be
// an atomic operation so that multiple Lock calls at the
// same time always result in only one caller receiving the
// lock at any given time.
//
// To prevent deadlocks, all implementations (where this concern
// is relevant) should put a reasonable expiration on the lock in
// case Unlock is unable to be called due to some sort of network
// failure or system crash. Additionally, implementations should
// honor context cancellation as much as possible (in case the
// caller wishes to give up and free resources before the lock
// can be obtained).
Lock(ctx context.Context, key string) error
// Unlock releases the lock for key. This method must ONLY be
// called after a successful call to Lock, and only after the
// critical section is finished, even if it errored or timed
// out. Unlock cleans up any resources allocated during Lock.
Unlock(ctx context.Context, key string) error
}
// KeyInfo holds information about a key in storage.
// Key and IsTerminal are required; Modified and Size
// are optional if the storage implementation is not
// able to get that information. Setting them will
// make certain operations more consistent or
// predictable, but it is not crucial to basic
// functionality.
type KeyInfo struct {
Key string
Modified time.Time
Size int64
IsTerminal bool // false for keys that only contain other keys (like directories)
}
// storeTx stores all the values or none at all.
func storeTx(ctx context.Context, s Storage, all []keyValue) error {
for i, kv := range all {
err := s.Store(ctx, kv.key, kv.value)
if err != nil {
for j := i - 1; j >= 0; j-- {
s.Delete(ctx, all[j].key)
}
return err
}
}
return nil
}
// keyValue pairs a key and a value.
type keyValue struct {
key string
value []byte
}
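// exampleStoreTx is an illustrative sketch added for this review, not part of
// upstream certmagic. It shows the all-or-nothing helper above writing a
// certificate bundle: if any Store call fails, the keys written so far are
// deleted again. The keys and values are assumed example data.
func exampleStoreTx(ctx context.Context, s Storage) error {
    return storeTx(ctx, s, []keyValue{
        {key: "certificates/example-issuer/example.com/example.com.crt", value: []byte("...cert PEM...")},
        {key: "certificates/example-issuer/example.com/example.com.key", value: []byte("...key PEM...")},
        {key: "certificates/example-issuer/example.com/example.com.json", value: []byte("{}")},
    })
}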
// KeyBuilder provides a namespace for methods that
// build keys and key prefixes, for addressing items
// in a Storage implementation.
type KeyBuilder struct{}
// CertsPrefix returns the storage key prefix for
// the given certificate issuer.
func (keys KeyBuilder) CertsPrefix(issuerKey string) string {
return path.Join(prefixCerts, keys.Safe(issuerKey))
}
// CertsSitePrefix returns a key prefix for items associated with
// the site given by domain using the given issuer key.
func (keys KeyBuilder) CertsSitePrefix(issuerKey, domain string) string {
return path.Join(keys.CertsPrefix(issuerKey), keys.Safe(domain))
}
// SiteCert returns the path to the certificate file for domain
// that is associated with the issuer with the given issuerKey.
func (keys KeyBuilder) SiteCert(issuerKey, domain string) string {
safeDomain := keys.Safe(domain)
return path.Join(keys.CertsSitePrefix(issuerKey, domain), safeDomain+".crt")
}
// SitePrivateKey returns the path to the private key file for domain
// that is associated with the certificate from the given issuer with
// the given issuerKey.
func (keys KeyBuilder) SitePrivateKey(issuerKey, domain string) string {
safeDomain := keys.Safe(domain)
return path.Join(keys.CertsSitePrefix(issuerKey, domain), safeDomain+".key")
}
// SiteMeta returns the path to the metadata file for domain that
// is associated with the certificate from the given issuer with
// the given issuerKey.
func (keys KeyBuilder) SiteMeta(issuerKey, domain string) string {
safeDomain := keys.Safe(domain)
return path.Join(keys.CertsSitePrefix(issuerKey, domain), safeDomain+".json")
}
// OCSPStaple returns a key for the OCSP staple associated
// with the given certificate. If you have the PEM bundle
// handy, pass that in to save an extra encoding step.
func (keys KeyBuilder) OCSPStaple(cert *Certificate, pemBundle []byte) string {
var ocspFileName string
if len(cert.Names) > 0 {
firstName := keys.Safe(cert.Names[0])
ocspFileName = firstName + "-"
}
ocspFileName += fastHash(pemBundle)
return path.Join(prefixOCSP, ocspFileName)
}
// Safe standardizes and sanitizes str for use as
// a single component of a storage key. This method
// is idempotent.
func (keys KeyBuilder) Safe(str string) string {
str = strings.ToLower(str)
str = strings.TrimSpace(str)
// replace a few specific characters
repl := strings.NewReplacer(
" ", "_",
"+", "_plus_",
"*", "wildcard_",
":", "-",
"..", "", // prevent directory traversal (regex allows single dots)
)
str = repl.Replace(str)
// finally remove all non-word characters
return safeKeyRE.ReplaceAllLiteralString(str, "")
}
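// exampleCertPath is an illustrative sketch added for this review, not part of
// upstream certmagic. It shows the kind of key the builders above produce: for
// the assumed issuer key "acme-v02.api.letsencrypt.org-directory" and the
// domain "*.example.com", Safe sanitizes the wildcard and the result is roughly
// "certificates/acme-v02.api.letsencrypt.org-directory/wildcard_.example.com/wildcard_.example.com.crt".
func exampleCertPath() string {
    return StorageKeys.SiteCert("acme-v02.api.letsencrypt.org-directory", "*.example.com")
}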
// CleanUpOwnLocks immediately cleans up all
// current locks obtained by this process. Since
// this does not cancel the operations that
// the locks are synchronizing, this should be
// called only immediately before process exit.
// Errors are only reported if a logger is given.
func CleanUpOwnLocks(ctx context.Context, logger *zap.Logger) {
locksMu.Lock()
defer locksMu.Unlock()
for lockKey, storage := range locks {
err := storage.Unlock(ctx, lockKey)
if err == nil {
delete(locks, lockKey)
} else if logger != nil {
logger.Error("unable to clean up lock in storage backend",
zap.Any("storage", storage),
zap.String("lock_key", lockKey),
zap.Error(err),
)
}
}
}
func acquireLock(ctx context.Context, storage Storage, lockKey string) error {
err := storage.Lock(ctx, lockKey)
if err == nil {
locksMu.Lock()
locks[lockKey] = storage
locksMu.Unlock()
}
return err
}
func releaseLock(ctx context.Context, storage Storage, lockKey string) error {
err := storage.Unlock(ctx, lockKey)
if err == nil {
locksMu.Lock()
delete(locks, lockKey)
locksMu.Unlock()
}
return err
}
// locks stores a reference to all the current
// locks obtained by this process.
var locks = make(map[string]Storage)
var locksMu sync.Mutex
// StorageKeys provides methods for accessing
// keys and key prefixes for items in a Storage.
// Typically, you will not need to use this
// because accessing storage is abstracted away
// for most cases. Only use this if you need to
// directly access TLS assets in your application.
var StorageKeys KeyBuilder
const (
prefixCerts = "certificates"
prefixOCSP = "ocsp"
)
// safeKeyRE matches any undesirable characters in storage keys.
// Note that this allows dots, so you'll have to strip ".." manually.
var safeKeyRE = regexp.MustCompile(`[^\w@.-]`)
// defaultFileStorage is a convenient, default storage
// implementation using the local file system.
var defaultFileStorage = &FileStorage{Path: dataDir()}

24
vendor/github.com/klauspost/cpuid/v2/.gitignore generated vendored Normal file
View File

@@ -0,0 +1,24 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof

74
vendor/github.com/klauspost/cpuid/v2/.goreleaser.yml generated vendored Normal file
View File

@@ -0,0 +1,74 @@
# This is an example goreleaser.yaml file with some sane defaults.
# Make sure to check the documentation at http://goreleaser.com
builds:
-
id: "cpuid"
binary: cpuid
main: ./cmd/cpuid/main.go
env:
- CGO_ENABLED=0
flags:
- -ldflags=-s -w
goos:
- aix
- linux
- freebsd
- netbsd
- windows
- darwin
goarch:
- 386
- amd64
- arm64
goarm:
- 7
archives:
-
id: cpuid
name_template: "cpuid-{{ .Os }}_{{ .Arch }}_{{ .Version }}"
replacements:
aix: AIX
darwin: OSX
linux: Linux
windows: Windows
386: i386
amd64: x86_64
freebsd: FreeBSD
netbsd: NetBSD
format_overrides:
- goos: windows
format: zip
files:
- LICENSE
checksum:
name_template: 'checksums.txt'
snapshot:
name_template: "{{ .Tag }}-next"
changelog:
sort: asc
filters:
exclude:
- '^doc:'
- '^docs:'
- '^test:'
- '^tests:'
- '^Update\sREADME.md'
nfpms:
-
file_name_template: "cpuid_package_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
vendor: Klaus Post
homepage: https://github.com/klauspost/cpuid
maintainer: Klaus Post <klauspost@gmail.com>
description: CPUID Tool
license: BSD 3-Clause
formats:
- deb
- rpm
replacements:
darwin: Darwin
linux: Linux
freebsd: FreeBSD
amd64: x86_64

35
vendor/github.com/klauspost/cpuid/v2/CONTRIBUTING.txt generated vendored Normal file
View File

@@ -0,0 +1,35 @@
Developer Certificate of Origin
Version 1.1
Copyright (C) 2015- Klaus Post & Contributors.
Email: klauspost@gmail.com
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.

22
vendor/github.com/klauspost/cpuid/v2/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,22 @@
The MIT License (MIT)
Copyright (c) 2015 Klaus Post
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

137
vendor/github.com/klauspost/cpuid/v2/README.md generated vendored Normal file
View File

@@ -0,0 +1,137 @@
# cpuid
Package cpuid provides information about the CPU running the current program.
CPU features are detected on startup, and kept for fast access through the life of the application.
Currently x86 / x64 (AMD64/i386) and ARM (ARM64) are supported, and no external C (cgo) code is used, which should make the library very easy to use.
You can access the CPU information by accessing the shared CPU variable of the cpuid library.
Package home: https://github.com/klauspost/cpuid
[![PkgGoDev](https://pkg.go.dev/badge/github.com/klauspost/cpuid)](https://pkg.go.dev/github.com/klauspost/cpuid/v2)
[![Build Status][3]][4]
[3]: https://travis-ci.org/klauspost/cpuid.svg?branch=master
[4]: https://travis-ci.org/klauspost/cpuid
## installing
`go get -u github.com/klauspost/cpuid/v2` using modules.
Drop `v2` for others.
## example
```Go
package main
import (
"fmt"
"strings"
. "github.com/klauspost/cpuid/v2"
)
func main() {
// Print basic CPU information:
fmt.Println("Name:", CPU.BrandName)
fmt.Println("PhysicalCores:", CPU.PhysicalCores)
fmt.Println("ThreadsPerCore:", CPU.ThreadsPerCore)
fmt.Println("LogicalCores:", CPU.LogicalCores)
fmt.Println("Family", CPU.Family, "Model:", CPU.Model, "Vendor ID:", CPU.VendorID)
fmt.Println("Features:", strings.Join(CPU.FeatureSet(), ","))
fmt.Println("Cacheline bytes:", CPU.CacheLine)
fmt.Println("L1 Data Cache:", CPU.Cache.L1D, "bytes")
fmt.Println("L1 Instruction Cache:", CPU.Cache.L1I, "bytes")
fmt.Println("L2 Cache:", CPU.Cache.L2, "bytes")
fmt.Println("L3 Cache:", CPU.Cache.L3, "bytes")
fmt.Println("Frequency", CPU.Hz, "hz")
// Test if we have these specific features:
if CPU.Supports(SSE, SSE2) {
fmt.Println("We have Streaming SIMD 2 Extensions")
}
}
```
Sample output:
```
>go run main.go
Name: AMD Ryzen 9 3950X 16-Core Processor
PhysicalCores: 16
ThreadsPerCore: 2
LogicalCores: 32
Family 23 Model: 113 Vendor ID: AMD
Features: ADX,AESNI,AVX,AVX2,BMI1,BMI2,CLMUL,CMOV,CX16,F16C,FMA3,HTT,HYPERVISOR,LZCNT,MMX,MMXEXT,NX,POPCNT,RDRAND,RDSEED,RDTSCP,SHA,SSE,SSE2,SSE3,SSE4,SSE42,SSE4A,SSSE3
Cacheline bytes: 64
L1 Data Cache: 32768 bytes
L1 Instruction Cache: 32768 bytes
L2 Cache: 524288 bytes
L3 Cache: 16777216 bytes
Frequency 0 hz
We have Streaming SIMD 2 Extensions
```
# usage
The shared `cpuid.CPU` variable provides access to CPU features. Use `cpuid.CPU.Supports()` to check for CPU features.
A faster `cpuid.CPU.Has()` is provided which will usually be inlined by the gc compiler.
Note that for some cpu/os combinations some features will not be detected.
`amd64` has rather good support and should work reliably on all platforms.
Note that hypervisors may not pass through all CPU features.
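A minimal sketch of a single-feature check with `Has()` (using `AVX2` purely as an example):
```Go
package main

import (
	"fmt"

	"github.com/klauspost/cpuid/v2"
)

func main() {
	// Has checks a single feature; unlike the variadic Supports it is
	// usually inlined by the gc compiler.
	if cpuid.CPU.Has(cpuid.AVX2) {
		fmt.Println("We have AVX2")
	}
}
```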
## arm64 feature detection
Not all operating systems expose ARM feature information directly,
and there is no safe way to detect it on the rest.
Currently `arm64/linux` and `arm64/freebsd` should be quite reliable.
`arm64/darwin` adds features expected from the M1 processor, but a lot remains undetected.
If you are able to control your deployment, `DetectARM()` can be used;
it will detect CPU features, but may crash if the OS doesn't intercept the calls.
A `-cpu.arm` flag for detecting unsafe ARM features can be added. See below.
Note that currently only features are detected on ARM;
no additional information is available.
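A minimal sketch of opting into explicit ARM detection on a deployment you control:
```Go
package main

import (
	"fmt"
	"strings"

	"github.com/klauspost/cpuid/v2"
)

func main() {
	// Force ARM feature detection. Only do this when you control the
	// deployment: it may crash if the OS doesn't intercept the
	// system register reads.
	cpuid.DetectARM()

	fmt.Println("Features:", strings.Join(cpuid.CPU.FeatureSet(), ","))
}
```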
## flags
It is possible to add flags that affect CPU detection.
For this the `Flags()` command is provided.
This must be called *before* `flag.Parse()`, and after the flags have been parsed, `Detect()` must be called.
This means that any detection used in `init()` functions will not contain these flags.
Example:
```Go
package main
import (
"flag"
"fmt"
"strings"
"github.com/klauspost/cpuid/v2"
)
func main() {
cpuid.Flags()
flag.Parse()
cpuid.Detect()
// Test if we have these specific features:
if cpuid.CPU.Supports(cpuid.SSE, cpuid.SSE2) {
fmt.Println("We have Streaming SIMD 2 Extensions")
}
}
```
# license
This code is published under an MIT license. See LICENSE file for more information.

1132
vendor/github.com/klauspost/cpuid/v2/cpuid.go generated vendored Normal file

File diff suppressed because it is too large

47
vendor/github.com/klauspost/cpuid/v2/cpuid_386.s generated vendored Normal file

@@ -0,0 +1,47 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
//+build 386,!gccgo,!noasm,!appengine
// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
TEXT ·asmCpuid(SB), 7, $0
XORL CX, CX
MOVL op+0(FP), AX
CPUID
MOVL AX, eax+4(FP)
MOVL BX, ebx+8(FP)
MOVL CX, ecx+12(FP)
MOVL DX, edx+16(FP)
RET
// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
TEXT ·asmCpuidex(SB), 7, $0
MOVL op+0(FP), AX
MOVL op2+4(FP), CX
CPUID
MOVL AX, eax+8(FP)
MOVL BX, ebx+12(FP)
MOVL CX, ecx+16(FP)
MOVL DX, edx+20(FP)
RET
// func xgetbv(index uint32) (eax, edx uint32)
TEXT ·asmXgetbv(SB), 7, $0
MOVL index+0(FP), CX
BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV
MOVL AX, eax+4(FP)
MOVL DX, edx+8(FP)
RET
// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
TEXT ·asmRdtscpAsm(SB), 7, $0
BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP
MOVL AX, eax+0(FP)
MOVL BX, ebx+4(FP)
MOVL CX, ecx+8(FP)
MOVL DX, edx+12(FP)
RET
// func asmDarwinHasAVX512() bool
TEXT ·asmDarwinHasAVX512(SB), 7, $0
MOVL $0, eax+0(FP)
RET

72
vendor/github.com/klauspost/cpuid/v2/cpuid_amd64.s generated vendored Normal file

@@ -0,0 +1,72 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
//+build amd64,!gccgo,!noasm,!appengine
// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
TEXT ·asmCpuid(SB), 7, $0
XORQ CX, CX
MOVL op+0(FP), AX
CPUID
MOVL AX, eax+8(FP)
MOVL BX, ebx+12(FP)
MOVL CX, ecx+16(FP)
MOVL DX, edx+20(FP)
RET
// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
TEXT ·asmCpuidex(SB), 7, $0
MOVL op+0(FP), AX
MOVL op2+4(FP), CX
CPUID
MOVL AX, eax+8(FP)
MOVL BX, ebx+12(FP)
MOVL CX, ecx+16(FP)
MOVL DX, edx+20(FP)
RET
// func asmXgetbv(index uint32) (eax, edx uint32)
TEXT ·asmXgetbv(SB), 7, $0
MOVL index+0(FP), CX
BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV
MOVL AX, eax+8(FP)
MOVL DX, edx+12(FP)
RET
// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
TEXT ·asmRdtscpAsm(SB), 7, $0
BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP
MOVL AX, eax+0(FP)
MOVL BX, ebx+4(FP)
MOVL CX, ecx+8(FP)
MOVL DX, edx+12(FP)
RET
// From https://go-review.googlesource.com/c/sys/+/285572/
// func asmDarwinHasAVX512() bool
TEXT ·asmDarwinHasAVX512(SB), 7, $0-1
MOVB $0, ret+0(FP) // default to false
#ifdef GOOS_darwin // return if not darwin
#ifdef GOARCH_amd64 // return if not amd64
// These values from:
// https://github.com/apple/darwin-xnu/blob/xnu-4570.1.46/osfmk/i386/cpu_capabilities.h
#define commpage64_base_address 0x00007fffffe00000
#define commpage64_cpu_capabilities64 (commpage64_base_address+0x010)
#define commpage64_version (commpage64_base_address+0x01E)
#define hasAVX512F 0x0000004000000000
MOVQ $commpage64_version, BX
MOVW (BX), AX
CMPW AX, $13 // versions < 13 do not support AVX512
JL no_avx512
MOVQ $commpage64_cpu_capabilities64, BX
MOVQ (BX), AX
MOVQ $hasAVX512F, CX
ANDQ CX, AX
JZ no_avx512
MOVB $1, ret+0(FP)
no_avx512:
#endif
#endif
RET

26
vendor/github.com/klauspost/cpuid/v2/cpuid_arm64.s generated vendored Normal file

@@ -0,0 +1,26 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
//+build arm64,!gccgo,!noasm,!appengine
// See https://www.kernel.org/doc/Documentation/arm64/cpu-feature-registers.txt
// func getMidr
TEXT ·getMidr(SB), 7, $0
WORD $0xd5380000 // mrs x0, midr_el1 /* Main ID Register */
MOVD R0, midr+0(FP)
RET
// func getProcFeatures
TEXT ·getProcFeatures(SB), 7, $0
WORD $0xd5380400 // mrs x0, id_aa64pfr0_el1 /* Processor Feature Register 0 */
MOVD R0, procFeatures+0(FP)
RET
// func getInstAttributes
TEXT ·getInstAttributes(SB), 7, $0
WORD $0xd5380600 // mrs x0, id_aa64isar0_el1 /* Instruction Set Attribute Register 0 */
WORD $0xd5380621 // mrs x1, id_aa64isar1_el1 /* Instruction Set Attribute Register 1 */
MOVD R0, instAttrReg0+0(FP)
MOVD R1, instAttrReg1+8(FP)
RET

247
vendor/github.com/klauspost/cpuid/v2/detect_arm64.go generated vendored Normal file

@@ -0,0 +1,247 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
//go:build arm64 && !gccgo && !noasm && !appengine
// +build arm64,!gccgo,!noasm,!appengine
package cpuid
import "runtime"
func getMidr() (midr uint64)
func getProcFeatures() (procFeatures uint64)
func getInstAttributes() (instAttrReg0, instAttrReg1 uint64)
func initCPU() {
cpuid = func(uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 }
cpuidex = func(x, y uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 }
xgetbv = func(uint32) (a, b uint32) { return 0, 0 }
rdtscpAsm = func() (a, b, c, d uint32) { return 0, 0, 0, 0 }
}
func addInfo(c *CPUInfo, safe bool) {
// Seems to be safe to assume on ARM64
c.CacheLine = 64
detectOS(c)
// ARM64 disabled since it may crash if interrupt is not intercepted by OS.
if safe && !c.Supports(ARMCPUID) && runtime.GOOS != "freebsd" {
return
}
midr := getMidr()
// MIDR_EL1 - Main ID Register
// https://developer.arm.com/docs/ddi0595/h/aarch64-system-registers/midr_el1
// x--------------------------------------------------x
// | Name | bits | visible |
// |--------------------------------------------------|
// | Implementer | [31-24] | y |
// |--------------------------------------------------|
// | Variant | [23-20] | y |
// |--------------------------------------------------|
// | Architecture | [19-16] | y |
// |--------------------------------------------------|
// | PartNum | [15-4] | y |
// |--------------------------------------------------|
// | Revision | [3-0] | y |
// x--------------------------------------------------x
switch (midr >> 24) & 0xff {
case 0xC0:
c.VendorString = "Ampere Computing"
c.VendorID = Ampere
case 0x41:
c.VendorString = "Arm Limited"
c.VendorID = ARM
case 0x42:
c.VendorString = "Broadcom Corporation"
c.VendorID = Broadcom
case 0x43:
c.VendorString = "Cavium Inc"
c.VendorID = Cavium
case 0x44:
c.VendorString = "Digital Equipment Corporation"
c.VendorID = DEC
case 0x46:
c.VendorString = "Fujitsu Ltd"
c.VendorID = Fujitsu
case 0x49:
c.VendorString = "Infineon Technologies AG"
c.VendorID = Infineon
case 0x4D:
c.VendorString = "Motorola or Freescale Semiconductor Inc"
c.VendorID = Motorola
case 0x4E:
c.VendorString = "NVIDIA Corporation"
c.VendorID = NVIDIA
case 0x50:
c.VendorString = "Applied Micro Circuits Corporation"
c.VendorID = AMCC
case 0x51:
c.VendorString = "Qualcomm Inc"
c.VendorID = Qualcomm
case 0x56:
c.VendorString = "Marvell International Ltd"
c.VendorID = Marvell
case 0x69:
c.VendorString = "Intel Corporation"
c.VendorID = Intel
}
// Lower 4 bits: Architecture
// Architecture Meaning
// 0b0001 Armv4.
// 0b0010 Armv4T.
// 0b0011 Armv5 (obsolete).
// 0b0100 Armv5T.
// 0b0101 Armv5TE.
// 0b0110 Armv5TEJ.
// 0b0111 Armv6.
// 0b1111 Architectural features are individually identified in the ID_* registers, see 'ID registers'.
// Upper 4 bit: Variant
// An IMPLEMENTATION DEFINED variant number.
// Typically, this field is used to distinguish between different product variants, or major revisions of a product.
c.Family = int(midr>>16) & 0xff
// PartNum, bits [15:4]
// An IMPLEMENTATION DEFINED primary part number for the device.
// On processors implemented by Arm, if the top four bits of the primary
// part number are 0x0 or 0x7, the variant and architecture are encoded differently.
// Revision, bits [3:0]
// An IMPLEMENTATION DEFINED revision number for the device.
c.Model = int(midr) & 0xffff
procFeatures := getProcFeatures()
// ID_AA64PFR0_EL1 - Processor Feature Register 0
// x--------------------------------------------------x
// | Name | bits | visible |
// |--------------------------------------------------|
// | DIT | [51-48] | y |
// |--------------------------------------------------|
// | SVE | [35-32] | y |
// |--------------------------------------------------|
// | GIC | [27-24] | n |
// |--------------------------------------------------|
// | AdvSIMD | [23-20] | y |
// |--------------------------------------------------|
// | FP | [19-16] | y |
// |--------------------------------------------------|
// | EL3 | [15-12] | n |
// |--------------------------------------------------|
// | EL2 | [11-8] | n |
// |--------------------------------------------------|
// | EL1 | [7-4] | n |
// |--------------------------------------------------|
// | EL0 | [3-0] | n |
// x--------------------------------------------------x
var f flagSet
// if procFeatures&(0xf<<48) != 0 {
// fmt.Println("DIT")
// }
f.setIf(procFeatures&(0xf<<32) != 0, SVE)
if procFeatures&(0xf<<20) != 15<<20 {
f.set(ASIMD)
// https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64pfr0_el1
// 0b0001 --> As for 0b0000, and also includes support for half-precision floating-point arithmetic.
f.setIf(procFeatures&(0xf<<20) == 1<<20, FPHP, ASIMDHP)
}
f.setIf(procFeatures&(0xf<<16) != 0, FP)
instAttrReg0, instAttrReg1 := getInstAttributes()
// https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar0_el1
//
// ID_AA64ISAR0_EL1 - Instruction Set Attribute Register 0
// x--------------------------------------------------x
// | Name | bits | visible |
// |--------------------------------------------------|
// | TS | [55-52] | y |
// |--------------------------------------------------|
// | FHM | [51-48] | y |
// |--------------------------------------------------|
// | DP | [47-44] | y |
// |--------------------------------------------------|
// | SM4 | [43-40] | y |
// |--------------------------------------------------|
// | SM3 | [39-36] | y |
// |--------------------------------------------------|
// | SHA3 | [35-32] | y |
// |--------------------------------------------------|
// | RDM | [31-28] | y |
// |--------------------------------------------------|
// | ATOMICS | [23-20] | y |
// |--------------------------------------------------|
// | CRC32 | [19-16] | y |
// |--------------------------------------------------|
// | SHA2 | [15-12] | y |
// |--------------------------------------------------|
// | SHA1 | [11-8] | y |
// |--------------------------------------------------|
// | AES | [7-4] | y |
// x--------------------------------------------------x
// if instAttrReg0&(0xf<<52) != 0 {
// fmt.Println("TS")
// }
// if instAttrReg0&(0xf<<48) != 0 {
// fmt.Println("FHM")
// }
f.setIf(instAttrReg0&(0xf<<44) != 0, ASIMDDP)
f.setIf(instAttrReg0&(0xf<<40) != 0, SM4)
f.setIf(instAttrReg0&(0xf<<36) != 0, SM3)
f.setIf(instAttrReg0&(0xf<<32) != 0, SHA3)
f.setIf(instAttrReg0&(0xf<<28) != 0, ASIMDRDM)
f.setIf(instAttrReg0&(0xf<<20) != 0, ATOMICS)
f.setIf(instAttrReg0&(0xf<<16) != 0, CRC32)
f.setIf(instAttrReg0&(0xf<<12) != 0, SHA2)
// https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar0_el1
// 0b0010 --> As 0b0001, plus SHA512H, SHA512H2, SHA512SU0, and SHA512SU1 instructions implemented.
f.setIf(instAttrReg0&(0xf<<12) == 2<<12, SHA512)
f.setIf(instAttrReg0&(0xf<<8) != 0, SHA1)
f.setIf(instAttrReg0&(0xf<<4) != 0, AESARM)
// https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar0_el1
// 0b0010 --> As for 0b0001, plus PMULL/PMULL2 instructions operating on 64-bit data quantities.
f.setIf(instAttrReg0&(0xf<<4) == 2<<4, PMULL)
// https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar1_el1
//
// ID_AA64ISAR1_EL1 - Instruction set attribute register 1
// x--------------------------------------------------x
// | Name | bits | visible |
// |--------------------------------------------------|
// | GPI | [31-28] | y |
// |--------------------------------------------------|
// | GPA | [27-24] | y |
// |--------------------------------------------------|
// | LRCPC | [23-20] | y |
// |--------------------------------------------------|
// | FCMA | [19-16] | y |
// |--------------------------------------------------|
// | JSCVT | [15-12] | y |
// |--------------------------------------------------|
// | API | [11-8] | y |
// |--------------------------------------------------|
// | APA | [7-4] | y |
// |--------------------------------------------------|
// | DPB | [3-0] | y |
// x--------------------------------------------------x
// if instAttrReg1&(0xf<<28) != 0 {
// fmt.Println("GPI")
// }
f.setIf(instAttrReg1&(0xf<<28) != 24, GPA)
f.setIf(instAttrReg1&(0xf<<20) != 0, LRCPC)
f.setIf(instAttrReg1&(0xf<<16) != 0, FCMA)
f.setIf(instAttrReg1&(0xf<<12) != 0, JSCVT)
// if instAttrReg1&(0xf<<8) != 0 {
// fmt.Println("API")
// }
// if instAttrReg1&(0xf<<4) != 0 {
// fmt.Println("APA")
// }
f.setIf(instAttrReg1&(0xf<<0) != 0, DCPOP)
// Store
c.featureSet.or(f)
}

15
vendor/github.com/klauspost/cpuid/v2/detect_ref.go generated vendored Normal file

@@ -0,0 +1,15 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
//go:build (!amd64 && !386 && !arm64) || gccgo || noasm || appengine
// +build !amd64,!386,!arm64 gccgo noasm appengine
package cpuid
func initCPU() {
cpuid = func(uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 }
cpuidex = func(x, y uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 }
xgetbv = func(uint32) (a, b uint32) { return 0, 0 }
rdtscpAsm = func() (a, b, c, d uint32) { return 0, 0, 0, 0 }
}
func addInfo(info *CPUInfo, safe bool) {}

36
vendor/github.com/klauspost/cpuid/v2/detect_x86.go generated vendored Normal file

@@ -0,0 +1,36 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
//go:build (386 && !gccgo && !noasm && !appengine) || (amd64 && !gccgo && !noasm && !appengine)
// +build 386,!gccgo,!noasm,!appengine amd64,!gccgo,!noasm,!appengine
package cpuid
func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
func asmXgetbv(index uint32) (eax, edx uint32)
func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
func asmDarwinHasAVX512() bool
func initCPU() {
cpuid = asmCpuid
cpuidex = asmCpuidex
xgetbv = asmXgetbv
rdtscpAsm = asmRdtscpAsm
darwinHasAVX512 = asmDarwinHasAVX512
}
func addInfo(c *CPUInfo, safe bool) {
c.maxFunc = maxFunctionID()
c.maxExFunc = maxExtendedFunction()
c.BrandName = brandName()
c.CacheLine = cacheLine()
c.Family, c.Model = familyModel()
c.featureSet = support()
c.SGX = hasSGX(c.featureSet.inSet(SGX), c.featureSet.inSet(SGXLC))
c.ThreadsPerCore = threadsPerCore()
c.LogicalCores = logicalCores()
c.PhysicalCores = physicalCores()
c.VendorID, c.VendorString = vendorID()
c.cacheSize()
c.frequencies()
}


@@ -0,0 +1,196 @@
// Code generated by "stringer -type=FeatureID,Vendor"; DO NOT EDIT.
package cpuid
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[ADX-1]
_ = x[AESNI-2]
_ = x[AMD3DNOW-3]
_ = x[AMD3DNOWEXT-4]
_ = x[AMXBF16-5]
_ = x[AMXINT8-6]
_ = x[AMXTILE-7]
_ = x[AVX-8]
_ = x[AVX2-9]
_ = x[AVX512BF16-10]
_ = x[AVX512BITALG-11]
_ = x[AVX512BW-12]
_ = x[AVX512CD-13]
_ = x[AVX512DQ-14]
_ = x[AVX512ER-15]
_ = x[AVX512F-16]
_ = x[AVX512FP16-17]
_ = x[AVX512IFMA-18]
_ = x[AVX512PF-19]
_ = x[AVX512VBMI-20]
_ = x[AVX512VBMI2-21]
_ = x[AVX512VL-22]
_ = x[AVX512VNNI-23]
_ = x[AVX512VP2INTERSECT-24]
_ = x[AVX512VPOPCNTDQ-25]
_ = x[AVXSLOW-26]
_ = x[BMI1-27]
_ = x[BMI2-28]
_ = x[CETIBT-29]
_ = x[CETSS-30]
_ = x[CLDEMOTE-31]
_ = x[CLMUL-32]
_ = x[CLZERO-33]
_ = x[CMOV-34]
_ = x[CMPXCHG8-35]
_ = x[CPBOOST-36]
_ = x[CX16-37]
_ = x[ENQCMD-38]
_ = x[ERMS-39]
_ = x[F16C-40]
_ = x[FMA3-41]
_ = x[FMA4-42]
_ = x[FXSR-43]
_ = x[FXSROPT-44]
_ = x[GFNI-45]
_ = x[HLE-46]
_ = x[HTT-47]
_ = x[HWA-48]
_ = x[HYPERVISOR-49]
_ = x[IBPB-50]
_ = x[IBS-51]
_ = x[IBSBRNTRGT-52]
_ = x[IBSFETCHSAM-53]
_ = x[IBSFFV-54]
_ = x[IBSOPCNT-55]
_ = x[IBSOPCNTEXT-56]
_ = x[IBSOPSAM-57]
_ = x[IBSRDWROPCNT-58]
_ = x[IBSRIPINVALIDCHK-59]
_ = x[INT_WBINVD-60]
_ = x[INVLPGB-61]
_ = x[LAHF-62]
_ = x[LZCNT-63]
_ = x[MCAOVERFLOW-64]
_ = x[MCOMMIT-65]
_ = x[MMX-66]
_ = x[MMXEXT-67]
_ = x[MOVBE-68]
_ = x[MOVDIR64B-69]
_ = x[MOVDIRI-70]
_ = x[MPX-71]
_ = x[MSRIRC-72]
_ = x[NX-73]
_ = x[OSXSAVE-74]
_ = x[POPCNT-75]
_ = x[RDPRU-76]
_ = x[RDRAND-77]
_ = x[RDSEED-78]
_ = x[RDTSCP-79]
_ = x[RTM-80]
_ = x[RTM_ALWAYS_ABORT-81]
_ = x[SCE-82]
_ = x[SERIALIZE-83]
_ = x[SGX-84]
_ = x[SGXLC-85]
_ = x[SHA-86]
_ = x[SSE-87]
_ = x[SSE2-88]
_ = x[SSE3-89]
_ = x[SSE4-90]
_ = x[SSE42-91]
_ = x[SSE4A-92]
_ = x[SSSE3-93]
_ = x[STIBP-94]
_ = x[SUCCOR-95]
_ = x[TBM-96]
_ = x[TSXLDTRK-97]
_ = x[VAES-98]
_ = x[VMX-99]
_ = x[VPCLMULQDQ-100]
_ = x[WAITPKG-101]
_ = x[WBNOINVD-102]
_ = x[X87-103]
_ = x[XOP-104]
_ = x[XSAVE-105]
_ = x[AESARM-106]
_ = x[ARMCPUID-107]
_ = x[ASIMD-108]
_ = x[ASIMDDP-109]
_ = x[ASIMDHP-110]
_ = x[ASIMDRDM-111]
_ = x[ATOMICS-112]
_ = x[CRC32-113]
_ = x[DCPOP-114]
_ = x[EVTSTRM-115]
_ = x[FCMA-116]
_ = x[FP-117]
_ = x[FPHP-118]
_ = x[GPA-119]
_ = x[JSCVT-120]
_ = x[LRCPC-121]
_ = x[PMULL-122]
_ = x[SHA1-123]
_ = x[SHA2-124]
_ = x[SHA3-125]
_ = x[SHA512-126]
_ = x[SM3-127]
_ = x[SM4-128]
_ = x[SVE-129]
_ = x[lastID-130]
_ = x[firstID-0]
}
const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXINT8AMXTILEAVXAVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXSLOWBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPXCHG8CPBOOSTCX16ENQCMDERMSF16CFMA3FMA4FXSRFXSROPTGFNIHLEHTTHWAHYPERVISORIBPBIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKINT_WBINVDINVLPGBLAHFLZCNTMCAOVERFLOWMCOMMITMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMPXMSRIRCNXOSXSAVEPOPCNTRDPRURDRANDRDSEEDRDTSCPRTMRTM_ALWAYS_ABORTSCESERIALIZESGXSGXLCSHASSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSUCCORTBMTSXLDTRKVAESVMXVPCLMULQDQWAITPKGWBNOINVDX87XOPXSAVEAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID"
var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 58, 62, 72, 84, 92, 100, 108, 116, 123, 133, 143, 151, 161, 172, 180, 190, 208, 223, 230, 234, 238, 244, 249, 257, 262, 268, 272, 280, 287, 291, 297, 301, 305, 309, 313, 317, 324, 328, 331, 334, 337, 347, 351, 354, 364, 375, 381, 389, 400, 408, 420, 436, 446, 453, 457, 462, 473, 480, 483, 489, 494, 503, 510, 513, 519, 521, 528, 534, 539, 545, 551, 557, 560, 576, 579, 588, 591, 596, 599, 602, 606, 610, 614, 619, 624, 629, 634, 640, 643, 651, 655, 658, 668, 675, 683, 686, 689, 694, 700, 708, 713, 720, 727, 735, 742, 747, 752, 759, 763, 765, 769, 772, 777, 782, 787, 791, 795, 799, 805, 808, 811, 814, 820}
func (i FeatureID) String() string {
if i < 0 || i >= FeatureID(len(_FeatureID_index)-1) {
return "FeatureID(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _FeatureID_name[_FeatureID_index[i]:_FeatureID_index[i+1]]
}
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[VendorUnknown-0]
_ = x[Intel-1]
_ = x[AMD-2]
_ = x[VIA-3]
_ = x[Transmeta-4]
_ = x[NSC-5]
_ = x[KVM-6]
_ = x[MSVM-7]
_ = x[VMware-8]
_ = x[XenHVM-9]
_ = x[Bhyve-10]
_ = x[Hygon-11]
_ = x[SiS-12]
_ = x[RDC-13]
_ = x[Ampere-14]
_ = x[ARM-15]
_ = x[Broadcom-16]
_ = x[Cavium-17]
_ = x[DEC-18]
_ = x[Fujitsu-19]
_ = x[Infineon-20]
_ = x[Motorola-21]
_ = x[NVIDIA-22]
_ = x[AMCC-23]
_ = x[Qualcomm-24]
_ = x[Marvell-25]
_ = x[lastVendor-26]
}
const _Vendor_name = "VendorUnknownIntelAMDVIATransmetaNSCKVMMSVMVMwareXenHVMBhyveHygonSiSRDCAmpereARMBroadcomCaviumDECFujitsuInfineonMotorolaNVIDIAAMCCQualcommMarvelllastVendor"
var _Vendor_index = [...]uint8{0, 13, 18, 21, 24, 33, 36, 39, 43, 49, 55, 60, 65, 68, 71, 77, 80, 88, 94, 97, 104, 112, 120, 126, 130, 138, 145, 155}
func (i Vendor) String() string {
if i < 0 || i >= Vendor(len(_Vendor_index)-1) {
return "Vendor(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _Vendor_name[_Vendor_index[i]:_Vendor_index[i+1]]
}


@@ -0,0 +1,19 @@
// Copyright (c) 2020 Klaus Post, released under MIT License. See LICENSE file.
package cpuid
import "runtime"
func detectOS(c *CPUInfo) bool {
// There are no hw.optional sysctl values for the below features on Mac OS 11.0
// to detect their supported state dynamically. Assume the CPU features that
// Apple Silicon M1 supports to be available as a minimal set of features
// to all Go programs running on darwin/arm64.
// TODO: Add more if we know them.
c.featureSet.setIf(runtime.GOOS != "ios", AESARM, PMULL, SHA1, SHA2)
c.PhysicalCores = runtime.NumCPU()
// For now assuming 1 thread per core...
c.ThreadsPerCore = 1
c.LogicalCores = c.PhysicalCores
return true
}

130
vendor/github.com/klauspost/cpuid/v2/os_linux_arm64.go generated vendored Normal file

@@ -0,0 +1,130 @@
// Copyright (c) 2020 Klaus Post, released under MIT License. See LICENSE file.
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file located
// here https://github.com/golang/sys/blob/master/LICENSE
package cpuid
import (
"encoding/binary"
"io/ioutil"
"runtime"
)
// HWCAP bits.
const (
hwcap_FP = 1 << 0
hwcap_ASIMD = 1 << 1
hwcap_EVTSTRM = 1 << 2
hwcap_AES = 1 << 3
hwcap_PMULL = 1 << 4
hwcap_SHA1 = 1 << 5
hwcap_SHA2 = 1 << 6
hwcap_CRC32 = 1 << 7
hwcap_ATOMICS = 1 << 8
hwcap_FPHP = 1 << 9
hwcap_ASIMDHP = 1 << 10
hwcap_CPUID = 1 << 11
hwcap_ASIMDRDM = 1 << 12
hwcap_JSCVT = 1 << 13
hwcap_FCMA = 1 << 14
hwcap_LRCPC = 1 << 15
hwcap_DCPOP = 1 << 16
hwcap_SHA3 = 1 << 17
hwcap_SM3 = 1 << 18
hwcap_SM4 = 1 << 19
hwcap_ASIMDDP = 1 << 20
hwcap_SHA512 = 1 << 21
hwcap_SVE = 1 << 22
hwcap_ASIMDFHM = 1 << 23
)
func detectOS(c *CPUInfo) bool {
// For now assuming no hyperthreading is reasonable.
c.LogicalCores = runtime.NumCPU()
c.PhysicalCores = c.LogicalCores
c.ThreadsPerCore = 1
if hwcap == 0 {
// We did not get values from the runtime.
// Try reading /proc/self/auxv
// From https://github.com/golang/sys
const (
_AT_HWCAP = 16
_AT_HWCAP2 = 26
uintSize = int(32 << (^uint(0) >> 63))
)
buf, err := ioutil.ReadFile("/proc/self/auxv")
if err != nil {
// e.g. on android /proc/self/auxv is not accessible, so silently
// ignore the error and leave Initialized = false. On some
// architectures (e.g. arm64) doinit() implements a fallback
// readout and will set Initialized = true again.
return false
}
bo := binary.LittleEndian
for len(buf) >= 2*(uintSize/8) {
var tag, val uint
switch uintSize {
case 32:
tag = uint(bo.Uint32(buf[0:]))
val = uint(bo.Uint32(buf[4:]))
buf = buf[8:]
case 64:
tag = uint(bo.Uint64(buf[0:]))
val = uint(bo.Uint64(buf[8:]))
buf = buf[16:]
}
switch tag {
case _AT_HWCAP:
hwcap = val
case _AT_HWCAP2:
// Not used
}
}
if hwcap == 0 {
return false
}
}
// HWCap was populated by the runtime from the auxiliary vector.
// Use HWCap information since reading aarch64 system registers
// is not supported in user space on older linux kernels.
c.featureSet.setIf(isSet(hwcap, hwcap_AES), AESARM)
c.featureSet.setIf(isSet(hwcap, hwcap_ASIMD), ASIMD)
c.featureSet.setIf(isSet(hwcap, hwcap_ASIMDDP), ASIMDDP)
c.featureSet.setIf(isSet(hwcap, hwcap_ASIMDHP), ASIMDHP)
c.featureSet.setIf(isSet(hwcap, hwcap_ASIMDRDM), ASIMDRDM)
c.featureSet.setIf(isSet(hwcap, hwcap_CPUID), ARMCPUID)
c.featureSet.setIf(isSet(hwcap, hwcap_CRC32), CRC32)
c.featureSet.setIf(isSet(hwcap, hwcap_DCPOP), DCPOP)
c.featureSet.setIf(isSet(hwcap, hwcap_EVTSTRM), EVTSTRM)
c.featureSet.setIf(isSet(hwcap, hwcap_FCMA), FCMA)
c.featureSet.setIf(isSet(hwcap, hwcap_FP), FP)
c.featureSet.setIf(isSet(hwcap, hwcap_FPHP), FPHP)
c.featureSet.setIf(isSet(hwcap, hwcap_JSCVT), JSCVT)
c.featureSet.setIf(isSet(hwcap, hwcap_LRCPC), LRCPC)
c.featureSet.setIf(isSet(hwcap, hwcap_PMULL), PMULL)
c.featureSet.setIf(isSet(hwcap, hwcap_SHA1), SHA1)
c.featureSet.setIf(isSet(hwcap, hwcap_SHA2), SHA2)
c.featureSet.setIf(isSet(hwcap, hwcap_SHA3), SHA3)
c.featureSet.setIf(isSet(hwcap, hwcap_SHA512), SHA512)
c.featureSet.setIf(isSet(hwcap, hwcap_SM3), SM3)
c.featureSet.setIf(isSet(hwcap, hwcap_SM4), SM4)
c.featureSet.setIf(isSet(hwcap, hwcap_SVE), SVE)
// The Samsung S9+ kernel reports support for atomics, but not all cores
// actually support them, resulting in SIGILL. See issue #28431.
// TODO(elias.naur): Only disable the optimization on bad chipsets on android.
c.featureSet.setIf(isSet(hwcap, hwcap_ATOMICS) && runtime.GOOS != "android", ATOMICS)
return true
}
func isSet(hwc uint, value uint) bool {
return hwc&value != 0
}

16
vendor/github.com/klauspost/cpuid/v2/os_other_arm64.go generated vendored Normal file

@@ -0,0 +1,16 @@
// Copyright (c) 2020 Klaus Post, released under MIT License. See LICENSE file.
//go:build arm64 && !linux && !darwin
// +build arm64,!linux,!darwin
package cpuid
import "runtime"
func detectOS(c *CPUInfo) bool {
c.PhysicalCores = runtime.NumCPU()
// For now assuming 1 thread per core...
c.ThreadsPerCore = 1
c.LogicalCores = c.PhysicalCores
return false
}


@@ -0,0 +1,8 @@
// Copyright (c) 2021 Klaus Post, released under MIT License. See LICENSE file.
//go:build nounsafe
// +build nounsafe
package cpuid
var hwcap uint


@@ -0,0 +1,11 @@
// Copyright (c) 2021 Klaus Post, released under MIT License. See LICENSE file.
//go:build !nounsafe
// +build !nounsafe
package cpuid
import _ "unsafe" // needed for go:linkname
//go:linkname hwcap internal/cpu.HWCap
var hwcap uint


@@ -0,0 +1,15 @@
#!/bin/sh
set -e
go tool dist list | while IFS=/ read os arch; do
echo "Checking $os/$arch..."
echo " normal"
GOARCH=$arch GOOS=$os go build -o /dev/null .
echo " noasm"
GOARCH=$arch GOOS=$os go build -tags noasm -o /dev/null .
echo " appengine"
GOARCH=$arch GOOS=$os go build -tags appengine -o /dev/null .
echo " noasm,appengine"
GOARCH=$arch GOOS=$os go build -tags 'appengine noasm' -o /dev/null .
done

1
vendor/github.com/libdns/libdns/.gitignore generated vendored Normal file

@@ -0,0 +1 @@
_gitignore/

21
vendor/github.com/libdns/libdns/LICENSE generated vendored Normal file

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2020 Matthew Holt
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

87
vendor/github.com/libdns/libdns/README.md generated vendored Normal file

@@ -0,0 +1,87 @@
libdns - Universal DNS provider APIs for Go
===========================================
<a href="https://pkg.go.dev/github.com/libdns/libdns"><img src="https://img.shields.io/badge/godoc-reference-blue.svg"></a>
**⚠️ Work-in-progress. Exported APIs are subject to change.**
`libdns` is a collection of free-range DNS provider client implementations written in Go! With libdns packages, your Go program can manage DNS records across any supported providers. A "provider" is a service or program that manages a DNS zone.
This repository defines the core interfaces that provider packages should implement. They are small and idiomatic Go interfaces with well-defined semantics.
The interfaces include:
- [`RecordGetter`](https://pkg.go.dev/github.com/libdns/libdns#RecordGetter) to list records.
- [`RecordAppender`](https://pkg.go.dev/github.com/libdns/libdns#RecordAppender) to append new records.
- [`RecordSetter`](https://pkg.go.dev/github.com/libdns/libdns#RecordSetter) to set (create or change existing) records.
- [`RecordDeleter`](https://pkg.go.dev/github.com/libdns/libdns#RecordDeleter) to delete records.
[See full godoc for detailed documentation.](https://pkg.go.dev/github.com/libdns/libdns)
## Example
To work with DNS records managed by Cloudflare, for example, we can use [libdns/cloudflare](https://pkg.go.dev/github.com/libdns/cloudflare):
```go
import (
"context"

"github.com/libdns/cloudflare"
"github.com/libdns/libdns"
)
ctx := context.TODO()
zone := "example.com."
// configure the DNS provider (choose any from github.com/libdns)
provider := cloudflare.Provider{APIToken: "topsecret"}
// list records
recs, err := provider.GetRecords(ctx, zone)
// create records (AppendRecords is similar)
newRecs, err := provider.SetRecords(ctx, zone, []libdns.Record{
{Type: "A", Name: "sub", Value: "1.2.3.4"},
})
// delete records (this example uses provider-assigned ID)
deletedRecs, err := provider.DeleteRecords(ctx, zone, []libdns.Record{
{ID: "foobar"},
})
// no matter which provider you use, the code stays the same!
// (some providers have caveats; see their package documentation)
```
## Implementing new providers
Providers are 100% written and maintained by the community! We all maintain just the packages for providers we use.
**[Instructions for adding new providers](https://github.com/libdns/libdns/wiki/Implementing-providers)** are on this repo's wiki. Please feel free to contribute.
## Similar projects
**[OctoDNS](https://github.com/github/octodns)** is a suite of tools written in Python for managing DNS. However, its approach is a bit heavy-handed when all you need are small, incremental changes to a zone:
> WARNING: OctoDNS assumes ownership of any domain you point it to. When you tell it to act it will do whatever is necessary to try and match up states including deleting any unexpected records. Be careful when playing around with OctoDNS.
This is incredibly useful when you are maintaining your own zone file, but risky when you just need incremental changes.
**[StackExchange/dnscontrol](https://github.com/StackExchange/dnscontrol)** is written in Go, but is similar to OctoDNS in that it tends to obliterate your entire zone and replace it with your input. Again, this is very useful if you are maintaining your own master list of records, but doesn't do well for simply adding or removing records.
**[go-acme/lego](https://github.com/go-acme/lego)** has support for a huge number of DNS providers (75+!), but their APIs are only capable of setting and deleting TXT records for ACME challenges.
**`libdns`** takes inspiration from the above projects but aims for a more generally-useful set of APIs that homogenize pretty well across providers. In contrast to the above projects, libdns can add, set, delete, and get arbitrary records from a zone without obliterating it (although syncing up an entire zone is also possible!). Its APIs also include context so long-running calls can be cancelled early, for example to accommodate on-line config changes downstream. libdns interfaces are also smaller and more composable. Additionally, libdns can grow to support a nearly infinite number of DNS providers without added bloat, because each provider implementation is a separate Go module, which keeps your builds lean and fast.
In summary, the goal is that libdns providers can do what the above libraries/tools can do, but with more flexibility: they can create and delete TXT records for ACME challenges, they can replace entire zones, but they can also do incremental changes or simply read records.
## Record abstraction
How records are represented across providers varies widely, and each kind of record has different fields and semantics. In time, our goal is for the `libdns.Record` type to be able to represent most of them as concisely and simply as possible, with the interface methods able to deliver on most of the possible zone operations.
Realistically, libdns should enable most common record manipulations, but may not be able to fit absolutely 100% of all possibilities with DNS in a provider-agnostic way. That is probably OK; and given the wide varieties in DNS record types and provider APIs, it would be unreasonable to expect otherwise. We are not aiming for 100% fulfillment of 100% of users' requirements; more like 100% fulfillment of ~90% of users' requirements.

129
vendor/github.com/libdns/libdns/libdns.go generated vendored Normal file

@@ -0,0 +1,129 @@
// Package libdns defines core interfaces that should be implemented by DNS
// provider clients. They are small and idiomatic Go interfaces with
// well-defined semantics.
//
// Records are described independently of any particular zone, a convention
// that grants Record structs portability across zones. As such, record names
// are partially qualified, i.e. relative to the zone. For example, an A
// record called "sub" in zone "example.com." represents a fully-qualified
// domain name (FQDN) of "sub.example.com.". Implementations should expect
// that input records conform to this standard, while also ensuring that
// output records do; adjustments to record names may need to be made before
// or after provider API calls, for example, to maintain consistency with
// all other libdns provider implementations. Helper functions are available
// in this package to convert between relative and absolute names.
//
// Although zone names are a required input, libdns does not coerce any
// particular representation of DNS zones; only records. Since zone name and
// records are separate inputs in libdns interfaces, it is up to the caller
// to pair a zone's name with its records in a way that works for them.
//
// All interface implementations must be safe for concurrent/parallel use.
// For example, if AppendRecords() is called at the same time and two API
// requests are made to the provider at the same time, the result of both
// requests must be visible after they both complete; if the provider does
// not synchronize the writing of the zone file and one request overwrites
// the other, then the client implementation must take care to synchronize
// on behalf of the incompetent provider. This synchronization need not be
// global; for example: the scope of synchronization might only need to be
// within the same zone, allowing multiple requests at once as long as all
// of them are for different zones. (Exact logic depends on the provider.)
package libdns
import (
"context"
"strings"
"time"
)
// RecordGetter can get records from a DNS zone.
type RecordGetter interface {
// GetRecords returns all the records in the DNS zone.
//
// Implementations must honor context cancellation and be safe for
// concurrent use.
GetRecords(ctx context.Context, zone string) ([]Record, error)
}
// RecordAppender can non-destructively add new records to a DNS zone.
type RecordAppender interface {
// AppendRecords creates the requested records in the given zone
// and returns the populated records that were created. It never
// changes existing records.
//
// Implementations must honor context cancellation and be safe for
// concurrent use.
AppendRecords(ctx context.Context, zone string, recs []Record) ([]Record, error)
}
// RecordSetter can set new or update existing records in a DNS zone.
type RecordSetter interface {
// SetRecords updates the zone so that the records described in the
// input are reflected in the output. It may create or overwrite
// records or -- depending on the record type -- delete records to
// maintain parity with the input. No other records are affected.
// It returns the records which were set.
//
// Records that have an ID associating it with a particular resource
// on the provider will be directly replaced. If no ID is given, this
// method may use what information is given to do lookups and will
// ensure that only necessary changes are made to the zone.
//
// Implementations must honor context cancellation and be safe for
// concurrent use.
SetRecords(ctx context.Context, zone string, recs []Record) ([]Record, error)
}
// RecordDeleter can delete records from a DNS zone.
type RecordDeleter interface {
// DeleteRecords deletes the given records from the zone if they exist.
// It returns the records that were deleted.
//
// Records that have an ID to associate it with a particular resource on
// the provider will be directly deleted. If no ID is given, this method
// may use what information is given to do lookups and delete only
// matching records.
//
// Implementations must honor context cancellation and be safe for
// concurrent use.
DeleteRecords(ctx context.Context, zone string, recs []Record) ([]Record, error)
}
// Record is a generalized representation of a DNS record.
type Record struct {
// provider-specific metadata
ID string
// general record fields
Type string
Name string // partially-qualified (relative to zone)
Value string
TTL time.Duration
// type-dependent record fields
Priority int // used by MX, SRV, and URI records
}
// RelativeName makes fqdn relative to zone. For example, for a FQDN of
// "sub.example.com" and a zone of "example.com", it outputs "sub".
//
// If fqdn cannot be expressed relative to zone, the input fqdn is returned.
func RelativeName(fqdn, zone string) string {
return strings.TrimSuffix(strings.TrimSuffix(fqdn, zone), ".")
}
// AbsoluteName makes name into a fully-qualified domain name (FQDN) by
// prepending it to zone and tidying up the dots. For example, an input
// of name "sub" and zone "example.com." will return "sub.example.com.".
func AbsoluteName(name, zone string) string {
if zone == "" {
return strings.Trim(name, ".")
}
if name == "" || name == "@" {
return zone
}
if !strings.HasSuffix(name, ".") {
name += "."
}
return name + zone
}
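For illustration, a minimal sketch of how the `RelativeName` and `AbsoluteName` helpers behave:
```go
package main

import (
	"fmt"

	"github.com/libdns/libdns"
)

func main() {
	// "sub.example.com." relative to the zone "example.com." is just "sub".
	fmt.Println(libdns.RelativeName("sub.example.com.", "example.com.")) // sub

	// ...and back: name "sub" in zone "example.com." becomes the FQDN.
	fmt.Println(libdns.AbsoluteName("sub", "example.com.")) // sub.example.com.
}
```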

1
vendor/github.com/mholt/acmez/.gitignore generated vendored Normal file

@@ -0,0 +1 @@
_gitignore/

201
vendor/github.com/mholt/acmez/LICENSE generated vendored Normal file

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

78
vendor/github.com/mholt/acmez/README.md generated vendored Normal file

@@ -0,0 +1,78 @@
acmez - ACME client library for Go
==================================
[![godoc](https://pkg.go.dev/badge/github.com/mholt/acmez)](https://pkg.go.dev/github.com/mholt/acmez)
ACMEz ("ack-measy" or "acme-zee", whichever you prefer) is a fully-compliant [RFC 8555](https://tools.ietf.org/html/rfc8555) (ACME) implementation in pure Go. It is lightweight, has an elegant Go API, and its retry logic is highly robust against external errors. ACMEz is suitable for large-scale enterprise deployments.
**NOTE:** This module is for _getting_ certificates, not _managing_ certificates. Most users probably want certificate _management_ (keeping certificates renewed) rather than to interface directly with ACME. Developers who want to use certificates in their long-running Go programs should use [CertMagic](https://github.com/caddyserver/certmagic) instead; or, if their program is not written in Go, [Caddy](https://caddyserver.com/) can be used to manage certificates (even without running an HTTP or TLS server).
This module has two primary packages:
- **`acmez`** is a high-level wrapper for getting certificates. It implements the ACME order flow described in RFC 8555 including challenge solving using pluggable solvers.
- **`acme`** is a low-level RFC 8555 implementation that provides the fundamental ACME operations, mainly useful if you have advanced or niche requirements.
In other words, the `acmez` package is **porcelain** while the `acme` package is **plumbing** (to use git's terminology).
## Features
- Simple, elegant Go API
- Thoroughly documented with spec citations
- Robust to external errors
- Structured error values ("problems" as defined in RFC 7807)
- Smart retries (resilient against network and server hiccups)
- Challenge plasticity (randomized challenges, and will retry others if one fails)
- Context cancellation (suitable for high-frequency config changes or reloads)
- Highly flexible and customizable
- External Account Binding (EAB) support
- Tested with multiple ACME CAs (more than just Let's Encrypt)
- Supports niche aspects of RFC 8555 (such as alt cert chains and account key rollover)
- Efficient solving of large SAN lists (e.g. for slow DNS record propagation)
- Utility functions for solving challenges
- Helpers for RFC 8737 (tls-alpn-01 challenge)
## Examples
See the [`examples` folder](https://github.com/mholt/acmez/tree/master/examples) for tutorials on how to use either package. **Most users should follow the [porcelain guide](https://github.com/mholt/acmez/blob/master/examples/porcelain/main.go).**
## Challenge solvers
The `acmez` package is "bring-your-own-solver." It provides helper utilities for http-01, dns-01, and tls-alpn-01 challenges, but does not actually solve them for you. You must write or use an implementation of [`acmez.Solver`](https://pkg.go.dev/github.com/mholt/acmez#Solver) in order to get certificates. How this is done depends on your environment/situation.
However, you can find [a general-purpose dns-01 solver in CertMagic](https://pkg.go.dev/github.com/caddyserver/certmagic#DNS01Solver), which uses [libdns](https://github.com/libdns) packages to integrate with numerous DNS providers. You can use it like this:
```go
// minimal example using Cloudflare
solver := &certmagic.DNS01Solver{
DNSProvider: &cloudflare.Provider{APIToken: "topsecret"},
}
client := acmez.Client{
ChallengeSolvers: map[string]acmez.Solver{
acme.ChallengeTypeDNS01: solver,
},
// ...
}
```
If you're implementing a tls-alpn-01 solver, the `acmez` package can help. It has the constant [`ACMETLS1Protocol`](https://pkg.go.dev/github.com/mholt/acmez#pkg-constants) which you can use to identify challenge handshakes by inspecting the ClientHello's ALPN extension. Simply complete the handshake using a certificate from the [`acmez.TLSALPN01ChallengeCert()`](https://pkg.go.dev/github.com/mholt/acmez#TLSALPN01ChallengeCert) function to solve the challenge.
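For illustration, a minimal sketch of such a handshake router (assuming `challengeCert` holds the certificate from `acmez.TLSALPN01ChallengeCert()` and `serverCert` is your regular certificate; both are placeholders here):
```go
package main

import (
	"crypto/tls"

	"github.com/mholt/acmez"
)

// challengeCert would hold the certificate produced for the pending
// tls-alpn-01 challenge; serverCert is the site's regular certificate.
var challengeCert, serverCert *tls.Certificate

// getCertificate serves the challenge certificate only when the ClientHello
// advertises the ACME ALPN protocol, and the regular certificate otherwise.
func getCertificate(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
	for _, proto := range hello.SupportedProtos {
		if proto == acmez.ACMETLS1Protocol {
			return challengeCert, nil
		}
	}
	return serverCert, nil
}

func main() {
	cfg := &tls.Config{
		GetCertificate: getCertificate,
		NextProtos:     []string{acmez.ACMETLS1Protocol, "h2", "http/1.1"},
	}
	_ = cfg // plug cfg into your TLS listener
}
```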
## History
In 2014, the ISRG was finishing the development of its automated CA infrastructure: the first of its kind to become publicly-trusted, under the name Let's Encrypt, which used a young protocol called ACME to automate domain validation and certificate issuance.
Meanwhile, a project called [Caddy](https://caddyserver.com) was being developed which would be the first and only web server to use HTTPS _automatically and by default_. To make that possible, another project called lego was commissioned by the Caddy project to become one of the first-ever ACME client libraries, and the first client written in Go. It was made by Sebastian Erhart (xenolf), and on day 1 of Let's Encrypt's public beta, Caddy used lego to obtain its first certificate automatically at startup, making Caddy and lego the first-ever integrated ACME client.
Since then, Caddy has seen use in production longer than any other ACME client integration, and is well-known for being one of the most robust and reliable HTTPS implementations available today.
A few years later, Caddy's novel auto-HTTPS logic was extracted into a library called [CertMagic](https://github.com/caddyserver/certmagic) to be usable by any Go program. Caddy would continue to use CertMagic, which implemented the certificate _automation and management_ logic on top of the low-level certificate _obtain_ logic that lego provided.
Soon thereafter, the lego project shifted maintainership and the goals and vision of the project diverged from those of Caddy's use case of managing tens of thousands of certificates per instance. Eventually, [the original Caddy author announced work on a new ACME client library in Go](https://github.com/caddyserver/certmagic/issues/71) that exceeded Caddy's harsh requirements for large-scale enterprise deployments, lean builds, and simple API. This work finally came to fruition in 2020 as ACMEz.
---
(c) 2020 Matthew Holt

37
vendor/github.com/mholt/acmez/THIRD-PARTY generated vendored Normal file

@@ -0,0 +1,37 @@
This document contains Third Party Software Notices and/or Additional
Terms and Conditions for licensed third party software components
included within this product.
==
https://github.com/golang/crypto/blob/master/acme/jws.go
https://github.com/golang/crypto/blob/master/acme/jws_test.go
(with modifications)
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

249
vendor/github.com/mholt/acmez/acme/account.go generated vendored Normal file
View File

@@ -0,0 +1,249 @@
// Copyright 2020 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package acme
import (
"context"
"crypto"
"encoding/base64"
"encoding/json"
"fmt"
)
// Account represents a set of metadata associated with an account
// as defined by the ACME spec §7.1.2:
// https://tools.ietf.org/html/rfc8555#section-7.1.2
type Account struct {
// status (required, string): The status of this account. Possible
// values are "valid", "deactivated", and "revoked". The value
// "deactivated" should be used to indicate client-initiated
// deactivation whereas "revoked" should be used to indicate server-
// initiated deactivation. See Section 7.1.6.
Status string `json:"status"`
// contact (optional, array of string): An array of URLs that the
// server can use to contact the client for issues related to this
// account. For example, the server may wish to notify the client
// about server-initiated revocation or certificate expiration. For
// information on supported URL schemes, see Section 7.3.
Contact []string `json:"contact,omitempty"`
// termsOfServiceAgreed (optional, boolean): Including this field in a
// newAccount request, with a value of true, indicates the client's
// agreement with the terms of service. This field cannot be updated
// by the client.
TermsOfServiceAgreed bool `json:"termsOfServiceAgreed,omitempty"`
// externalAccountBinding (optional, object): Including this field in a
// newAccount request indicates approval by the holder of an existing
// non-ACME account to bind that account to this ACME account. This
// field is not updateable by the client (see Section 7.3.4).
//
// Use SetExternalAccountBinding() to set this field's value properly.
ExternalAccountBinding json.RawMessage `json:"externalAccountBinding,omitempty"`
// orders (required, string): A URL from which a list of orders
// submitted by this account can be fetched via a POST-as-GET
// request, as described in Section 7.1.2.1.
Orders string `json:"orders"`
// In response to new-account, "the server returns this account
// object in a 201 (Created) response, with the account URL
// in a Location header field." §7.3
//
// We transfer the value from the header to this field for
// storage and recall purposes.
Location string `json:"location,omitempty"`
// The private key to the account. Because it is secret, it is
// not serialized as JSON and must be stored separately (usually
// a PEM-encoded file).
PrivateKey crypto.Signer `json:"-"`
}
// SetExternalAccountBinding sets the ExternalAccountBinding field of the account.
// It only sets the field value; it does not register the account with the CA. (The
// client parameter is necessary because the EAB encoding depends on the directory.)
func (a *Account) SetExternalAccountBinding(ctx context.Context, client *Client, eab EAB) error {
if err := client.provision(ctx); err != nil {
return err
}
macKey, err := base64.RawURLEncoding.DecodeString(eab.MACKey)
if err != nil {
return fmt.Errorf("base64-decoding MAC key: %w", err)
}
eabJWS, err := jwsEncodeEAB(a.PrivateKey.Public(), macKey, keyID(eab.KeyID), client.dir.NewAccount)
if err != nil {
return fmt.Errorf("signing EAB content: %w", err)
}
a.ExternalAccountBinding = eabJWS
return nil
}
// NewAccount creates a new account on the ACME server.
//
// "A client creates a new account with the server by sending a POST
// request to the server's newAccount URL." §7.3
func (c *Client) NewAccount(ctx context.Context, account Account) (Account, error) {
if err := c.provision(ctx); err != nil {
return account, err
}
return c.postAccount(ctx, c.dir.NewAccount, accountObject{Account: account})
}
// GetAccount looks up an account on the ACME server.
//
// "If a client wishes to find the URL for an existing account and does
// not want an account to be created if one does not already exist, then
// it SHOULD do so by sending a POST request to the newAccount URL with
// a JWS whose payload has an 'onlyReturnExisting' field set to 'true'."
// §7.3.1
func (c *Client) GetAccount(ctx context.Context, account Account) (Account, error) {
if err := c.provision(ctx); err != nil {
return account, err
}
return c.postAccount(ctx, c.dir.NewAccount, accountObject{
Account: account,
OnlyReturnExisting: true,
})
}
// UpdateAccount updates account information on the ACME server.
//
// "If the client wishes to update this information in the future, it
// sends a POST request with updated information to the account URL.
// The server MUST ignore any updates to the 'orders' field,
// 'termsOfServiceAgreed' field (see Section 7.3.3), the 'status' field
// (except as allowed by Section 7.3.6), or any other fields it does not
// recognize." §7.3.2
//
// This method uses the account.Location value as the account URL.
func (c *Client) UpdateAccount(ctx context.Context, account Account) (Account, error) {
return c.postAccount(ctx, account.Location, accountObject{Account: account})
}
type keyChangeRequest struct {
Account string `json:"account"`
OldKey json.RawMessage `json:"oldKey"`
}
// AccountKeyRollover changes an account's associated key.
//
// "To change the key associated with an account, the client sends a
// request to the server containing signatures by both the old and new
// keys." §7.3.5
func (c *Client) AccountKeyRollover(ctx context.Context, account Account, newPrivateKey crypto.Signer) (Account, error) {
if err := c.provision(ctx); err != nil {
return account, err
}
oldPublicKeyJWK, err := jwkEncode(account.PrivateKey.Public())
if err != nil {
return account, fmt.Errorf("encoding old private key: %v", err)
}
keyChangeReq := keyChangeRequest{
Account: account.Location,
OldKey: []byte(oldPublicKeyJWK),
}
innerJWS, err := jwsEncodeJSON(keyChangeReq, newPrivateKey, "", "", c.dir.KeyChange)
if err != nil {
return account, fmt.Errorf("encoding inner JWS: %v", err)
}
_, err = c.httpPostJWS(ctx, account.PrivateKey, account.Location, c.dir.KeyChange, json.RawMessage(innerJWS), nil)
if err != nil {
return account, fmt.Errorf("rolling key on server: %w", err)
}
account.PrivateKey = newPrivateKey
return account, nil
}
func (c *Client) postAccount(ctx context.Context, endpoint string, account accountObject) (Account, error) {
// Normally, the account URL is the key ID ("kid")... except when the user
// is trying to get the correct account URL. In that case, we must ignore
// any existing URL we may have and not set the kid field on the request.
// Arguably, this is a user error (spec says "If client wishes to find the
// URL for an existing account", so why would the URL already be filled
// out?) but it's easy enough to infer their intent and make it work.
kid := account.Location
if account.OnlyReturnExisting {
kid = ""
}
resp, err := c.httpPostJWS(ctx, account.PrivateKey, kid, endpoint, account, &account.Account)
if err != nil {
return account.Account, err
}
account.Location = resp.Header.Get("Location")
return account.Account, nil
}
type accountObject struct {
Account
// If true, newAccount will be read-only, and Account.Location
// (which holds the account URL) must be empty.
OnlyReturnExisting bool `json:"onlyReturnExisting,omitempty"`
}
// EAB (External Account Binding) contains information
// necessary to bind or map an ACME account to some
// other account known by the CA.
//
// External account bindings are "used to associate an
// ACME account with an existing account in a non-ACME
// system, such as a CA customer database."
//
// "To enable ACME account binding, the CA operating the
// ACME server needs to provide the ACME client with a
// MAC key and a key identifier, using some mechanism
// outside of ACME." §7.3.4
type EAB struct {
// "The key identifier MUST be an ASCII string." §7.3.4
KeyID string `json:"key_id"`
// "The MAC key SHOULD be provided in base64url-encoded
// form, to maximize compatibility between non-ACME
// provisioning systems and ACME clients." §7.3.4
MACKey string `json:"mac_key"`
}
// Possible status values. From several spec sections:
// - Account §7.1.2 (valid, deactivated, revoked)
// - Order §7.1.3 (pending, ready, processing, valid, invalid)
// - Authorization §7.1.4 (pending, valid, invalid, deactivated, expired, revoked)
// - Challenge §7.1.5 (pending, processing, valid, invalid)
// - Status changes §7.1.6
const (
StatusPending = "pending"
StatusProcessing = "processing"
StatusValid = "valid"
StatusInvalid = "invalid"
StatusDeactivated = "deactivated"
StatusExpired = "expired"
StatusRevoked = "revoked"
StatusReady = "ready"
)

283
vendor/github.com/mholt/acmez/acme/authorization.go generated vendored Normal file
View File

@@ -0,0 +1,283 @@
// Copyright 2020 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package acme
import (
"context"
"fmt"
"time"
)
// Authorization "represents a server's authorization for
// an account to represent an identifier. In addition to the
// identifier, an authorization includes several metadata fields, such
// as the status of the authorization (e.g., 'pending', 'valid', or
// 'revoked') and which challenges were used to validate possession of
// the identifier." §7.1.4
type Authorization struct {
// identifier (required, object): The identifier that the account is
// authorized to represent.
Identifier Identifier `json:"identifier"`
// status (required, string): The status of this authorization.
// Possible values are "pending", "valid", "invalid", "deactivated",
// "expired", and "revoked". See Section 7.1.6.
Status string `json:"status"`
// expires (optional, string): The timestamp after which the server
// will consider this authorization invalid, encoded in the format
// specified in [RFC3339]. This field is REQUIRED for objects with
// "valid" in the "status" field.
Expires time.Time `json:"expires,omitempty"`
// challenges (required, array of objects): For pending authorizations,
// the challenges that the client can fulfill in order to prove
// possession of the identifier. For valid authorizations, the
// challenge that was validated. For invalid authorizations, the
// challenge that was attempted and failed. Each array entry is an
// object with parameters required to validate the challenge. A
// client should attempt to fulfill one of these challenges, and a
// server should consider any one of the challenges sufficient to
// make the authorization valid.
Challenges []Challenge `json:"challenges"`
// wildcard (optional, boolean): This field MUST be present and true
// for authorizations created as a result of a newOrder request
// containing a DNS identifier with a value that was a wildcard
// domain name. For other authorizations, it MUST be absent.
// Wildcard domain names are described in Section 7.1.3.
Wildcard bool `json:"wildcard,omitempty"`
// "The server allocates a new URL for this authorization and returns a
// 201 (Created) response with the authorization URL in the Location
// header field" §7.4.1
//
// We transfer the value from the header to this field for storage and
// recall purposes.
Location string `json:"-"`
}
// IdentifierValue returns the Identifier.Value field, adjusted
// according to the Wildcard field.
func (authz Authorization) IdentifierValue() string {
if authz.Wildcard {
return "*." + authz.Identifier.Value
}
return authz.Identifier.Value
}
// fillChallengeFields populates extra fields in the challenge structs so that
// challenges can be solved without needing a bunch of unnecessary extra state.
func (authz *Authorization) fillChallengeFields(account Account) error {
accountThumbprint, err := jwkThumbprint(account.PrivateKey.Public())
if err != nil {
return fmt.Errorf("computing account JWK thumbprint: %v", err)
}
for i := 0; i < len(authz.Challenges); i++ {
authz.Challenges[i].Identifier = authz.Identifier
if authz.Challenges[i].KeyAuthorization == "" {
authz.Challenges[i].KeyAuthorization = authz.Challenges[i].Token + "." + accountThumbprint
}
}
return nil
}
// NewAuthorization creates a new authorization for an identifier using
// the newAuthz endpoint of the directory, if available. This function
// creates authzs out of the regular order flow.
//
// "Note that because the identifier in a pre-authorization request is
// the exact identifier to be included in the authorization object, pre-
// authorization cannot be used to authorize issuance of certificates
// containing wildcard domain names." §7.4.1
func (c *Client) NewAuthorization(ctx context.Context, account Account, id Identifier) (Authorization, error) {
if err := c.provision(ctx); err != nil {
return Authorization{}, err
}
if c.dir.NewAuthz == "" {
return Authorization{}, fmt.Errorf("server does not support newAuthz endpoint")
}
var authz Authorization
resp, err := c.httpPostJWS(ctx, account.PrivateKey, account.Location, c.dir.NewAuthz, id, &authz)
if err != nil {
return authz, err
}
authz.Location = resp.Header.Get("Location")
err = authz.fillChallengeFields(account)
if err != nil {
return authz, err
}
return authz, nil
}
// GetAuthorization fetches an authorization object from the server.
//
// "Authorization resources are created by the server in response to
// newOrder or newAuthz requests submitted by an account key holder;
// their URLs are provided to the client in the responses to these
// requests."
//
// "When a client receives an order from the server in reply to a
// newOrder request, it downloads the authorization resources by sending
// POST-as-GET requests to the indicated URLs. If the client initiates
// authorization using a request to the newAuthz resource, it will have
// already received the pending authorization object in the response to
// that request." §7.5
func (c *Client) GetAuthorization(ctx context.Context, account Account, authzURL string) (Authorization, error) {
if err := c.provision(ctx); err != nil {
return Authorization{}, err
}
var authz Authorization
_, err := c.httpPostJWS(ctx, account.PrivateKey, account.Location, authzURL, nil, &authz)
if err != nil {
return authz, err
}
authz.Location = authzURL
err = authz.fillChallengeFields(account)
if err != nil {
return authz, err
}
return authz, nil
}
// PollAuthorization polls the authorization resource endpoint until the authorization is
// considered "finalized" which means that it either succeeded, failed, or was abandoned.
// It blocks until that happens or until the configured timeout.
//
// "Usually, the validation process will take some time, so the client
// will need to poll the authorization resource to see when it is
// finalized."
//
// "For challenges where the client can tell when the server
// has validated the challenge (e.g., by seeing an HTTP or DNS request
// from the server), the client SHOULD NOT begin polling until it has
// seen the validation request from the server." §7.5.1
func (c *Client) PollAuthorization(ctx context.Context, account Account, authz Authorization) (Authorization, error) {
start, interval, maxDuration := time.Now(), c.pollInterval(), c.pollTimeout()
if authz.Status != "" {
if finalized, err := authzIsFinalized(authz); finalized {
return authz, err
}
}
for time.Since(start) < maxDuration {
select {
case <-time.After(interval):
case <-ctx.Done():
return authz, ctx.Err()
}
// get the latest authz object
resp, err := c.httpPostJWS(ctx, account.PrivateKey, account.Location, authz.Location, nil, &authz)
if err != nil {
return authz, fmt.Errorf("checking authorization status: %w", err)
}
if finalized, err := authzIsFinalized(authz); finalized {
return authz, err
}
// "The server MUST provide information about its retry state to the
// client via the 'error' field in the challenge and the Retry-After
// HTTP header field in response to requests to the challenge resource."
// §8.2
interval, err = retryAfter(resp, interval)
if err != nil {
return authz, err
}
}
return authz, fmt.Errorf("authorization took too long")
}
// DeactivateAuthorization deactivates an authorization on the server, which is
// a good idea if the authorization is not going to be utilized by the client.
//
// "If a client wishes to relinquish its authorization to issue
// certificates for an identifier, then it may request that the server
// deactivate each authorization associated with it by sending POST
// requests with the static object {"status": "deactivated"} to each
// authorization URL." §7.5.2
func (c *Client) DeactivateAuthorization(ctx context.Context, account Account, authzURL string) (Authorization, error) {
if err := c.provision(ctx); err != nil {
return Authorization{}, err
}
if authzURL == "" {
return Authorization{}, fmt.Errorf("empty authz url")
}
deactivate := struct {
Status string `json:"status"`
}{Status: "deactivated"}
var authz Authorization
_, err := c.httpPostJWS(ctx, account.PrivateKey, account.Location, authzURL, deactivate, &authz)
authz.Location = authzURL
return authz, err
}
// authzIsFinalized returns true if the authorization is finished,
// whether successfully or not. If not, an error will be returned.
// Post-valid statuses that make an authz unusable are treated as
// errors.
func authzIsFinalized(authz Authorization) (bool, error) {
switch authz.Status {
case StatusPending:
// "Authorization objects are created in the 'pending' state." §7.1.6
return false, nil
case StatusValid:
// "If one of the challenges listed in the authorization transitions
// to the 'valid' state, then the authorization also changes to the
// 'valid' state." §7.1.6
return true, nil
case StatusInvalid:
// "If the client attempts to fulfill a challenge and fails, or if
// there is an error while the authorization is still pending, then
// the authorization transitions to the 'invalid' state." §7.1.6
var firstProblem Problem
for _, chal := range authz.Challenges {
if chal.Error != nil {
firstProblem = *chal.Error
break
}
}
firstProblem.Resource = authz
return true, fmt.Errorf("authorization failed: %w", firstProblem)
case StatusExpired, StatusDeactivated, StatusRevoked:
// "Once the authorization is in the 'valid' state, it can expire
// ('expired'), be deactivated by the client ('deactivated', see
// Section 7.5.2), or revoked by the server ('revoked')." §7.1.6
return true, fmt.Errorf("authorization %s", authz.Status)
case "":
return false, fmt.Errorf("status unknown")
default:
return true, fmt.Errorf("server set unrecognized authorization status: %s", authz.Status)
}
}

165
vendor/github.com/mholt/acmez/acme/certificate.go generated vendored Normal file
View File

@@ -0,0 +1,165 @@
// Copyright 2020 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package acme
import (
"bytes"
"context"
"crypto"
"crypto/x509"
"encoding/base64"
"fmt"
"net/http"
)
// Certificate represents a certificate chain, which we usually refer
// to as "a certificate" because in practice an end-entity certificate
// is seldom useful/practical without a chain.
type Certificate struct {
// The certificate resource URL as provisioned by
// the ACME server. Some ACME servers may split
// the chain into multiple URLs that are Linked
// together, in which case this URL represents the
// starting point.
URL string `json:"url"`
// The PEM-encoded certificate chain, end-entity first.
ChainPEM []byte `json:"-"`
}
// GetCertificateChain downloads all available certificate chains originating from
// the given certURL. This is to be done after an order is finalized.
//
// "To download the issued certificate, the client simply sends a POST-
// as-GET request to the certificate URL."
//
// "The server MAY provide one or more link relation header fields
// [RFC8288] with relation 'alternate'. Each such field SHOULD express
// an alternative certificate chain starting with the same end-entity
// certificate. This can be used to express paths to various trust
// anchors. Clients can fetch these alternates and use their own
// heuristics to decide which is optimal." §7.4.2
func (c *Client) GetCertificateChain(ctx context.Context, account Account, certURL string) ([]Certificate, error) {
if err := c.provision(ctx); err != nil {
return nil, err
}
var chains []Certificate
addChain := func(certURL string) (*http.Response, error) {
// can't pool this buffer; bytes escape scope
buf := new(bytes.Buffer)
// TODO: set the Accept header? ("application/pem-certificate-chain") See end of §7.4.2
resp, err := c.httpPostJWS(ctx, account.PrivateKey, account.Location, certURL, nil, buf)
if err != nil {
return resp, err
}
contentType := parseMediaType(resp)
switch contentType {
case "application/pem-certificate-chain":
chains = append(chains, Certificate{
URL: certURL,
ChainPEM: buf.Bytes(),
})
default:
return resp, fmt.Errorf("unrecognized Content-Type from server: %s", contentType)
}
// "For formats that can only express a single certificate, the server SHOULD
// provide one or more "Link: rel="up"" header fields pointing to an
// issuer or issuers so that ACME clients can build a certificate chain
// as defined in TLS (see Section 4.4.2 of [RFC8446])." (end of §7.4.2)
allUp := extractLinks(resp, "up")
for _, upURL := range allUp {
upCerts, err := c.GetCertificateChain(ctx, account, upURL)
if err != nil {
return resp, fmt.Errorf("retrieving next certificate in chain: %s: %w", upURL, err)
}
for _, upCert := range upCerts {
chains[len(chains)-1].ChainPEM = append(chains[len(chains)-1].ChainPEM, upCert.ChainPEM...)
}
}
return resp, nil
}
// always add preferred/first certificate chain
resp, err := addChain(certURL)
if err != nil {
return chains, err
}
// "The server MAY provide one or more link relation header fields
// [RFC8288] with relation 'alternate'. Each such field SHOULD express
// an alternative certificate chain starting with the same end-entity
// certificate. This can be used to express paths to various trust
// anchors. Clients can fetch these alternates and use their own
// heuristics to decide which is optimal." §7.4.2
alternates := extractLinks(resp, "alternate")
for _, altURL := range alternates {
_, err = addChain(altURL)
if err != nil {
return nil, fmt.Errorf("retrieving alternate certificate chain at %s: %w", altURL, err)
}
}
return chains, nil
}
// RevokeCertificate revokes the given certificate. If the certificate key is not
// provided, then the account key is used instead. See §7.6.
func (c *Client) RevokeCertificate(ctx context.Context, account Account, cert *x509.Certificate, certKey crypto.Signer, reason int) error {
if err := c.provision(ctx); err != nil {
return err
}
body := struct {
Certificate string `json:"certificate"`
Reason int `json:"reason"`
}{
Certificate: base64.RawURLEncoding.EncodeToString(cert.Raw),
Reason: reason,
}
// "Revocation requests are different from other ACME requests in that
// they can be signed with either an account key pair or the key pair in
// the certificate." §7.6
kid := ""
if certKey == account.PrivateKey {
kid = account.Location
}
_, err := c.httpPostJWS(ctx, certKey, kid, c.dir.RevokeCert, body, nil)
return err
}
// Reasons for revoking a certificate, as defined
// by RFC 5280 §5.3.1.
// https://tools.ietf.org/html/rfc5280#section-5.3.1
const (
ReasonUnspecified = iota // 0
ReasonKeyCompromise // 1
ReasonCACompromise // 2
ReasonAffiliationChanged // 3
ReasonSuperseded // 4
ReasonCessationOfOperation // 5
ReasonCertificateHold // 6
_ // 7 (unused)
ReasonRemoveFromCRL // 8
ReasonPrivilegeWithdrawn // 9
ReasonAACompromise // 10
)

133
vendor/github.com/mholt/acmez/acme/challenge.go generated vendored Normal file
View File

@@ -0,0 +1,133 @@
// Copyright 2020 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package acme
import (
"context"
"crypto/sha256"
"encoding/base64"
)
// Challenge holds information about an ACME challenge.
//
// "An ACME challenge object represents a server's offer to validate a
// client's possession of an identifier in a specific way. Unlike the
// other objects listed above, there is not a single standard structure
// for a challenge object. The contents of a challenge object depend on
// the validation method being used. The general structure of challenge
// objects and an initial set of validation methods are described in
// Section 8." §7.1.5
type Challenge struct {
// "Challenge objects all contain the following basic fields..." §8
// type (required, string): The type of challenge encoded in the
// object.
Type string `json:"type"`
// url (required, string): The URL to which a response can be posted.
URL string `json:"url"`
// status (required, string): The status of this challenge. Possible
// values are "pending", "processing", "valid", and "invalid" (see
// Section 7.1.6).
Status string `json:"status"`
// validated (optional, string): The time at which the server validated
// this challenge, encoded in the format specified in [RFC3339].
// This field is REQUIRED if the "status" field is "valid".
Validated string `json:"validated,omitempty"`
// error (optional, object): Error that occurred while the server was
// validating the challenge, if any, structured as a problem document
// [RFC7807]. Multiple errors can be indicated by using subproblems
// Section 6.7.1. A challenge object with an error MUST have status
// equal to "invalid".
Error *Problem `json:"error,omitempty"`
// "All additional fields are specified by the challenge type." §8
// (We also add our own for convenience.)
// "The token for a challenge is a string comprised entirely of
// characters in the URL-safe base64 alphabet." §8.1
//
// Used by the http-01, tls-alpn-01, and dns-01 challenges.
Token string `json:"token,omitempty"`
// A key authorization is a string that concatenates the token for the
// challenge with a key fingerprint, separated by a "." character (§8.1):
//
// keyAuthorization = token || '.' || base64url(Thumbprint(accountKey))
//
// This client package automatically assembles and sets this value for you.
KeyAuthorization string `json:"keyAuthorization,omitempty"`
// We attach the identifier that this challenge is associated with, which
// may be useful information for solving a challenge. It is not part of the
// structure as defined by the spec but is added by us to provide enough
// information to solve the DNS-01 challenge.
Identifier Identifier `json:"identifier,omitempty"`
}
// HTTP01ResourcePath returns the URI path for solving the http-01 challenge.
//
// "The path at which the resource is provisioned is comprised of the
// fixed prefix '/.well-known/acme-challenge/', followed by the 'token'
// value in the challenge." §8.3
func (c Challenge) HTTP01ResourcePath() string {
return "/.well-known/acme-challenge/" + c.Token
}
// DNS01TXTRecordName returns the name of the TXT record to create for
// solving the dns-01 challenge.
//
// "The client constructs the validation domain name by prepending the
// label '_acme-challenge' to the domain name being validated, then
// provisions a TXT record with the digest value under that name." §8.4
func (c Challenge) DNS01TXTRecordName() string {
return "_acme-challenge." + c.Identifier.Value
}
// DNS01KeyAuthorization encodes a key authorization value to be used
// in a TXT record for the _acme-challenge DNS record.
//
// "A client fulfills this challenge by constructing a key authorization
// from the 'token' value provided in the challenge and the client's
// account key. The client then computes the SHA-256 digest [FIPS180-4]
// of the key authorization.
//
// The record provisioned to the DNS contains the base64url encoding of
// this digest." §8.4
func (c Challenge) DNS01KeyAuthorization() string {
h := sha256.Sum256([]byte(c.KeyAuthorization))
return base64.RawURLEncoding.EncodeToString(h[:])
}
// InitiateChallenge "indicates to the server that it is ready for the challenge
// validation by sending an empty JSON body ('{}') carried in a POST request to
// the challenge URL (not the authorization URL)." §7.5.1
func (c *Client) InitiateChallenge(ctx context.Context, account Account, challenge Challenge) (Challenge, error) {
if err := c.provision(ctx); err != nil {
return Challenge{}, err
}
_, err := c.httpPostJWS(ctx, account.PrivateKey, account.Location, challenge.URL, struct{}{}, &challenge)
return challenge, err
}
// The standard or well-known ACME challenge types.
const (
ChallengeTypeHTTP01 = "http-01" // RFC 8555 §8.3
ChallengeTypeDNS01 = "dns-01" // RFC 8555 §8.4
ChallengeTypeTLSALPN01 = "tls-alpn-01" // RFC 8737 §3
)

240
vendor/github.com/mholt/acmez/acme/client.go generated vendored Normal file
View File

@@ -0,0 +1,240 @@
// Copyright 2020 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package acme fully implements the ACME protocol specification as
// described in RFC 8555: https://tools.ietf.org/html/rfc8555.
//
// It is designed to work smoothly in large-scale deployments with
// high resilience to errors and intermittent network or server issues,
// with retries built-in at every layer of the HTTP request stack.
//
// NOTE: This is a low-level API. Most users will want the mholt/acmez
// package which is more concerned with configuring challenges and
// implementing the order flow. However, using this package directly
// is recommended for advanced use cases having niche requirements.
// See the examples in the examples/plumbing folder for a tutorial.
package acme
import (
"context"
"fmt"
"net/http"
"sync"
"time"
"go.uber.org/zap"
)
// Client facilitates ACME client operations as defined by the spec.
//
// Because the client is synchronized for concurrent use, it should
// not be copied.
//
// Many errors that are returned by a Client are likely to be of type
// Problem as long as the ACME server returns a structured error
// response. This package wraps errors that may be of type Problem,
// so you can access the details using the conventional Go pattern:
//
// var problem Problem
// if errors.As(err, &problem) {
// log.Printf("Houston, we have a problem: %+v", problem)
// }
//
// All Problem errors originate from the ACME server.
type Client struct {
// The ACME server's directory endpoint.
Directory string
// Custom HTTP client.
HTTPClient *http.Client
// Augmentation of the User-Agent header. Please set
// this so that CAs can troubleshoot bugs more easily.
UserAgent string
// Delay between poll attempts. Only used if server
// does not supply a Retry-After header. Default: 250ms
PollInterval time.Duration
// Maximum duration for polling. Default: 5m
PollTimeout time.Duration
// An optional logger. Default: no logs
Logger *zap.Logger
mu sync.Mutex // protects all unexported fields
dir Directory
nonces *stack
}
// GetDirectory retrieves the directory configured at c.Directory. It is
// NOT necessary to call this to provision the client. It is only useful
// if you want to access a copy of the directory yourself.
func (c *Client) GetDirectory(ctx context.Context) (Directory, error) {
if err := c.provision(ctx); err != nil {
return Directory{}, err
}
return c.dir, nil
}
func (c *Client) provision(ctx context.Context) error {
c.mu.Lock()
defer c.mu.Unlock()
if c.nonces == nil {
c.nonces = new(stack)
}
err := c.provisionDirectory(ctx)
if err != nil {
return fmt.Errorf("provisioning client: %w", err)
}
return nil
}
func (c *Client) provisionDirectory(ctx context.Context) error {
// don't get directory again if we already have it;
// checking any one of the required fields will do
if c.dir.NewNonce != "" {
return nil
}
if c.Directory == "" {
return fmt.Errorf("missing directory URL")
}
// prefer cached version if it's recent enough
directoriesMu.Lock()
defer directoriesMu.Unlock()
if dir, ok := directories[c.Directory]; ok {
if time.Since(dir.retrieved) < 12*time.Hour {
c.dir = dir.Directory
return nil
}
}
_, err := c.httpReq(ctx, http.MethodGet, c.Directory, nil, &c.dir)
if err != nil {
return err
}
directories[c.Directory] = cachedDirectory{c.dir, time.Now()}
return nil
}
func (c *Client) nonce(ctx context.Context) (string, error) {
nonce := c.nonces.pop()
if nonce != "" {
return nonce, nil
}
if c.dir.NewNonce == "" {
return "", fmt.Errorf("directory missing newNonce endpoint")
}
resp, err := c.httpReq(ctx, http.MethodHead, c.dir.NewNonce, nil, nil)
if err != nil {
return "", fmt.Errorf("fetching new nonce from server: %w", err)
}
return resp.Header.Get(replayNonce), nil
}
func (c *Client) pollInterval() time.Duration {
if c.PollInterval == 0 {
return defaultPollInterval
}
return c.PollInterval
}
func (c *Client) pollTimeout() time.Duration {
if c.PollTimeout == 0 {
return defaultPollTimeout
}
return c.PollTimeout
}
// Directory acts as an index for the ACME server as
// specified in the spec: "In order to help clients
// configure themselves with the right URLs for each
// ACME operation, ACME servers provide a directory
// object." §7.1.1
type Directory struct {
NewNonce string `json:"newNonce"`
NewAccount string `json:"newAccount"`
NewOrder string `json:"newOrder"`
NewAuthz string `json:"newAuthz,omitempty"`
RevokeCert string `json:"revokeCert"`
KeyChange string `json:"keyChange"`
Meta *DirectoryMeta `json:"meta,omitempty"`
}
// DirectoryMeta is optional extra data that may be
// included in an ACME server directory. §7.1.1
type DirectoryMeta struct {
TermsOfService string `json:"termsOfService,omitempty"`
Website string `json:"website,omitempty"`
CAAIdentities []string `json:"caaIdentities,omitempty"`
ExternalAccountRequired bool `json:"externalAccountRequired,omitempty"`
}
// stack is a simple thread-safe stack.
type stack struct {
stack []string
stackMu sync.Mutex
}
func (s *stack) push(v string) {
if v == "" {
return
}
s.stackMu.Lock()
defer s.stackMu.Unlock()
if len(s.stack) >= 64 {
return
}
s.stack = append(s.stack, v)
}
func (s *stack) pop() string {
s.stackMu.Lock()
defer s.stackMu.Unlock()
n := len(s.stack)
if n == 0 {
return ""
}
v := s.stack[n-1]
s.stack = s.stack[:n-1]
return v
}
// Directories seldom (if ever) change in practice, and
// client structs are often ephemeral, so we can cache
// directories to speed things up a bit for the user.
// Keyed by directory URL.
var (
directories = make(map[string]cachedDirectory)
directoriesMu sync.Mutex
)
type cachedDirectory struct {
Directory
retrieved time.Time
}
// replayNonce is the header field that contains a new
// anti-replay nonce from the server.
const replayNonce = "Replay-Nonce"
const (
defaultPollInterval = 250 * time.Millisecond
defaultPollTimeout = 5 * time.Minute
)

393
vendor/github.com/mholt/acmez/acme/http.go generated vendored Normal file
View File

@@ -0,0 +1,393 @@
// Copyright 2020 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package acme
import (
"bytes"
"context"
"crypto"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"regexp"
"runtime"
"strconv"
"strings"
"sync"
"time"
"go.uber.org/zap"
)
// httpPostJWS performs robust HTTP requests by JWS-encoding the JSON of input.
// If output is specified, the response body is written into it: if the response
// Content-Type is JSON, it will be JSON-decoded into output (which must be a
// pointer); otherwise, if output is an io.Writer, the response body will be
// written to it uninterpreted. In all cases, the returned response value's
// body will have been drained and closed, so there is no need to close it again.
// It automatically retries in the case of network, I/O, or badNonce errors.
func (c *Client) httpPostJWS(ctx context.Context, privateKey crypto.Signer,
kid, endpoint string, input, output interface{}) (*http.Response, error) {
if err := c.provision(ctx); err != nil {
return nil, err
}
var resp *http.Response
var err error
// we can retry on internal server errors just in case it was a hiccup,
// but we probably don't need to retry so many times in that case
internalServerErrors, maxInternalServerErrors := 0, 3
// set a hard cap on the number of retries for any other reason
const maxAttempts = 10
var attempts int
for attempts = 1; attempts <= maxAttempts; attempts++ {
if attempts > 1 {
select {
case <-time.After(250 * time.Millisecond):
case <-ctx.Done():
return nil, ctx.Err()
}
}
var nonce string // avoid shadowing err
nonce, err = c.nonce(ctx)
if err != nil {
return nil, err
}
var encodedPayload []byte // avoid shadowing err
encodedPayload, err = jwsEncodeJSON(input, privateKey, keyID(kid), nonce, endpoint)
if err != nil {
return nil, fmt.Errorf("encoding payload: %v", err)
}
resp, err = c.httpReq(ctx, http.MethodPost, endpoint, encodedPayload, output)
if err == nil {
return resp, nil
}
// "When a server rejects a request because its nonce value was
// unacceptable (or not present), it MUST provide HTTP status code 400
// (Bad Request), and indicate the ACME error type
// 'urn:ietf:params:acme:error:badNonce'. An error response with the
// 'badNonce' error type MUST include a Replay-Nonce header field with a
// fresh nonce that the server will accept in a retry of the original
// query (and possibly in other requests, according to the server's
// nonce scoping policy). On receiving such a response, a client SHOULD
// retry the request using the new nonce." §6.5
var problem Problem
if errors.As(err, &problem) {
if problem.Type == ProblemTypeBadNonce {
if c.Logger != nil {
c.Logger.Debug("server rejected our nonce; retrying",
zap.String("detail", problem.Detail),
zap.Error(err))
}
continue
}
}
// internal server errors *could* just be a hiccup and it may be worth
// trying again, but not nearly so many times as for other reasons
if resp != nil && resp.StatusCode >= 500 {
internalServerErrors++
if internalServerErrors < maxInternalServerErrors {
continue
}
}
// for any other error, there's not much we can do automatically
break
}
return resp, fmt.Errorf("attempt %d: %s: %w", attempts, endpoint, err)
}
// httpReq robustly performs an HTTP request using the given method to the given endpoint, honoring
// the given context's cancellation. The joseJSONPayload is optional; if not nil, it is expected to
// be a JOSE+JSON encoding. The output is also optional; if not nil, the response body will be read
// into output. If the response Content-Type is JSON, it will be JSON-decoded into output, which
// must be a pointer type. If the response is any other Content-Type and if output is a io.Writer,
// it will be written (without interpretation or decoding) to output. In all cases, the returned
// response value will have the body drained and closed, so there is no need to close it again.
//
// If there are any network or I/O errors, the request will be retried as safely and resiliently as
// possible.
func (c *Client) httpReq(ctx context.Context, method, endpoint string, joseJSONPayload []byte, output interface{}) (*http.Response, error) {
// even if the caller doesn't specify an output, we still use a
// buffer to store possible error response (we reset it later)
buf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(buf)
var resp *http.Response
var err error
// potentially retry the request if there's network, I/O, or server internal errors
const maxAttempts = 3
for attempt := 0; attempt < maxAttempts; attempt++ {
if attempt > 0 {
// traffic calming ahead
select {
case <-time.After(250 * time.Millisecond):
case <-ctx.Done():
return nil, ctx.Err()
}
}
var body io.Reader
if joseJSONPayload != nil {
body = bytes.NewReader(joseJSONPayload)
}
var req *http.Request
req, err = http.NewRequestWithContext(ctx, method, endpoint, body)
if err != nil {
return nil, fmt.Errorf("creating request: %w", err)
}
if len(joseJSONPayload) > 0 {
req.Header.Set("Content-Type", "application/jose+json")
}
// on first attempt, we need to reset buf since it
// came from the pool; after first attempt, we should
// still reset it because we might be retrying after
// a partial download
buf.Reset()
var retry bool
resp, retry, err = c.doHTTPRequest(req, buf)
if err != nil {
if retry {
if c.Logger != nil {
c.Logger.Warn("HTTP request failed; retrying",
zap.String("url", req.URL.String()),
zap.Error(err))
}
continue
}
break
}
// check for HTTP errors
switch {
case resp.StatusCode >= 200 && resp.StatusCode < 300: // OK
case resp.StatusCode >= 400 && resp.StatusCode < 600: // error
if parseMediaType(resp) == "application/problem+json" {
// "When the server responds with an error status, it SHOULD provide
// additional information using a problem document [RFC7807]." (§6.7)
var problem Problem
err = json.Unmarshal(buf.Bytes(), &problem)
if err != nil {
return resp, fmt.Errorf("HTTP %d: JSON-decoding problem details: %w (raw='%s')",
resp.StatusCode, err, buf.String())
}
if resp.StatusCode >= 500 && joseJSONPayload == nil {
// a 5xx status is probably safe to retry on even after a
// request that had no I/O errors; it could be that the
// server just had a hiccup... so try again, but only if
// there is no request body, because we can't replay a
// request that has an anti-replay nonce, obviously
err = problem
continue
}
return resp, problem
}
return resp, fmt.Errorf("HTTP %d: %s", resp.StatusCode, buf.String())
default: // what even is this
return resp, fmt.Errorf("unexpected status code: HTTP %d", resp.StatusCode)
}
// do not retry if we got this far (success)
break
}
if err != nil {
return resp, err
}
// if expecting a body, finally decode it
if output != nil {
contentType := parseMediaType(resp)
switch contentType {
case "application/json":
// unmarshal JSON
err = json.Unmarshal(buf.Bytes(), output)
if err != nil {
return resp, fmt.Errorf("JSON-decoding response body: %w", err)
}
default:
// don't interpret anything else here; just hope
// it's a Writer and copy the bytes
w, ok := output.(io.Writer)
if !ok {
return resp, fmt.Errorf("response Content-Type is %s but target container is not io.Writer: %T", contentType, output)
}
_, err = io.Copy(w, buf)
if err != nil {
return resp, err
}
}
}
return resp, nil
}
// doHTTPRequest performs an HTTP request at most one time. It returns the response
// (with drained and closed body), having drained any request body into buf. If
// retry == true is returned, then the request should be safe to retry in the case
// of an error. However, in some cases a retry may be recommended even if part of
// the response body has been read and written into buf. Thus, the buffer may have
// been partially written to and should be reset before being reused.
//
// This method remembers any nonce returned by the server.
func (c *Client) doHTTPRequest(req *http.Request, buf *bytes.Buffer) (resp *http.Response, retry bool, err error) {
req.Header.Set("User-Agent", c.userAgent())
resp, err = c.httpClient().Do(req)
if err != nil {
return resp, true, fmt.Errorf("performing request: %w", err)
}
defer resp.Body.Close()
if c.Logger != nil {
c.Logger.Debug("http request",
zap.String("method", req.Method),
zap.String("url", req.URL.String()),
zap.Reflect("headers", req.Header),
zap.Reflect("response_headers", resp.Header),
zap.Int("status_code", resp.StatusCode))
}
// "The server MUST include a Replay-Nonce header field
// in every successful response to a POST request and
// SHOULD provide it in error responses as well." §6.5
//
// "Before sending a POST request to the server, an ACME
// client needs to have a fresh anti-replay nonce to put
// in the 'nonce' header of the JWS. In most cases, the
// client will have gotten a nonce from a previous
// request." §7.2
//
// So basically, we need to remember the nonces we get
// and use them at the next opportunity.
c.nonces.push(resp.Header.Get(replayNonce))
// drain the response body, even if we aren't keeping it
// (this allows us to reuse the connection and also read
// any error information)
_, err = io.Copy(buf, resp.Body)
if err != nil {
// this is likely a network or I/O error, but is it worth retrying?
// technically the request has already completed, it was just our
// download of the response that failed; so we probably should not
// retry if the request succeeded... however, if there was an HTTP
// error, it likely didn't count against any server-enforced rate
// limits, and we DO want to know the error information, so it should
// be safe to retry the request in those cases AS LONG AS there is
// no request body, which in the context of ACME likely includes an
// anti-replay nonce, which obviously we can't reuse
retry = resp.StatusCode >= 400 && req.Body == nil
return resp, retry, fmt.Errorf("reading response body: %w", err)
}
return resp, false, nil
}
func (c *Client) httpClient() *http.Client {
if c.HTTPClient == nil {
return http.DefaultClient
}
return c.HTTPClient
}
func (c *Client) userAgent() string {
ua := fmt.Sprintf("acmez (%s; %s)", runtime.GOOS, runtime.GOARCH)
if c.UserAgent != "" {
ua = c.UserAgent + " " + ua
}
return ua
}
// extractLinks extracts the URL from the Link header with the
// designated relation rel. It may return more than one value
// if there are multiple matching Link values.
//
// Originally by Isaac: https://github.com/eggsampler/acme
// and has been modified to support multiple matching Links.
func extractLinks(resp *http.Response, rel string) []string {
if resp == nil {
return nil
}
var links []string
for _, l := range resp.Header["Link"] {
matches := linkRegex.FindAllStringSubmatch(l, -1)
for _, m := range matches {
if len(m) != 3 {
continue
}
if m[2] == rel {
links = append(links, m[1])
}
}
}
return links
}
// parseMediaType returns only the media type from the
// Content-Type header of resp.
func parseMediaType(resp *http.Response) string {
if resp == nil {
return ""
}
ct := resp.Header.Get("Content-Type")
sep := strings.Index(ct, ";")
if sep < 0 {
return ct
}
return strings.TrimSpace(ct[:sep])
}
// retryAfter returns a duration from the response's Retry-After
// header field, if it exists. It can return an error if the
// header contains an invalid value. If there is no error but
// there is no Retry-After header provided, then the fallback
// duration is returned instead.
func retryAfter(resp *http.Response, fallback time.Duration) (time.Duration, error) {
if resp == nil {
return fallback, nil
}
raSeconds := resp.Header.Get("Retry-After")
if raSeconds == "" {
return fallback, nil
}
ra, err := strconv.Atoi(raSeconds)
if err != nil || ra < 0 {
return 0, fmt.Errorf("response had invalid Retry-After header: %s", raSeconds)
}
return time.Duration(ra) * time.Second, nil
}
var bufPool = sync.Pool{
New: func() interface{} {
return new(bytes.Buffer)
},
}
var linkRegex = regexp.MustCompile(`<(.+?)>;\s*rel="(.+?)"`)

263
vendor/github.com/mholt/acmez/acme/jws.go generated vendored Normal file
View File

@@ -0,0 +1,263 @@
// Copyright 2020 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// --- ORIGINAL LICENSE ---
//
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the THIRD-PARTY file.
//
// (This file has been modified from its original contents.)
// (And it has dragons. Don't wake the dragons.)
package acme
import (
"crypto"
"crypto/ecdsa"
"crypto/hmac"
"crypto/rand"
"crypto/rsa"
"crypto/sha256"
_ "crypto/sha512" // need for EC keys
"encoding/base64"
"encoding/json"
"fmt"
"math/big"
)
var errUnsupportedKey = fmt.Errorf("unknown key type; only RSA and ECDSA are supported")
// keyID is the account identity provided by a CA during registration.
type keyID string
// noKeyID indicates that jwsEncodeJSON should compute and use JWK instead of a KID.
// See jwsEncodeJSON for details.
const noKeyID = keyID("")
// // noPayload indicates jwsEncodeJSON will encode zero-length octet string
// // in a JWS request. This is called POST-as-GET in RFC 8555 and is used to make
// // authenticated GET requests via POSTing with an empty payload.
// // See https://tools.ietf.org/html/rfc8555#section-6.3 for more details.
// const noPayload = ""
// jwsEncodeEAB creates a JWS payload for External Account Binding according to RFC 8555 §7.3.4.
func jwsEncodeEAB(accountKey crypto.PublicKey, hmacKey []byte, kid keyID, url string) ([]byte, error) {
// §7.3.4: "The 'alg' field MUST indicate a MAC-based algorithm"
alg, sha := "HS256", crypto.SHA256
// §7.3.4: "The 'nonce' field MUST NOT be present"
phead, err := jwsHead(alg, "", url, kid, nil)
if err != nil {
return nil, err
}
encodedKey, err := jwkEncode(accountKey)
if err != nil {
return nil, err
}
payload := base64.RawURLEncoding.EncodeToString([]byte(encodedKey))
payloadToSign := []byte(phead + "." + payload)
h := hmac.New(sha256.New, hmacKey)
h.Write(payloadToSign)
sig := h.Sum(nil)
return jwsFinal(sha, sig, phead, payload)
}
// jwsEncodeJSON signs claimset using provided key and a nonce.
// The result is serialized in JSON format containing either kid or jwk
// fields based on the provided keyID value.
//
// If kid is non-empty, its quoted value is inserted in the protected head
// as "kid" field value. Otherwise, JWK is computed using jwkEncode and inserted
// as "jwk" field value. The "jwk" and "kid" fields are mutually exclusive.
//
// See https://tools.ietf.org/html/rfc7515#section-7.
//
// If nonce is empty, it will not be encoded into the header.
func jwsEncodeJSON(claimset interface{}, key crypto.Signer, kid keyID, nonce, url string) ([]byte, error) {
alg, sha := jwsHasher(key.Public())
if alg == "" || !sha.Available() {
return nil, errUnsupportedKey
}
phead, err := jwsHead(alg, nonce, url, kid, key)
if err != nil {
return nil, err
}
var payload string
if claimset != nil {
cs, err := json.Marshal(claimset)
if err != nil {
return nil, err
}
payload = base64.RawURLEncoding.EncodeToString(cs)
}
payloadToSign := []byte(phead + "." + payload)
hash := sha.New()
_, _ = hash.Write(payloadToSign)
digest := hash.Sum(nil)
sig, err := jwsSign(key, sha, digest)
if err != nil {
return nil, err
}
return jwsFinal(sha, sig, phead, payload)
}
// jwkEncode encodes public part of an RSA or ECDSA key into a JWK.
// The result is also suitable for creating a JWK thumbprint.
// https://tools.ietf.org/html/rfc7517
func jwkEncode(pub crypto.PublicKey) (string, error) {
switch pub := pub.(type) {
case *rsa.PublicKey:
// https://tools.ietf.org/html/rfc7518#section-6.3.1
n := pub.N
e := big.NewInt(int64(pub.E))
// Field order is important.
// See https://tools.ietf.org/html/rfc7638#section-3.3 for details.
return fmt.Sprintf(`{"e":"%s","kty":"RSA","n":"%s"}`,
base64.RawURLEncoding.EncodeToString(e.Bytes()),
base64.RawURLEncoding.EncodeToString(n.Bytes()),
), nil
case *ecdsa.PublicKey:
// https://tools.ietf.org/html/rfc7518#section-6.2.1
p := pub.Curve.Params()
n := p.BitSize / 8
if p.BitSize%8 != 0 {
n++
}
x := pub.X.Bytes()
if n > len(x) {
x = append(make([]byte, n-len(x)), x...)
}
y := pub.Y.Bytes()
if n > len(y) {
y = append(make([]byte, n-len(y)), y...)
}
// Field order is important.
// See https://tools.ietf.org/html/rfc7638#section-3.3 for details.
return fmt.Sprintf(`{"crv":"%s","kty":"EC","x":"%s","y":"%s"}`,
p.Name,
base64.RawURLEncoding.EncodeToString(x),
base64.RawURLEncoding.EncodeToString(y),
), nil
}
return "", errUnsupportedKey
}
// jwsHead constructs the protected JWS header for the given fields.
// Since jwk and kid are mutually-exclusive, the jwk will be encoded
// only if kid is empty. If nonce is empty, it will not be encoded.
func jwsHead(alg, nonce, url string, kid keyID, key crypto.Signer) (string, error) {
phead := fmt.Sprintf(`{"alg":%q`, alg)
if kid == noKeyID {
jwk, err := jwkEncode(key.Public())
if err != nil {
return "", err
}
phead += fmt.Sprintf(`,"jwk":%s`, jwk)
} else {
phead += fmt.Sprintf(`,"kid":%q`, kid)
}
if nonce != "" {
phead += fmt.Sprintf(`,"nonce":%q`, nonce)
}
phead += fmt.Sprintf(`,"url":%q}`, url)
phead = base64.RawURLEncoding.EncodeToString([]byte(phead))
return phead, nil
}
// jwsFinal constructs the final JWS object.
func jwsFinal(sha crypto.Hash, sig []byte, phead, payload string) ([]byte, error) {
enc := struct {
Protected string `json:"protected"`
Payload string `json:"payload"`
Sig string `json:"signature"`
}{
Protected: phead,
Payload: payload,
Sig: base64.RawURLEncoding.EncodeToString(sig),
}
result, err := json.Marshal(&enc)
if err != nil {
return nil, err
}
return result, nil
}
// jwsSign signs the digest using the given key.
// The hash is unused for ECDSA keys.
//
// Note: non-stdlib crypto.Signer implementations are expected to return
// the signature in the format as specified in RFC7518.
// See https://tools.ietf.org/html/rfc7518 for more details.
func jwsSign(key crypto.Signer, hash crypto.Hash, digest []byte) ([]byte, error) {
if key, ok := key.(*ecdsa.PrivateKey); ok {
// The key.Sign method of ecdsa returns ASN1-encoded signature.
// So, we use the package Sign function instead
// to get R and S values directly and format the result accordingly.
r, s, err := ecdsa.Sign(rand.Reader, key, digest)
if err != nil {
return nil, err
}
rb, sb := r.Bytes(), s.Bytes()
size := key.Params().BitSize / 8
if size%8 > 0 {
size++
}
sig := make([]byte, size*2)
copy(sig[size-len(rb):], rb)
copy(sig[size*2-len(sb):], sb)
return sig, nil
}
return key.Sign(rand.Reader, digest, hash)
}
// jwsHasher indicates suitable JWS algorithm name and a hash function
// to use for signing a digest with the provided key.
// It returns ("", 0) if the key is not supported.
func jwsHasher(pub crypto.PublicKey) (string, crypto.Hash) {
switch pub := pub.(type) {
case *rsa.PublicKey:
return "RS256", crypto.SHA256
case *ecdsa.PublicKey:
switch pub.Params().Name {
case "P-256":
return "ES256", crypto.SHA256
case "P-384":
return "ES384", crypto.SHA384
case "P-521":
return "ES512", crypto.SHA512
}
}
return "", 0
}
// jwkThumbprint creates a JWK thumbprint out of pub
// as specified in https://tools.ietf.org/html/rfc7638.
func jwkThumbprint(pub crypto.PublicKey) (string, error) {
jwk, err := jwkEncode(pub)
if err != nil {
return "", err
}
b := sha256.Sum256([]byte(jwk))
return base64.RawURLEncoding.EncodeToString(b[:]), nil
}
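// Illustrative sketch (not part of the vendored file): the thumbprint above is
// the piece RFC 8555 §8.1 combines with a challenge token to form the key
// authorization string that challenge solvers publish. The helper name is
// hypothetical; it only reuses jwkThumbprint from this file.
func exampleKeyAuthorization(token string, accountKey crypto.Signer) (string, error) {
	thumb, err := jwkThumbprint(accountKey.Public())
	if err != nil {
		return "", err
	}
	// keyAuthorization = token || '.' || base64url(JWK_Thumbprint(accountKey))
	return token + "." + thumb, nil
}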

vendor/github.com/mholt/acmez/acme/order.go generated vendored Normal file

@@ -0,0 +1,256 @@
// Copyright 2020 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package acme
import (
"context"
"encoding/base64"
"errors"
"fmt"
"time"
)
// Order is an object that "represents a client's request for a certificate
// and is used to track the progress of that order through to issuance.
// Thus, the object contains information about the requested
// certificate, the authorizations that the server requires the client
// to complete, and any certificates that have resulted from this order."
// §7.1.3
type Order struct {
// status (required, string): The status of this order. Possible
// values are "pending", "ready", "processing", "valid", and
// "invalid". See Section 7.1.6.
Status string `json:"status"`
// expires (optional, string): The timestamp after which the server
// will consider this order invalid, encoded in the format specified
// in [RFC3339]. This field is REQUIRED for objects with "pending"
// or "valid" in the status field.
Expires time.Time `json:"expires,omitempty"`
// identifiers (required, array of object): An array of identifier
// objects that the order pertains to.
Identifiers []Identifier `json:"identifiers"`
// notBefore (optional, string): The requested value of the notBefore
// field in the certificate, in the date format defined in [RFC3339].
NotBefore *time.Time `json:"notBefore,omitempty"`
// notAfter (optional, string): The requested value of the notAfter
// field in the certificate, in the date format defined in [RFC3339].
NotAfter *time.Time `json:"notAfter,omitempty"`
// error (optional, object): The error that occurred while processing
// the order, if any. This field is structured as a problem document
// [RFC7807].
Error *Problem `json:"error,omitempty"`
// authorizations (required, array of string): For pending orders, the
// authorizations that the client needs to complete before the
// requested certificate can be issued (see Section 7.5), including
// unexpired authorizations that the client has completed in the past
// for identifiers specified in the order. The authorizations
// required are dictated by server policy; there may not be a 1:1
// relationship between the order identifiers and the authorizations
// required. For final orders (in the "valid" or "invalid" state),
// the authorizations that were completed. Each entry is a URL from
// which an authorization can be fetched with a POST-as-GET request.
Authorizations []string `json:"authorizations"`
// finalize (required, string): A URL that a CSR must be POSTed to once
// all of the order's authorizations are satisfied to finalize the
// order. The result of a successful finalization will be the
// population of the certificate URL for the order.
Finalize string `json:"finalize"`
// certificate (optional, string): A URL for the certificate that has
// been issued in response to this order.
Certificate string `json:"certificate"`
// Similar to new-account, the server returns a 201 response with
// the URL to the order object in the Location header.
//
// We transfer the value from the header to this field for
// storage and recall purposes.
Location string `json:"-"`
}
// Identifier is used in order and authorization (authz) objects.
type Identifier struct {
// type (required, string): The type of identifier. This document
// defines the "dns" identifier type. See the registry defined in
// Section 9.7.7 for any others.
Type string `json:"type"`
// value (required, string): The identifier itself.
Value string `json:"value"`
}
// NewOrder creates a new order with the server.
//
// "The client begins the certificate issuance process by sending a POST
// request to the server's newOrder resource." §7.4
func (c *Client) NewOrder(ctx context.Context, account Account, order Order) (Order, error) {
if err := c.provision(ctx); err != nil {
return order, err
}
resp, err := c.httpPostJWS(ctx, account.PrivateKey, account.Location, c.dir.NewOrder, order, &order)
if err != nil {
return order, err
}
order.Location = resp.Header.Get("Location")
return order, nil
}
// GetOrder retrieves an order from the server. The Order's Location field must be populated.
func (c *Client) GetOrder(ctx context.Context, account Account, order Order) (Order, error) {
if err := c.provision(ctx); err != nil {
return order, err
}
_, err := c.httpPostJWS(ctx, account.PrivateKey, account.Location, order.Location, nil, &order)
return order, err
}
// FinalizeOrder finalizes the order with the server and polls until the server has
// updated the order status. The CSR must be in ASN.1 DER-encoded format. If this
// succeeds, the certificate is ready to download once this returns.
//
// "Once the client believes it has fulfilled the server's requirements,
// it should send a POST request to the order resource's finalize URL." §7.4
func (c *Client) FinalizeOrder(ctx context.Context, account Account, order Order, csrASN1DER []byte) (Order, error) {
if err := c.provision(ctx); err != nil {
return order, err
}
body := struct {
// csr (required, string): A CSR encoding the parameters for the
// certificate being requested [RFC2986]. The CSR is sent in the
// base64url-encoded version of the DER format. (Note: Because this
// field uses base64url, and does not include headers, it is
// different from PEM.) §7.4
CSR string `json:"csr"`
}{
CSR: base64.RawURLEncoding.EncodeToString(csrASN1DER),
}
resp, err := c.httpPostJWS(ctx, account.PrivateKey, account.Location, order.Finalize, body, &order)
if err != nil {
// "A request to finalize an order will result in error if the order is
// not in the 'ready' state. In such cases, the server MUST return a
// 403 (Forbidden) error with a problem document of type
// 'orderNotReady'. The client should then send a POST-as-GET request
// to the order resource to obtain its current state. The status of the
// order will indicate what action the client should take (see below)."
// §7.4
var problem Problem
if errors.As(err, &problem) {
if problem.Type != ProblemTypeOrderNotReady {
return order, err
}
} else {
return order, err
}
}
// unlike with accounts and authorizations, the spec isn't clear on whether
// the server MUST set this on finalizing the order, but their example shows a
// Location header, so I guess if it's set in the response, we should keep it
if newLocation := resp.Header.Get("Location"); newLocation != "" {
order.Location = newLocation
}
if finished, err := orderIsFinished(order); finished {
return order, err
}
// TODO: "The elements of the "authorizations" and "identifiers" arrays are
// immutable once set. If a client observes a change
// in the contents of either array, then it SHOULD consider the order
// invalid."
maxDuration := c.pollTimeout()
start := time.Now()
for time.Since(start) < maxDuration {
// querying an order is expensive on the server-side, so we
// shouldn't do it too frequently; honor server preference
interval, err := retryAfter(resp, c.pollInterval())
if err != nil {
return order, err
}
select {
case <-time.After(interval):
case <-ctx.Done():
return order, ctx.Err()
}
resp, err = c.httpPostJWS(ctx, account.PrivateKey, account.Location, order.Location, nil, &order)
if err != nil {
return order, fmt.Errorf("polling order status: %w", err)
}
// (same reasoning as above)
if newLocation := resp.Header.Get("Location"); newLocation != "" {
order.Location = newLocation
}
if finished, err := orderIsFinished(order); finished {
return order, err
}
}
return order, fmt.Errorf("order took too long")
}
// orderIsFinished returns true if the order processing is complete,
// regardless of success or failure. If this function returns true,
// polling an order status should stop. If there is an error with the
// order, an error will be returned. This function should be called
// only after a request to finalize an order. See §7.4.
func orderIsFinished(order Order) (bool, error) {
switch order.Status {
case StatusInvalid:
// "invalid": The certificate will not be issued. Consider this
// order process abandoned.
return true, fmt.Errorf("final order is invalid: %w", order.Error)
case StatusPending:
// "pending": The server does not believe that the client has
// fulfilled the requirements. Check the "authorizations" array for
// entries that are still pending.
return true, fmt.Errorf("order pending, authorizations remaining: %v", order.Authorizations)
case StatusReady:
// "ready": The server agrees that the requirements have been
// fulfilled, and is awaiting finalization. Submit a finalization
// request.
// (we did just submit a finalization request, so this is an error)
return true, fmt.Errorf("unexpected state: %s - order already finalized", order.Status)
case StatusProcessing:
// "processing": The certificate is being issued. Send a GET request
// after the time given in the "Retry-After" header field of the
// response, if any.
return false, nil
case StatusValid:
// "valid": The server has issued the certificate and provisioned its
// URL to the "certificate" field of the order. Download the
// certificate.
return true, nil
default:
return true, fmt.Errorf("unrecognized order status: %s", order.Status)
}
}
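// Illustrative sketch (not part of the vendored file): the minimal order flow
// built from the methods above, assuming the order's authorizations are solved
// out of band. GetCertificateChain is defined elsewhere in this package; the
// function name and domain here are placeholders.
func exampleOrderFlow(ctx context.Context, c *Client, account Account, csrDER []byte) ([]Certificate, error) {
	order, err := c.NewOrder(ctx, account, Order{
		Identifiers: []Identifier{{Type: "dns", Value: "example.com"}},
	})
	if err != nil {
		return nil, err
	}
	// ... solve the challenges referenced by order.Authorizations here ...
	order, err = c.FinalizeOrder(ctx, account, order, csrDER)
	if err != nil {
		return nil, err
	}
	return c.GetCertificateChain(ctx, account, order.Certificate)
}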

vendor/github.com/mholt/acmez/acme/problem.go generated vendored Normal file

@@ -0,0 +1,174 @@
// Copyright 2020 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package acme
import (
"fmt"
"go.uber.org/zap/zapcore"
)
// Problem carries the details of an error from HTTP APIs as
// defined in RFC 7807: https://tools.ietf.org/html/rfc7807
// and as extended by RFC 8555 §6.7:
// https://tools.ietf.org/html/rfc8555#section-6.7
type Problem struct {
// "type" (string) - A URI reference [RFC3986] that identifies the
// problem type. This specification encourages that, when
// dereferenced, it provide human-readable documentation for the
// problem type (e.g., using HTML [W3C.REC-html5-20141028]). When
// this member is not present, its value is assumed to be
// "about:blank". §3.1
Type string `json:"type"`
// "title" (string) - A short, human-readable summary of the problem
// type. It SHOULD NOT change from occurrence to occurrence of the
// problem, except for purposes of localization (e.g., using
// proactive content negotiation; see [RFC7231], Section 3.4). §3.1
Title string `json:"title,omitempty"`
// "status" (number) - The HTTP status code ([RFC7231], Section 6)
// generated by the origin server for this occurrence of the problem.
// §3.1
Status int `json:"status,omitempty"`
// "detail" (string) - A human-readable explanation specific to this
// occurrence of the problem. §3.1
//
// RFC 8555 §6.7: "Clients SHOULD display the 'detail' field of all
// errors."
Detail string `json:"detail,omitempty"`
// "instance" (string) - A URI reference that identifies the specific
// occurrence of the problem. It may or may not yield further
// information if dereferenced. §3.1
Instance string `json:"instance,omitempty"`
// "Sometimes a CA may need to return multiple errors in response to a
// request. Additionally, the CA may need to attribute errors to
// specific identifiers. For instance, a newOrder request may contain
// multiple identifiers for which the CA cannot issue certificates. In
// this situation, an ACME problem document MAY contain the
// 'subproblems' field, containing a JSON array of problem documents."
// RFC 8555 §6.7.1
Subproblems []Subproblem `json:"subproblems,omitempty"`
// For convenience, we've added this field to associate with a value
// that is related to or caused the problem. It is not part of the
// spec, but, if a challenge fails for example, we can associate the
// error with the problematic authz object by setting this field.
// Challenge failures will have this set to an Authorization type.
Resource interface{} `json:"-"`
}
func (p Problem) Error() string {
// TODO: 7.3.3: Handle changes to Terms of Service (notice it uses the Instance field and Link header)
// RFC 8555 §6.7: "Clients SHOULD display the 'detail' field of all errors."
s := fmt.Sprintf("HTTP %d %s - %s", p.Status, p.Type, p.Detail)
if len(p.Subproblems) > 0 {
for _, v := range p.Subproblems {
s += fmt.Sprintf(", problem %q: %s", v.Type, v.Detail)
if v.Identifier.Type != "" || v.Identifier.Value != "" {
s += fmt.Sprintf(" (%s_identifier=%s)", v.Identifier.Type, v.Identifier.Value)
}
}
}
if p.Instance != "" {
s += ", url: " + p.Instance
}
return s
}
// MarshalLogObject satisfies the zapcore.ObjectMarshaler interface.
// This allows problems to be serialized by the zap logger.
func (p Problem) MarshalLogObject(enc zapcore.ObjectEncoder) error {
enc.AddString("type", p.Type)
enc.AddString("title", p.Title)
enc.AddString("detail", p.Detail)
enc.AddString("instance", p.Instance)
enc.AddArray("subproblems", loggableSubproblems(p.Subproblems))
return nil
}
// Subproblem describes a more specific error in a problem according to
// RFC 8555 §6.7.1: "An ACME problem document MAY contain the
// 'subproblems' field, containing a JSON array of problem documents,
// each of which MAY contain an 'identifier' field."
type Subproblem struct {
Problem
// "If present, the 'identifier' field MUST contain an ACME
// identifier (Section 9.7.7)." §6.7.1
Identifier Identifier `json:"identifier,omitempty"`
}
// MarshalLogObject satisfies the zapcore.ObjectMarshaler interface.
// This allows subproblems to be serialized by the zap logger.
func (sp Subproblem) MarshalLogObject(enc zapcore.ObjectEncoder) error {
enc.AddString("identifier_type", sp.Identifier.Type)
enc.AddString("identifier", sp.Identifier.Value)
enc.AddObject("subproblem", sp.Problem)
return nil
}
type loggableSubproblems []Subproblem
// MarshalLogArray satisfies the zapcore.ArrayMarshaler interface.
// This allows a list of subproblems to be serialized by the zap logger.
func (ls loggableSubproblems) MarshalLogArray(enc zapcore.ArrayEncoder) error {
for _, sp := range ls {
enc.AppendObject(sp)
}
return nil
}
// Standard token values for the "type" field of problems, as defined
// in RFC 8555 §6.7: https://tools.ietf.org/html/rfc8555#section-6.7
//
// "To facilitate automatic response to errors, this document defines the
// following standard tokens for use in the 'type' field (within the
// ACME URN namespace 'urn:ietf:params:acme:error:') ... This list is not
// exhaustive. The server MAY return errors whose 'type' field is set to
// a URI other than those defined above."
const (
// The ACME error URN prefix.
ProblemTypeNamespace = "urn:ietf:params:acme:error:"
ProblemTypeAccountDoesNotExist = ProblemTypeNamespace + "accountDoesNotExist"
ProblemTypeAlreadyRevoked = ProblemTypeNamespace + "alreadyRevoked"
ProblemTypeBadCSR = ProblemTypeNamespace + "badCSR"
ProblemTypeBadNonce = ProblemTypeNamespace + "badNonce"
ProblemTypeBadPublicKey = ProblemTypeNamespace + "badPublicKey"
ProblemTypeBadRevocationReason = ProblemTypeNamespace + "badRevocationReason"
ProblemTypeBadSignatureAlgorithm = ProblemTypeNamespace + "badSignatureAlgorithm"
ProblemTypeCAA = ProblemTypeNamespace + "caa"
ProblemTypeCompound = ProblemTypeNamespace + "compound"
ProblemTypeConnection = ProblemTypeNamespace + "connection"
ProblemTypeDNS = ProblemTypeNamespace + "dns"
ProblemTypeExternalAccountRequired = ProblemTypeNamespace + "externalAccountRequired"
ProblemTypeIncorrectResponse = ProblemTypeNamespace + "incorrectResponse"
ProblemTypeInvalidContact = ProblemTypeNamespace + "invalidContact"
ProblemTypeMalformed = ProblemTypeNamespace + "malformed"
ProblemTypeOrderNotReady = ProblemTypeNamespace + "orderNotReady"
ProblemTypeRateLimited = ProblemTypeNamespace + "rateLimited"
ProblemTypeRejectedIdentifier = ProblemTypeNamespace + "rejectedIdentifier"
ProblemTypeServerInternal = ProblemTypeNamespace + "serverInternal"
ProblemTypeTLS = ProblemTypeNamespace + "tls"
ProblemTypeUnauthorized = ProblemTypeNamespace + "unauthorized"
ProblemTypeUnsupportedContact = ProblemTypeNamespace + "unsupportedContact"
ProblemTypeUnsupportedIdentifier = ProblemTypeNamespace + "unsupportedIdentifier"
ProblemTypeUserActionRequired = ProblemTypeNamespace + "userActionRequired"
)
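// Illustrative sketch (not part of the vendored file): because Problem
// implements error, callers can branch on the standardized type URNs above
// using errors.As. The errors import and function name are assumptions of
// this example.
func exampleIsRateLimited(err error) bool {
	var problem Problem
	if errors.As(err, &problem) {
		return problem.Type == ProblemTypeRateLimited
	}
	return false
}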

vendor/github.com/mholt/acmez/client.go generated vendored Normal file

@@ -0,0 +1,697 @@
// Copyright 2020 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package acmez implements the higher-level flow of the ACME specification,
// RFC 8555: https://tools.ietf.org/html/rfc8555, specifically the sequence
// in Section 7.1 (page 21).
//
// It makes it easy to obtain certificates with various challenge types
// using pluggable challenge solvers, and provides some handy utilities for
// implementing solvers and using the certificates. It DOES NOT manage
// certificates, it only gets them from the ACME server.
//
// NOTE: This package's primary purpose is to get a certificate, not manage it.
// Most users actually want to *manage* certificates over the lifetime of
// long-running programs such as HTTPS or TLS servers, and should use CertMagic
// instead: https://github.com/caddyserver/certmagic.
package acmez
import (
"context"
"crypto"
"crypto/rand"
"crypto/x509"
"errors"
"fmt"
weakrand "math/rand"
"net"
"net/url"
"sort"
"strings"
"sync"
"time"
"github.com/mholt/acmez/acme"
"go.uber.org/zap"
"golang.org/x/net/idna"
)
func init() {
weakrand.Seed(time.Now().UnixNano())
}
// Client is a high-level API for ACME operations. It wraps
// a lower-level ACME client with useful functions to make
// common flows easier, especially for the issuance of
// certificates.
type Client struct {
*acme.Client
// Map of solvers keyed by name of the challenge type.
ChallengeSolvers map[string]Solver
}
// ObtainCertificateUsingCSR obtains all resulting certificate chains using the given CSR, which
// must be completely and properly filled out (particularly its DNSNames and Raw fields - this
// usually involves creating a template CSR, then calling x509.CreateCertificateRequest, then
// x509.ParseCertificateRequest on the output). The Subject CommonName is NOT considered.
//
// It implements every single part of the ACME flow described in RFC 8555 §7.1 with the exception
// of "Create account" because this method signature does not have a way to return the updated
// account object. The account's status MUST be "valid" in order to succeed.
//
// As far as SANs go, this method currently only supports DNSNames and IPAddresses on the csr.
func (c *Client) ObtainCertificateUsingCSR(ctx context.Context, account acme.Account, csr *x509.CertificateRequest) ([]acme.Certificate, error) {
if account.Status != acme.StatusValid {
return nil, fmt.Errorf("account status is not valid: %s", account.Status)
}
if csr == nil {
return nil, fmt.Errorf("missing CSR")
}
var ids []acme.Identifier
for _, name := range csr.DNSNames {
ids = append(ids, acme.Identifier{
Type: "dns", // RFC 8555 §9.7.7
Value: name,
})
}
for _, ip := range csr.IPAddresses {
ids = append(ids, acme.Identifier{
Type: "ip", // RFC 8738
Value: ip.String(),
})
}
if len(ids) == 0 {
return nil, fmt.Errorf("no identifiers found")
}
order := acme.Order{Identifiers: ids}
var err error
// remember which challenge types failed for which identifiers
// so we can retry with other challenge types
failedChallengeTypes := make(failedChallengeMap)
const maxAttempts = 3 // hard cap on number of retries for good measure
for attempt := 1; attempt <= maxAttempts; attempt++ {
if attempt > 1 {
select {
case <-time.After(1 * time.Second):
case <-ctx.Done():
return nil, ctx.Err()
}
}
// create order for a new certificate
order, err = c.Client.NewOrder(ctx, account, order)
if err != nil {
return nil, fmt.Errorf("creating new order: %w", err)
}
// solve one challenge for each authz on the order
err = c.solveChallenges(ctx, account, order, failedChallengeTypes)
// yay, we win!
if err == nil {
break
}
// for some errors, we can retry with different challenge types
var problem acme.Problem
if errors.As(err, &problem) {
authz, haveAuthz := problem.Resource.(acme.Authorization)
if c.Logger != nil {
l := c.Logger
if haveAuthz {
l = l.With(zap.String("identifier", authz.IdentifierValue()))
}
l.Error("validating authorization",
zap.Object("problem", problem),
zap.String("order", order.Location),
zap.Int("attempt", attempt),
zap.Int("max_attempts", maxAttempts))
}
errStr := "solving challenge"
if haveAuthz {
errStr += ": " + authz.IdentifierValue()
}
err = fmt.Errorf("%s: %w", errStr, err)
if errors.As(err, &retryableErr{}) {
continue
}
return nil, err
}
return nil, fmt.Errorf("solving challenges: %w (order=%s)", err, order.Location)
}
if c.Logger != nil {
c.Logger.Info("validations succeeded; finalizing order", zap.String("order", order.Location))
}
// finalize the order, which requests the CA to issue us a certificate
order, err = c.Client.FinalizeOrder(ctx, account, order, csr.Raw)
if err != nil {
return nil, fmt.Errorf("finalizing order %s: %w", order.Location, err)
}
// finally, download the certificate
certChains, err := c.Client.GetCertificateChain(ctx, account, order.Certificate)
if err != nil {
return nil, fmt.Errorf("downloading certificate chain from %s: %w (order=%s)",
order.Certificate, err, order.Location)
}
if c.Logger != nil {
if len(certChains) == 0 {
c.Logger.Info("no certificate chains offered by server")
} else {
c.Logger.Info("successfully downloaded available certificate chains",
zap.Int("count", len(certChains)),
zap.String("first_url", certChains[0].URL))
}
}
return certChains, nil
}
// ObtainCertificate is the same as ObtainCertificateUsingCSR, except it is a slight wrapper
// that generates the CSR for you. Doing so requires the private key you will be using for
// the certificate (different from the account private key). It obtains a certificate for
// the given SANs (domain names) using the provided account.
func (c *Client) ObtainCertificate(ctx context.Context, account acme.Account, certPrivateKey crypto.Signer, sans []string) ([]acme.Certificate, error) {
if len(sans) == 0 {
return nil, fmt.Errorf("no DNS names provided: %v", sans)
}
if certPrivateKey == nil {
return nil, fmt.Errorf("missing certificate private key")
}
csrTemplate := new(x509.CertificateRequest)
for _, name := range sans {
if ip := net.ParseIP(name); ip != nil {
csrTemplate.IPAddresses = append(csrTemplate.IPAddresses, ip)
} else if strings.Contains(name, "@") {
csrTemplate.EmailAddresses = append(csrTemplate.EmailAddresses, name)
} else if u, err := url.Parse(name); err == nil && strings.Contains(name, "/") {
csrTemplate.URIs = append(csrTemplate.URIs, u)
} else {
// "The domain name MUST be encoded in the form in which it would appear
// in a certificate. That is, it MUST be encoded according to the rules
// in Section 7 of [RFC5280]." §7.1.4
normalizedName, err := idna.ToASCII(name)
if err != nil {
return nil, fmt.Errorf("converting identifier '%s' to ASCII: %v", name, err)
}
csrTemplate.DNSNames = append(csrTemplate.DNSNames, normalizedName)
}
}
// to properly fill out the CSR, we need to create it, then parse it
csrDER, err := x509.CreateCertificateRequest(rand.Reader, csrTemplate, certPrivateKey)
if err != nil {
return nil, fmt.Errorf("generating CSR: %v", err)
}
csr, err := x509.ParseCertificateRequest(csrDER)
if err != nil {
return nil, fmt.Errorf("parsing generated CSR: %v", err)
}
return c.ObtainCertificateUsingCSR(ctx, account, csr)
}
// getAuthzObjects constructs stateful authorization objects for each authz on the order.
// It includes all authorizations regardless of their status so that they can be
// deactivated at the end if necessary. Be sure to check authz status before operating
// on the authz; not all will be "pending" - some authorizations might already be valid.
func (c *Client) getAuthzObjects(ctx context.Context, account acme.Account, order acme.Order,
failedChallengeTypes failedChallengeMap) ([]*authzState, error) {
var authzStates []*authzState
var err error
// start by allowing each authz's solver to present for its challenge
for _, authzURL := range order.Authorizations {
authz := &authzState{account: account}
authz.Authorization, err = c.Client.GetAuthorization(ctx, account, authzURL)
if err != nil {
return nil, fmt.Errorf("getting authorization at %s: %w", authzURL, err)
}
// add all offered challenge types to our memory if they
// aren't there already; we use this for statistics to
// choose the most successful challenge type over time;
// if initial fill, randomize challenge order
preferredChallengesMu.Lock()
preferredWasEmpty := len(preferredChallenges) == 0
for _, chal := range authz.Challenges {
preferredChallenges.addUnique(chal.Type)
}
if preferredWasEmpty {
weakrand.Shuffle(len(preferredChallenges), func(i, j int) {
preferredChallenges[i], preferredChallenges[j] =
preferredChallenges[j], preferredChallenges[i]
})
}
preferredChallengesMu.Unlock()
// copy over any challenges that are not known to have already
// failed, making them candidates for solving for this authz
failedChallengeTypes.enqueueUnfailedChallenges(authz)
authzStates = append(authzStates, authz)
}
// sort authzs so that challenges which require waiting go first; no point
// in getting authorizations quickly while others will take a long time
sort.SliceStable(authzStates, func(i, j int) bool {
_, iIsWaiter := authzStates[i].currentSolver.(Waiter)
_, jIsWaiter := authzStates[j].currentSolver.(Waiter)
// "if i is a waiter, and j is not a waiter, then i is less than j"
return iIsWaiter && !jIsWaiter
})
return authzStates, nil
}
func (c *Client) solveChallenges(ctx context.Context, account acme.Account, order acme.Order, failedChallengeTypes failedChallengeMap) error {
authzStates, err := c.getAuthzObjects(ctx, account, order, failedChallengeTypes)
if err != nil {
return err
}
// when the function returns, make sure we clean up any and all resources
defer func() {
// always clean up any remaining challenge solvers
for _, authz := range authzStates {
if authz.currentSolver == nil {
// happens when authz state ended on a challenge we have no
// solver for or if we have already cleaned up this solver
continue
}
if err := authz.currentSolver.CleanUp(ctx, authz.currentChallenge); err != nil {
if c.Logger != nil {
c.Logger.Error("cleaning up solver",
zap.String("identifier", authz.IdentifierValue()),
zap.String("challenge_type", authz.currentChallenge.Type),
zap.Error(err))
}
}
}
if err == nil {
return
}
// if this function returns with an error, make sure to deactivate
// all pending or valid authorization objects so they don't "leak"
// See: https://github.com/go-acme/lego/issues/383 and https://github.com/go-acme/lego/issues/353
for _, authz := range authzStates {
if authz.Status != acme.StatusPending && authz.Status != acme.StatusValid {
continue
}
updatedAuthz, err := c.Client.DeactivateAuthorization(ctx, account, authz.Location)
if err != nil {
if c.Logger != nil {
c.Logger.Error("deactivating authorization",
zap.String("identifier", authz.IdentifierValue()),
zap.String("authz", authz.Location),
zap.Error(err))
}
}
authz.Authorization = updatedAuthz
}
}()
// present for all challenges first; this allows them all to begin any
// slow tasks up front if necessary before we start polling/waiting
for _, authz := range authzStates {
// see §7.1.6 for state transitions
if authz.Status != acme.StatusPending && authz.Status != acme.StatusValid {
return fmt.Errorf("authz %s has unexpected status; order will fail: %s", authz.Location, authz.Status)
}
if authz.Status == acme.StatusValid {
continue
}
err = c.presentForNextChallenge(ctx, authz)
if err != nil {
return err
}
}
// now that all solvers have had the opportunity to present, tell
// the server to begin the selected challenge for each authz
for _, authz := range authzStates {
err = c.initiateCurrentChallenge(ctx, authz)
if err != nil {
return err
}
}
// poll each authz to wait for completion of all challenges
for _, authz := range authzStates {
err = c.pollAuthorization(ctx, account, authz, failedChallengeTypes)
if err != nil {
return err
}
}
return nil
}
func (c *Client) presentForNextChallenge(ctx context.Context, authz *authzState) error {
if authz.Status != acme.StatusPending {
if authz.Status == acme.StatusValid && c.Logger != nil {
c.Logger.Info("authorization already valid",
zap.String("identifier", authz.IdentifierValue()),
zap.String("authz_url", authz.Location),
zap.Time("expires", authz.Expires))
}
return nil
}
err := c.nextChallenge(authz)
if err != nil {
return err
}
if c.Logger != nil {
c.Logger.Info("trying to solve challenge",
zap.String("identifier", authz.IdentifierValue()),
zap.String("challenge_type", authz.currentChallenge.Type),
zap.String("ca", c.Directory))
}
err = authz.currentSolver.Present(ctx, authz.currentChallenge)
if err != nil {
return fmt.Errorf("presenting for challenge: %w", err)
}
return nil
}
func (c *Client) initiateCurrentChallenge(ctx context.Context, authz *authzState) error {
if authz.Status != acme.StatusPending {
if c.Logger != nil {
c.Logger.Debug("skipping challenge initiation because authorization is not pending",
zap.String("identifier", authz.IdentifierValue()),
zap.String("authz_status", authz.Status))
}
return nil
}
// by now, all challenges should have had an opportunity to present, so
// if this solver needs more time to finish presenting, wait on it now
// (yes, this does block the initiation of the other challenges, but
// that's probably OK, since we can't finalize the order until the slow
// challenges are done too)
if waiter, ok := authz.currentSolver.(Waiter); ok {
if c.Logger != nil {
c.Logger.Debug("waiting for solver before continuing",
zap.String("identifier", authz.IdentifierValue()),
zap.String("challenge_type", authz.currentChallenge.Type))
}
err := waiter.Wait(ctx, authz.currentChallenge)
if c.Logger != nil {
c.Logger.Debug("done waiting for solver",
zap.String("identifier", authz.IdentifierValue()),
zap.String("challenge_type", authz.currentChallenge.Type))
}
if err != nil {
return fmt.Errorf("waiting for solver %T to be ready: %w", authz.currentSolver, err)
}
}
// tell the server to initiate the challenge
var err error
authz.currentChallenge, err = c.Client.InitiateChallenge(ctx, authz.account, authz.currentChallenge)
if err != nil {
return fmt.Errorf("initiating challenge with server: %w", err)
}
if c.Logger != nil {
c.Logger.Debug("challenge accepted",
zap.String("identifier", authz.IdentifierValue()),
zap.String("challenge_type", authz.currentChallenge.Type))
}
return nil
}
// nextChallenge sets the next challenge (and associated solver) on
// authz; it returns an error if there is no compatible challenge.
func (c *Client) nextChallenge(authz *authzState) error {
preferredChallengesMu.Lock()
defer preferredChallengesMu.Unlock()
// find the most-preferred challenge that is also in the list of
// remaining challenges, then make sure we have a solver for it
for _, prefChalType := range preferredChallenges {
for i, remainingChal := range authz.remainingChallenges {
if remainingChal.Type != prefChalType.typeName {
continue
}
authz.currentChallenge = remainingChal
authz.currentSolver = c.ChallengeSolvers[authz.currentChallenge.Type]
if authz.currentSolver != nil {
authz.remainingChallenges = append(authz.remainingChallenges[:i], authz.remainingChallenges[i+1:]...)
return nil
}
if c.Logger != nil {
c.Logger.Debug("no solver configured", zap.String("challenge_type", remainingChal.Type))
}
break
}
}
return fmt.Errorf("%s: no solvers available for remaining challenges (configured=%v offered=%v remaining=%v)",
authz.IdentifierValue(), c.enabledChallengeTypes(), authz.listOfferedChallenges(), authz.listRemainingChallenges())
}
func (c *Client) pollAuthorization(ctx context.Context, account acme.Account, authz *authzState, failedChallengeTypes failedChallengeMap) error {
// In §7.5.1, the spec says:
//
// "For challenges where the client can tell when the server has
// validated the challenge (e.g., by seeing an HTTP or DNS request
// from the server), the client SHOULD NOT begin polling until it has
// seen the validation request from the server."
//
// However, in practice, this is difficult in the general case because
// we would need to design some relatively-nuanced concurrency and hope
// that the solver implementations also get their side right -- and the
// fact that it's even possible only sometimes makes it harder, because
// each solver needs a way to signal whether we should wait for its
// approval. So no, I've decided not to implement that recommendation
// in this particular library, but any implementations that use the lower
// ACME API directly are welcome and encouraged to do so where possible.
var err error
authz.Authorization, err = c.Client.PollAuthorization(ctx, account, authz.Authorization)
// if a challenge was attempted (i.e. did not start valid)...
if authz.currentSolver != nil {
// increment the statistics on this challenge type before handling error
preferredChallengesMu.Lock()
preferredChallenges.increment(authz.currentChallenge.Type, err == nil)
preferredChallengesMu.Unlock()
// always clean up the challenge solver after polling, regardless of error
cleanupErr := authz.currentSolver.CleanUp(ctx, authz.currentChallenge)
if cleanupErr != nil && c.Logger != nil {
c.Logger.Error("cleaning up solver",
zap.String("identifier", authz.IdentifierValue()),
zap.String("challenge_type", authz.currentChallenge.Type),
zap.Error(cleanupErr))
}
authz.currentSolver = nil // avoid cleaning it up again later
}
// finally, handle any error from validating the authz
if err != nil {
var problem acme.Problem
if errors.As(err, &problem) {
if c.Logger != nil {
c.Logger.Error("challenge failed",
zap.String("identifier", authz.IdentifierValue()),
zap.String("challenge_type", authz.currentChallenge.Type),
zap.Object("problem", problem))
}
failedChallengeTypes.rememberFailedChallenge(authz)
if c.countAvailableChallenges(authz) > 0 {
switch problem.Type {
case acme.ProblemTypeConnection,
acme.ProblemTypeDNS,
acme.ProblemTypeServerInternal,
acme.ProblemTypeUnauthorized,
acme.ProblemTypeTLS:
// this error might be recoverable with another challenge type
return retryableErr{err}
}
}
}
return fmt.Errorf("[%s] %w", authz.Authorization.IdentifierValue(), err)
}
if c.Logger != nil {
c.Logger.Info("authorization finalized",
zap.String("identifier", authz.IdentifierValue()),
zap.String("authz_status", authz.Status))
}
return nil
}
func (c *Client) countAvailableChallenges(authz *authzState) int {
count := 0
for _, remainingChal := range authz.remainingChallenges {
if _, ok := c.ChallengeSolvers[remainingChal.Type]; ok {
count++
}
}
return count
}
func (c *Client) enabledChallengeTypes() []string {
enabledChallenges := make([]string, 0, len(c.ChallengeSolvers))
for name, val := range c.ChallengeSolvers {
if val != nil {
enabledChallenges = append(enabledChallenges, name)
}
}
return enabledChallenges
}
type authzState struct {
acme.Authorization
account acme.Account
currentChallenge acme.Challenge
currentSolver Solver
remainingChallenges []acme.Challenge
}
func (authz authzState) listOfferedChallenges() []string {
return challengeTypeNames(authz.Challenges)
}
func (authz authzState) listRemainingChallenges() []string {
return challengeTypeNames(authz.remainingChallenges)
}
func challengeTypeNames(challengeList []acme.Challenge) []string {
names := make([]string, 0, len(challengeList))
for _, chal := range challengeList {
names = append(names, chal.Type)
}
return names
}
// TODO: possibly configurable policy? converge to most successful (current) vs. completely random
// challengeHistory is a memory of how successful a challenge type is.
type challengeHistory struct {
typeName string
successes, total int
}
func (ch challengeHistory) successRatio() float64 {
if ch.total == 0 {
return 1.0
}
return float64(ch.successes) / float64(ch.total)
}
// failedChallengeMap keeps track of failed challenge types per identifier.
type failedChallengeMap map[string][]string
func (fcm failedChallengeMap) rememberFailedChallenge(authz *authzState) {
idKey := fcm.idKey(authz)
fcm[idKey] = append(fcm[idKey], authz.currentChallenge.Type)
}
// enqueueUnfailedChallenges enqueues each challenge offered in authz if it
// is not known to have failed for the authz's identifier already.
func (fcm failedChallengeMap) enqueueUnfailedChallenges(authz *authzState) {
idKey := fcm.idKey(authz)
for _, chal := range authz.Challenges {
if !contains(fcm[idKey], chal.Type) {
authz.remainingChallenges = append(authz.remainingChallenges, chal)
}
}
}
func (fcm failedChallengeMap) idKey(authz *authzState) string {
return authz.Identifier.Type + authz.IdentifierValue()
}
// challengeTypes is a list of challenges we've seen and/or
// used previously. It sorts from most successful to least
// successful, such that most successful challenges are first.
type challengeTypes []challengeHistory
// Len is part of sort.Interface.
func (ct challengeTypes) Len() int { return len(ct) }
// Swap is part of sort.Interface.
func (ct challengeTypes) Swap(i, j int) { ct[i], ct[j] = ct[j], ct[i] }
// Less is part of sort.Interface. It sorts challenge
// types from highest success ratio to lowest.
func (ct challengeTypes) Less(i, j int) bool {
return ct[i].successRatio() > ct[j].successRatio()
}
func (ct *challengeTypes) addUnique(challengeType string) {
for _, c := range *ct {
if c.typeName == challengeType {
return
}
}
*ct = append(*ct, challengeHistory{typeName: challengeType})
}
func (ct challengeTypes) increment(challengeType string, successful bool) {
defer sort.Stable(ct) // keep most successful challenges in front
for i, c := range ct {
if c.typeName == challengeType {
ct[i].total++
if successful {
ct[i].successes++
}
return
}
}
}
func contains(haystack []string, needle string) bool {
for _, s := range haystack {
if s == needle {
return true
}
}
return false
}
// retryableErr wraps an error that indicates the caller should retry
// the operation; specifically with a different challenge type.
type retryableErr struct{ error }
func (re retryableErr) Unwrap() error { return re.error }
// Keep a list of challenges we've seen offered by servers, ordered from
// most to least successful, so the most effective challenge types are tried first.
var (
preferredChallenges challengeTypes
preferredChallengesMu sync.Mutex
)
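// Illustrative sketch (not part of the vendored file): typical high-level use
// of this package. The keys and solver are passed in so no extra imports are
// needed; the CA directory URL, contact address, and domain are placeholders.
func exampleObtain(ctx context.Context, accountKey, certKey crypto.Signer, httpSolver Solver) ([]acme.Certificate, error) {
	client := &Client{
		Client: &acme.Client{Directory: "https://acme-v02.api.letsencrypt.org/directory"},
		ChallengeSolvers: map[string]Solver{
			acme.ChallengeTypeHTTP01: httpSolver,
		},
	}
	account := acme.Account{
		Contact:              []string{"mailto:admin@example.com"},
		TermsOfServiceAgreed: true,
		PrivateKey:           accountKey,
	}
	account, err := client.NewAccount(ctx, account)
	if err != nil {
		return nil, err
	}
	return client.ObtainCertificate(ctx, account, certKey, []string{"example.com"})
}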

vendor/github.com/mholt/acmez/solver.go generated vendored Normal file

@@ -0,0 +1,72 @@
// Copyright 2020 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package acmez
import (
"context"
"github.com/mholt/acmez/acme"
)
// Solver is a type that can solve ACME challenges. All
// implementations MUST honor context cancellation.
type Solver interface {
// Present is called just before a challenge is initiated.
// The implementation MUST prepare anything that is necessary
// for completing the challenge; for example, provisioning
// an HTTP resource, TLS certificate, or a DNS record.
//
// It MUST return quickly. If presenting the challenge token
// will take time, then the implementation MUST do the
// minimum amount of work required in this method, and
// SHOULD additionally implement the Waiter interface.
// For example, a DNS challenge solver might make a quick
// HTTP request to a provider's API to create a new DNS
// record, but it might be several minutes or hours before
// the DNS record propagates. The API request should be
// done in Present(), and waiting for propagation should
// be done in Wait().
Present(context.Context, acme.Challenge) error
// CleanUp is called after a challenge is finished, whether
// successful or not. It MUST free/remove any resources it
// allocated/created during Present. It SHOULD NOT require
// that Present ran successfully. It MUST return quickly.
CleanUp(context.Context, acme.Challenge) error
}
// Waiter is an optional interface for Solvers to implement. Its
// primary purpose is to help ensure the challenge can be solved
// before the server gives up trying to verify the challenge.
//
// If implemented, it will be called after Present() but just
// before the challenge is initiated with the server. It blocks
// until the challenge is ready to be solved. (For example,
// waiting on a DNS record to propagate.) This allows challenges
// to succeed that would normally fail because they take too long
// to set up (i.e. the ACME server would give up polling DNS or
// the client would timeout its polling). By separating Present()
// from Wait(), it allows the slow part of all solvers to begin
// up front, rather than waiting on each solver one at a time.
//
// It MUST NOT do anything exclusive of Present() that is required
// for the challenge to succeed. In other words, if Present() is
// called but Wait() is not, then the challenge should still be able
// to succeed assuming infinite time.
//
// Implementations MUST honor context cancellation.
type Waiter interface {
Wait(context.Context, acme.Challenge) error
}
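// Illustrative sketch (not part of the vendored file): a minimal HTTP-01
// solver that records the token and key authorization so a separately running
// HTTP handler (not shown) can serve the value at
// /.well-known/acme-challenge/<token>. The type, its map, and the absence of
// locking are assumptions of this example.
type memoryHTTP01Solver struct {
	tokens map[string]string // token -> key authorization
}

func (s *memoryHTTP01Solver) Present(_ context.Context, chal acme.Challenge) error {
	s.tokens[chal.Token] = chal.KeyAuthorization
	return nil
}

func (s *memoryHTTP01Solver) CleanUp(_ context.Context, chal acme.Challenge) error {
	delete(s.tokens, chal.Token)
	return nil
}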

vendor/github.com/mholt/acmez/tlsalpn01.go generated vendored Normal file

@@ -0,0 +1,98 @@
// Copyright 2020 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package acmez
import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/sha256"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"
"encoding/pem"
"math/big"
"time"
"github.com/mholt/acmez/acme"
)
// TLSALPN01ChallengeCert creates a certificate that can be used for
// handshakes while solving the tls-alpn-01 challenge. See RFC 8737 §3.
func TLSALPN01ChallengeCert(challenge acme.Challenge) (*tls.Certificate, error) {
keyAuthSum := sha256.Sum256([]byte(challenge.KeyAuthorization))
keyAuthSumASN1, err := asn1.Marshal(keyAuthSum[:sha256.Size])
if err != nil {
return nil, err
}
certKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
if err != nil {
return nil, err
}
challengeKeyASN1, err := x509.MarshalECPrivateKey(certKey)
if err != nil {
return nil, err
}
serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
if err != nil {
return nil, err
}
template := x509.Certificate{
SerialNumber: serialNumber,
Subject: pkix.Name{CommonName: "ACME challenge"},
NotBefore: time.Now(),
NotAfter: time.Now().Add(24 * time.Hour * 365),
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
BasicConstraintsValid: true,
DNSNames: []string{challenge.Identifier.Value},
// add key authentication digest as the acmeValidation-v1 extension
// (marked as critical such that it won't be used by non-ACME software).
// Reference: https://www.rfc-editor.org/rfc/rfc8737.html#section-3
ExtraExtensions: []pkix.Extension{
{
Id: idPEACMEIdentifierV1,
Critical: true,
Value: keyAuthSumASN1,
},
},
}
challengeCertDER, err := x509.CreateCertificate(rand.Reader, &template, &template, &certKey.PublicKey, certKey)
if err != nil {
return nil, err
}
challengeCertPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: challengeCertDER})
challengeKeyPEM := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: challengeKeyASN1})
cert, err := tls.X509KeyPair(challengeCertPEM, challengeKeyPEM)
if err != nil {
return nil, err
}
return &cert, nil
}
// ACMETLS1Protocol is the ALPN value for the TLS-ALPN challenge
// handshake. See RFC 8737 §6.2.
const ACMETLS1Protocol = "acme-tls/1"
// idPEACMEIdentifierV1 is the SMI Security for PKIX Certification Extension OID referencing the ACME extension.
// See RFC 8737 §6.1. https://www.rfc-editor.org/rfc/rfc8737.html#section-6.1
var idPEACMEIdentifierV1 = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 31}
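// Illustrative sketch (not part of the vendored file): a TLS server solves
// tls-alpn-01 by advertising ACMETLS1Protocol and returning the challenge
// certificate from GetCertificate whenever that protocol is negotiated.
// The challenge lookup callback is a placeholder.
func exampleTLSALPN01Config(lookup func(serverName string) (acme.Challenge, bool)) *tls.Config {
	return &tls.Config{
		NextProtos: []string{ACMETLS1Protocol},
		GetCertificate: func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
			for _, proto := range hello.SupportedProtos {
				if proto == ACMETLS1Protocol {
					if chal, ok := lookup(hello.ServerName); ok {
						return TLSALPN01ChallengeCert(chal)
					}
				}
			}
			return nil, nil // fall back to the server's regular certificates
		},
	}
}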

vendor/github.com/miekg/dns/.codecov.yml generated vendored Normal file

@@ -0,0 +1,8 @@
coverage:
status:
project:
default:
target: 40%
threshold: null
patch: false
changes: false

vendor/github.com/miekg/dns/.gitignore generated vendored Normal file

@@ -0,0 +1,4 @@
*.6
tags
test.out
a.out

vendor/github.com/miekg/dns/AUTHORS generated vendored Normal file

@@ -0,0 +1 @@
Miek Gieben <miek@miek.nl>

vendor/github.com/miekg/dns/CODEOWNERS generated vendored Normal file

@@ -0,0 +1 @@
* @miekg @tmthrgd

vendor/github.com/miekg/dns/CONTRIBUTORS generated vendored Normal file

@@ -0,0 +1,10 @@
Alex A. Skinner
Andrew Tunnell-Jones
Ask Bjørn Hansen
Dave Cheney
Dusty Wilson
Marek Majkowski
Peter van Dijk
Omri Bahumi
Alex Sergeyev
James Hartig

vendor/github.com/miekg/dns/COPYRIGHT generated vendored Normal file

@@ -0,0 +1,9 @@
Copyright 2009 The Go Authors. All rights reserved. Use of this source code
is governed by a BSD-style license that can be found in the LICENSE file.
Extensions of the original work are copyright (c) 2011 Miek Gieben
Copyright 2011 Miek Gieben. All rights reserved. Use of this source code is
governed by a BSD-style license that can be found in the LICENSE file.
Copyright 2014 CloudFlare. All rights reserved. Use of this source code is
governed by a BSD-style license that can be found in the LICENSE file.

vendor/github.com/miekg/dns/LICENSE generated vendored Normal file

@@ -0,0 +1,30 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
As this is fork of the official Go code the same license applies.
Extensions of the original work are copyright (c) 2011 Miek Gieben

vendor/github.com/miekg/dns/Makefile.fuzz generated vendored Normal file

@@ -0,0 +1,33 @@
# Makefile for fuzzing
#
# Use go-fuzz and needs the tools installed.
# See https://blog.cloudflare.com/dns-parser-meet-go-fuzzer/
#
# Installing go-fuzz:
# $ make -f Makefile.fuzz get
# Installs:
# * github.com/dvyukov/go-fuzz/go-fuzz
# * get github.com/dvyukov/go-fuzz/go-fuzz-build
all: build
.PHONY: build
build:
go-fuzz-build -tags fuzz github.com/miekg/dns
.PHONY: build-newrr
build-newrr:
go-fuzz-build -func FuzzNewRR -tags fuzz github.com/miekg/dns
.PHONY: fuzz
fuzz:
go-fuzz -bin=dns-fuzz.zip -workdir=fuzz
.PHONY: get
get:
go get github.com/dvyukov/go-fuzz/go-fuzz
go get github.com/dvyukov/go-fuzz/go-fuzz-build
.PHONY: clean
clean:
rm *-fuzz.zip

vendor/github.com/miekg/dns/Makefile.release generated vendored Normal file

@@ -0,0 +1,52 @@
# Makefile for releasing.
#
# The release is controlled from version.go. The version found there is
# used to tag the git repo, we're not building any artifacts so there is nothing
# to upload to github.
#
# * Up the version in version.go
# * Run: make -f Makefile.release release
# * will *commit* your change with 'Release $VERSION'
# * push to github
#
define GO
//+build ignore
package main
import (
"fmt"
"github.com/miekg/dns"
)
func main() {
fmt.Println(dns.Version.String())
}
endef
$(file > version_release.go,$(GO))
VERSION:=$(shell go run version_release.go)
TAG="v$(VERSION)"
all:
@echo Use the \'release\' target to start a release $(VERSION)
rm -f version_release.go
.PHONY: release
release: commit push
@echo Released $(VERSION)
rm -f version_release.go
.PHONY: commit
commit:
@echo Committing release $(VERSION)
git commit -am"Release $(VERSION)"
git tag $(TAG)
.PHONY: push
push:
@echo Pushing release $(VERSION) to master
git push --tags
git push

vendor/github.com/miekg/dns/README.md generated vendored Normal file

@@ -0,0 +1,186 @@
[![Build Status](https://travis-ci.org/miekg/dns.svg?branch=master)](https://travis-ci.org/miekg/dns)
[![Code Coverage](https://img.shields.io/codecov/c/github/miekg/dns/master.svg)](https://codecov.io/github/miekg/dns?branch=master)
[![Go Report Card](https://goreportcard.com/badge/github.com/miekg/dns)](https://goreportcard.com/report/miekg/dns)
[![](https://godoc.org/github.com/miekg/dns?status.svg)](https://godoc.org/github.com/miekg/dns)
# Alternative (more granular) approach to a DNS library
> Less is more.
Complete and usable DNS library. All Resource Records are supported, including the DNSSEC types.
It follows a lean and mean philosophy. If there is stuff you should know as a DNS programmer there
isn't a convenience function for it. Server side and client side programming is supported, i.e. you
can build servers and resolvers with it.
We try to keep the "master" branch as sane as possible and at the bleeding edge of standards,
avoiding breaking changes wherever reasonable. We support the last two versions of Go.
# Goals
* KISS;
* Fast;
* Small API. If it's easy to code in Go, don't make a function for it.
# Users
A not-so-up-to-date-list-that-may-be-actually-current:
* https://github.com/coredns/coredns
* https://github.com/abh/geodns
* https://github.com/baidu/bfe
* http://www.statdns.com/
* http://www.dnsinspect.com/
* https://github.com/chuangbo/jianbing-dictionary-dns
* http://www.dns-lg.com/
* https://github.com/fcambus/rrda
* https://github.com/kenshinx/godns
* https://github.com/skynetservices/skydns
* https://github.com/hashicorp/consul
* https://github.com/DevelopersPL/godnsagent
* https://github.com/duedil-ltd/discodns
* https://github.com/StalkR/dns-reverse-proxy
* https://github.com/tianon/rawdns
* https://mesosphere.github.io/mesos-dns/
* https://github.com/fcambus/statzone
* https://github.com/benschw/dns-clb-go
* https://github.com/corny/dnscheck for <http://public-dns.info/>
* https://github.com/miekg/unbound
* https://github.com/miekg/exdns
* https://dnslookup.org
* https://github.com/looterz/grimd
* https://github.com/phamhongviet/serf-dns
* https://github.com/mehrdadrad/mylg
* https://github.com/bamarni/dockness
* https://github.com/fffaraz/microdns
* https://github.com/ipdcode/hades <https://jd.com>
* https://github.com/StackExchange/dnscontrol/
* https://www.dnsperf.com/
* https://dnssectest.net/
* https://github.com/oif/apex
* https://github.com/jedisct1/dnscrypt-proxy
* https://github.com/jedisct1/rpdns
* https://github.com/xor-gate/sshfp
* https://github.com/rs/dnstrace
* https://blitiri.com.ar/p/dnss ([github mirror](https://github.com/albertito/dnss))
* https://render.com
* https://github.com/peterzen/goresolver
* https://github.com/folbricht/routedns
* https://domainr.com/
* https://zonedb.org/
* https://router7.org/
* https://github.com/fortio/dnsping
* https://github.com/Luzilla/dnsbl_exporter
* https://github.com/bodgit/tsig
* https://github.com/v2fly/v2ray-core (test only)
* https://kuma.io/
* https://www.misaka.io/services/dns
* https://ping.sx/dig
* https://fleetdeck.io/
* https://github.com/markdingo/autoreverse
Send pull request if you want to be listed here.
# Features
* UDP/TCP queries, IPv4 and IPv6
* RFC 1035 zone file parsing ($INCLUDE, $ORIGIN, $TTL and $GENERATE (for all record types) are supported)
* Fast
* Server side programming (mimicking the net/http package)
* Client side programming
* DNSSEC: signing, validating and key generation for DSA, RSA, ECDSA and Ed25519
* EDNS0, NSID, Cookies
* AXFR/IXFR
* TSIG, SIG(0)
* DNS over TLS (DoT): encrypted connection between client and server over TCP
* DNS name compression
Have fun!
Miek Gieben - 2010-2012 - <miek@miek.nl>
DNS Authors 2012-
# Building
This library uses Go modules and uses semantic versioning. Building is done with the `go` tool, so
the following should work:
go get github.com/miekg/dns
go build github.com/miekg/dns
## Examples
A short "how to use the API" is at the beginning of doc.go (this also will show when you call `godoc
github.com/miekg/dns`).
Example programs can be found in the `github.com/miekg/exdns` repository.
## Supported RFCs
*all of them*
* 103{4,5} - DNS standard
* 1348 - NSAP record (removed the record)
* 1982 - Serial Arithmetic
* 1876 - LOC record
* 1995 - IXFR
* 1996 - DNS notify
* 2136 - DNS Update (dynamic updates)
* 2181 - RRset definition - there is no RRset type though, just []RR
* 2537 - RSAMD5 DNS keys
* 2065 - DNSSEC (updated in later RFCs)
* 2671 - EDNS record
* 2782 - SRV record
* 2845 - TSIG record
* 2915 - NAPTR record
* 2929 - DNS IANA Considerations
* 3110 - RSASHA1 DNS keys
* 3123 - APL record
* 3225 - DO bit (DNSSEC OK)
* 340{1,2,3} - NAPTR record
* 3445 - Limiting the scope of (DNS)KEY
* 3597 - Unknown RRs
* 403{3,4,5} - DNSSEC + validation functions
* 4255 - SSHFP record
* 4343 - Case insensitivity
* 4408 - SPF record
* 4509 - SHA256 Hash in DS
* 4592 - Wildcards in the DNS
* 4635 - HMAC SHA TSIG
* 4701 - DHCID
* 4892 - id.server
* 5001 - NSID
* 5155 - NSEC3 record
* 5205 - HIP record
* 5702 - SHA2 in the DNS
* 5936 - AXFR
* 5966 - TCP implementation recommendations
* 6605 - ECDSA
* 6725 - IANA Registry Update
* 6742 - ILNP DNS
* 6840 - Clarifications and Implementation Notes for DNS Security
* 6844 - CAA record
* 6891 - EDNS0 update
* 6895 - DNS IANA considerations
* 6944 - DNSSEC DNSKEY Algorithm Status
* 6975 - Algorithm Understanding in DNSSEC
* 7043 - EUI48/EUI64 records
* 7314 - DNS (EDNS) EXPIRE Option
* 7477 - CSYNC RR
* 7828 - edns-tcp-keepalive EDNS0 Option
* 7553 - URI record
* 7858 - DNS over TLS: Initiation and Performance Considerations
* 7871 - EDNS0 Client Subnet
* 7873 - Domain Name System (DNS) Cookies
* 8080 - EdDSA for DNSSEC
* 8499 - DNS Terminology
* 8659 - DNS Certification Authority Authorization (CAA) Resource Record
* 8914 - Extended DNS Errors
* 8976 - Message Digest for DNS Zones (ZONEMD RR)
## Loosely Based Upon
* ldns - <https://nlnetlabs.nl/projects/ldns/about/>
* NSD - <https://nlnetlabs.nl/projects/nsd/about/>
* Net::DNS - <http://www.net-dns.org/>
* GRONG - <https://github.com/bortzmeyer/grong>

62
vendor/github.com/miekg/dns/acceptfunc.go generated vendored Normal file
View File

@@ -0,0 +1,62 @@
package dns
// MsgAcceptFunc is used early in the server code to accept or reject a message with RcodeFormatError.
// It returns a MsgAcceptAction to indicate what should happen with the message.
type MsgAcceptFunc func(dh Header) MsgAcceptAction
// DefaultMsgAcceptFunc checks the request and will reject if:
//
// * isn't a request (don't respond in that case)
//
// * opcode isn't OpcodeQuery or OpcodeNotify
//
// * Zero bit isn't zero
//
// * does not have exactly 1 question in the question section
//
// * has more than 1 RR in the Answer section
//
// * has more than 0 RRs in the Authority section
//
// * has more than 2 RRs in the Additional section
//
var DefaultMsgAcceptFunc MsgAcceptFunc = defaultMsgAcceptFunc
// MsgAcceptAction represents the action to be taken.
type MsgAcceptAction int
// Allowed returned values from a MsgAcceptFunc.
const (
MsgAccept MsgAcceptAction = iota // Accept the message
MsgReject // Reject the message with a RcodeFormatError
MsgIgnore // Ignore the error and send nothing back.
MsgRejectNotImplemented // Reject the message with a RcodeNotImplemented
)
func defaultMsgAcceptFunc(dh Header) MsgAcceptAction {
if isResponse := dh.Bits&_QR != 0; isResponse {
return MsgIgnore
}
// Don't allow dynamic updates, because then the sections can contain a whole bunch of RRs.
opcode := int(dh.Bits>>11) & 0xF
if opcode != OpcodeQuery && opcode != OpcodeNotify {
return MsgRejectNotImplemented
}
if dh.Qdcount != 1 {
return MsgReject
}
// NOTIFY requests can have a SOA in the ANSWER section. See RFC 1996 Section 3.7 and 3.11.
if dh.Ancount > 1 {
return MsgReject
}
// IXFR request could have one SOA RR in the NS section. See RFC 1995, section 3.
if dh.Nscount > 1 {
return MsgReject
}
if dh.Arcount > 2 {
return MsgReject
}
return MsgAccept
}
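// Illustrative sketch, not part of the vendored file above: a standalone
// program wiring a custom MsgAcceptFunc into a dns.Server. The listen
// address and the extra policy are placeholder assumptions; the function
// delegates to DefaultMsgAcceptFunc and additionally ignores messages
// carrying records in the Authority section.
package main

import (
	"log"

	"github.com/miekg/dns"
)

func main() {
	dns.HandleFunc(".", func(w dns.ResponseWriter, r *dns.Msg) {
		m := new(dns.Msg)
		m.SetReply(r)
		w.WriteMsg(m)
	})

	srv := &dns.Server{
		Addr: ":5353",
		Net:  "udp",
		MsgAcceptFunc: func(dh dns.Header) dns.MsgAcceptAction {
			if dh.Nscount > 0 {
				return dns.MsgIgnore
			}
			return dns.DefaultMsgAcceptFunc(dh)
		},
	}
	log.Fatal(srv.ListenAndServe())
}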

484
vendor/github.com/miekg/dns/client.go generated vendored Normal file
View File

@@ -0,0 +1,484 @@
package dns
// A client implementation.
import (
"context"
"crypto/tls"
"encoding/binary"
"fmt"
"io"
"net"
"strings"
"time"
)
const (
dnsTimeout time.Duration = 2 * time.Second
tcpIdleTimeout time.Duration = 8 * time.Second
)
func isPacketConn(c net.Conn) bool {
if _, ok := c.(net.PacketConn); !ok {
return false
}
if ua, ok := c.LocalAddr().(*net.UnixAddr); ok {
return ua.Net == "unixgram"
}
return true
}
// A Conn represents a connection to a DNS server.
type Conn struct {
net.Conn // a net.Conn holding the connection
UDPSize uint16 // minimum receive buffer for UDP messages
TsigSecret map[string]string // secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2)
TsigProvider TsigProvider // An implementation of the TsigProvider interface. If defined it replaces TsigSecret and is used for all TSIG operations.
tsigRequestMAC string
}
func (co *Conn) tsigProvider() TsigProvider {
if co.TsigProvider != nil {
return co.TsigProvider
}
// tsigSecretProvider will return ErrSecret if co.TsigSecret is nil.
return tsigSecretProvider(co.TsigSecret)
}
// A Client defines parameters for a DNS client.
type Client struct {
Net string // if "tcp" or "tcp-tls" (DNS over TLS) a TCP query will be initiated, otherwise an UDP one (default is "" for UDP)
UDPSize uint16 // minimum receive buffer for UDP messages
TLSConfig *tls.Config // TLS connection configuration
Dialer *net.Dialer // a net.Dialer used to set local address, timeouts and more
// Timeout is a cumulative timeout for dial, write and read, defaults to 0 (disabled) - overrides DialTimeout, ReadTimeout,
// WriteTimeout when non-zero. Can be overridden with net.Dialer.Timeout (see Client.ExchangeWithDialer and
// Client.Dialer) or context.Context.Deadline (see ExchangeContext)
Timeout time.Duration
DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds, or net.Dialer.Timeout if expiring earlier - overridden by Timeout when that value is non-zero
ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
TsigSecret map[string]string // secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2)
TsigProvider TsigProvider // An implementation of the TsigProvider interface. If defined it replaces TsigSecret and is used for all TSIG operations.
SingleInflight bool // if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass
group singleflight
}
// Exchange performs a synchronous UDP query. It sends the message m to the address
// contained in a and waits for a reply. Exchange does not retry a failed query, nor
// will it fall back to TCP in case of truncation.
// See client.Exchange for more information on setting larger buffer sizes.
func Exchange(m *Msg, a string) (r *Msg, err error) {
client := Client{Net: "udp"}
r, _, err = client.Exchange(m, a)
return r, err
}
func (c *Client) dialTimeout() time.Duration {
if c.Timeout != 0 {
return c.Timeout
}
if c.DialTimeout != 0 {
return c.DialTimeout
}
return dnsTimeout
}
func (c *Client) readTimeout() time.Duration {
if c.ReadTimeout != 0 {
return c.ReadTimeout
}
return dnsTimeout
}
func (c *Client) writeTimeout() time.Duration {
if c.WriteTimeout != 0 {
return c.WriteTimeout
}
return dnsTimeout
}
// Dial connects to the address on the named network.
func (c *Client) Dial(address string) (conn *Conn, err error) {
return c.DialContext(context.Background(), address)
}
// DialContext connects to the address on the named network, with a context.Context.
// For TLS over TCP (DoT) the context isn't used yet. This will be enabled when Go 1.18 is released.
func (c *Client) DialContext(ctx context.Context, address string) (conn *Conn, err error) {
// create a new dialer with the appropriate timeout
var d net.Dialer
if c.Dialer == nil {
d = net.Dialer{Timeout: c.getTimeoutForRequest(c.dialTimeout())}
} else {
d = *c.Dialer
}
network := c.Net
if network == "" {
network = "udp"
}
useTLS := strings.HasPrefix(network, "tcp") && strings.HasSuffix(network, "-tls")
conn = new(Conn)
if useTLS {
network = strings.TrimSuffix(network, "-tls")
// TODO(miekg): Enable after Go 1.18 is released, to be able to support two prev. releases.
/*
tlsDialer := tls.Dialer{
NetDialer: &d,
Config: c.TLSConfig,
}
conn.Conn, err = tlsDialer.DialContext(ctx, network, address)
*/
conn.Conn, err = tls.DialWithDialer(&d, network, address, c.TLSConfig)
} else {
conn.Conn, err = d.DialContext(ctx, network, address)
}
if err != nil {
return nil, err
}
conn.UDPSize = c.UDPSize
return conn, nil
}
// Exchange performs a synchronous query. It sends the message m to the address
// contained in a and waits for a reply. Basic use pattern with a *dns.Client:
//
// c := new(dns.Client)
// in, rtt, err := c.Exchange(message, "127.0.0.1:53")
//
// Exchange does not retry a failed query, nor will it fall back to TCP in
// case of truncation.
// It is up to the caller to create a message that allows for larger responses to be
// returned. Specifically this means adding an EDNS0 OPT RR that will advertise a larger
// buffer, see SetEdns0. Messages without an OPT RR will fall back to the historic limit
// of 512 bytes.
// To specify a local address or a timeout, the caller has to set the `Client.Dialer`
// attribute appropriately.
func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, err error) {
co, err := c.Dial(address)
if err != nil {
return nil, 0, err
}
defer co.Close()
return c.ExchangeWithConn(m, co)
}
// ExchangeWithConn has the same behavior as Exchange, just with a predetermined connection
// that will be used instead of creating a new one.
// Usage pattern with a *dns.Client:
//
// c := new(dns.Client)
// // connection management logic goes here
//
// conn := c.Dial(address)
// in, rtt, err := c.ExchangeWithConn(message, conn)
//
// This allows users of the library to implement their own connection management,
// as opposed to Exchange, which will always use new connections and incur the added overhead
// that entails when using "tcp" and especially "tcp-tls" clients.
//
// When the singleflight is set for this client the context is _not_ forwarded to the (shared) exchange, to
// prevent one cancelation from canceling all outstanding requests.
func (c *Client) ExchangeWithConn(m *Msg, conn *Conn) (r *Msg, rtt time.Duration, err error) {
return c.exchangeWithConnContext(context.Background(), m, conn)
}
func (c *Client) exchangeWithConnContext(ctx context.Context, m *Msg, conn *Conn) (r *Msg, rtt time.Duration, err error) {
if !c.SingleInflight {
return c.exchangeContext(ctx, m, conn)
}
q := m.Question[0]
key := fmt.Sprintf("%s:%d:%d", q.Name, q.Qtype, q.Qclass)
r, rtt, err, shared := c.group.Do(key, func() (*Msg, time.Duration, error) {
// When we're doing singleflight we don't want one context cancelation to cancel _all_ outstanding queries.
// Hence we ignore the context and use Background().
return c.exchangeContext(context.Background(), m, conn)
})
if r != nil && shared {
r = r.Copy()
}
return r, rtt, err
}
func (c *Client) exchangeContext(ctx context.Context, m *Msg, co *Conn) (r *Msg, rtt time.Duration, err error) {
opt := m.IsEdns0()
// If EDNS0 is used use that for size.
if opt != nil && opt.UDPSize() >= MinMsgSize {
co.UDPSize = opt.UDPSize()
}
// Otherwise use the client's configured UDP size.
if opt == nil && c.UDPSize >= MinMsgSize {
co.UDPSize = c.UDPSize
}
// write with the appropriate write timeout
t := time.Now()
writeDeadline := t.Add(c.getTimeoutForRequest(c.writeTimeout()))
readDeadline := t.Add(c.getTimeoutForRequest(c.readTimeout()))
if deadline, ok := ctx.Deadline(); ok {
if deadline.Before(writeDeadline) {
writeDeadline = deadline
}
if deadline.Before(readDeadline) {
readDeadline = deadline
}
}
co.SetWriteDeadline(writeDeadline)
co.SetReadDeadline(readDeadline)
co.TsigSecret, co.TsigProvider = c.TsigSecret, c.TsigProvider
if err = co.WriteMsg(m); err != nil {
return nil, 0, err
}
if isPacketConn(co.Conn) {
for {
r, err = co.ReadMsg()
// Ignore replies with mismatched IDs because they might be
// responses to earlier queries that timed out.
if err != nil || r.Id == m.Id {
break
}
}
} else {
r, err = co.ReadMsg()
if err == nil && r.Id != m.Id {
err = ErrId
}
}
rtt = time.Since(t)
return r, rtt, err
}
// ReadMsg reads a message from the connection co.
// If the received message contains a TSIG record the transaction signature
// is verified. This method always tries to return the message, however if an
// error is returned there are no guarantees that the returned message is a
// valid representation of the packet read.
func (co *Conn) ReadMsg() (*Msg, error) {
p, err := co.ReadMsgHeader(nil)
if err != nil {
return nil, err
}
m := new(Msg)
if err := m.Unpack(p); err != nil {
// If an error was returned, we still want to allow the user to use
// the message, but naively they can just check err if they don't want
// to use an erroneous message
return m, err
}
if t := m.IsTsig(); t != nil {
// Need to work on the original message p, as that was used to calculate the tsig.
err = tsigVerifyProvider(p, co.tsigProvider(), co.tsigRequestMAC, false)
}
return m, err
}
// ReadMsgHeader reads a DNS message, parses and populates hdr (when hdr is not nil).
// Returns message as a byte slice to be parsed with Msg.Unpack later on.
// Note that error handling on the message body is not possible as only the header is parsed.
func (co *Conn) ReadMsgHeader(hdr *Header) ([]byte, error) {
var (
p []byte
n int
err error
)
if isPacketConn(co.Conn) {
if co.UDPSize > MinMsgSize {
p = make([]byte, co.UDPSize)
} else {
p = make([]byte, MinMsgSize)
}
n, err = co.Read(p)
} else {
var length uint16
if err := binary.Read(co.Conn, binary.BigEndian, &length); err != nil {
return nil, err
}
p = make([]byte, length)
n, err = io.ReadFull(co.Conn, p)
}
if err != nil {
return nil, err
} else if n < headerSize {
return nil, ErrShortRead
}
p = p[:n]
if hdr != nil {
dh, _, err := unpackMsgHdr(p, 0)
if err != nil {
return nil, err
}
*hdr = dh
}
return p, err
}
// Read implements the net.Conn read method.
func (co *Conn) Read(p []byte) (n int, err error) {
if co.Conn == nil {
return 0, ErrConnEmpty
}
if isPacketConn(co.Conn) {
// UDP connection
return co.Conn.Read(p)
}
var length uint16
if err := binary.Read(co.Conn, binary.BigEndian, &length); err != nil {
return 0, err
}
if int(length) > len(p) {
return 0, io.ErrShortBuffer
}
return io.ReadFull(co.Conn, p[:length])
}
// WriteMsg sends a message through the connection co.
// If the message m contains a TSIG record the transaction
// signature is calculated.
func (co *Conn) WriteMsg(m *Msg) (err error) {
var out []byte
if t := m.IsTsig(); t != nil {
// Set tsigRequestMAC for the next read, although only used in zone transfers.
out, co.tsigRequestMAC, err = tsigGenerateProvider(m, co.tsigProvider(), co.tsigRequestMAC, false)
} else {
out, err = m.Pack()
}
if err != nil {
return err
}
_, err = co.Write(out)
return err
}
// Write implements the net.Conn Write method.
func (co *Conn) Write(p []byte) (int, error) {
if len(p) > MaxMsgSize {
return 0, &Error{err: "message too large"}
}
if isPacketConn(co.Conn) {
return co.Conn.Write(p)
}
msg := make([]byte, 2+len(p))
binary.BigEndian.PutUint16(msg, uint16(len(p)))
copy(msg[2:], p)
return co.Conn.Write(msg)
}
// Return the appropriate timeout for a specific request
func (c *Client) getTimeoutForRequest(timeout time.Duration) time.Duration {
var requestTimeout time.Duration
if c.Timeout != 0 {
requestTimeout = c.Timeout
} else {
requestTimeout = timeout
}
// net.Dialer.Timeout has priority if smaller than the timeouts computed so
// far
if c.Dialer != nil && c.Dialer.Timeout != 0 {
if c.Dialer.Timeout < requestTimeout {
requestTimeout = c.Dialer.Timeout
}
}
return requestTimeout
}
// Dial connects to the address on the named network.
func Dial(network, address string) (conn *Conn, err error) {
conn = new(Conn)
conn.Conn, err = net.Dial(network, address)
if err != nil {
return nil, err
}
return conn, nil
}
// ExchangeContext performs a synchronous UDP query, like Exchange. It
// additionally obeys deadlines from the passed Context.
func ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, err error) {
client := Client{Net: "udp"}
r, _, err = client.ExchangeContext(ctx, m, a)
// ignoring rtt to leave the original ExchangeContext API unchanged, but
// this function will go away
return r, err
}
// ExchangeConn performs a synchronous query. It sends the message m via the connection
// c and waits for a reply. The connection c is not closed by ExchangeConn.
// Deprecated: This function is going away, but can easily be mimicked:
//
// co := &dns.Conn{Conn: c} // c is your net.Conn
// co.WriteMsg(m)
// in, _ := co.ReadMsg()
// co.Close()
//
func ExchangeConn(c net.Conn, m *Msg) (r *Msg, err error) {
println("dns: ExchangeConn: this function is deprecated")
co := new(Conn)
co.Conn = c
if err = co.WriteMsg(m); err != nil {
return nil, err
}
r, err = co.ReadMsg()
if err == nil && r.Id != m.Id {
err = ErrId
}
return r, err
}
// DialTimeout acts like Dial but takes a timeout.
func DialTimeout(network, address string, timeout time.Duration) (conn *Conn, err error) {
client := Client{Net: network, Dialer: &net.Dialer{Timeout: timeout}}
return client.Dial(address)
}
// DialWithTLS connects to the address on the named network with TLS.
func DialWithTLS(network, address string, tlsConfig *tls.Config) (conn *Conn, err error) {
if !strings.HasSuffix(network, "-tls") {
network += "-tls"
}
client := Client{Net: network, TLSConfig: tlsConfig}
return client.Dial(address)
}
// DialTimeoutWithTLS acts like DialWithTLS but takes a timeout.
func DialTimeoutWithTLS(network, address string, tlsConfig *tls.Config, timeout time.Duration) (conn *Conn, err error) {
if !strings.HasSuffix(network, "-tls") {
network += "-tls"
}
client := Client{Net: network, Dialer: &net.Dialer{Timeout: timeout}, TLSConfig: tlsConfig}
return client.Dial(address)
}
// ExchangeContext acts like Exchange, but honors the deadline on the provided
// context, if present. If there is both a context deadline and a configured
// timeout on the client, the earliest of the two takes effect.
func (c *Client) ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
conn, err := c.DialContext(ctx, a)
if err != nil {
return nil, 0, err
}
defer conn.Close()
return c.exchangeWithConnContext(ctx, m, conn)
}
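// Illustrative sketch, not part of the vendored file above: a DNS-over-TLS
// query using Client with Net "tcp-tls". The resolver name and address are
// placeholder assumptions.
package main

import (
	"crypto/tls"
	"fmt"
	"log"
	"time"

	"github.com/miekg/dns"
)

func main() {
	c := &dns.Client{
		Net:       "tcp-tls",
		TLSConfig: &tls.Config{ServerName: "dns.example.net"},
		Timeout:   5 * time.Second,
	}

	m := new(dns.Msg)
	m.SetQuestion("example.org.", dns.TypeAAAA)
	m.SetEdns0(4096, false)

	in, rtt, err := c.Exchange(m, "dns.example.net:853")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(rtt, in.Rcode, len(in.Answer))
}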

135
vendor/github.com/miekg/dns/clientconfig.go generated vendored Normal file
View File

@@ -0,0 +1,135 @@
package dns
import (
"bufio"
"io"
"os"
"strconv"
"strings"
)
// ClientConfig wraps the contents of the /etc/resolv.conf file.
type ClientConfig struct {
Servers []string // servers to use
Search []string // suffixes to append to local name
Port string // what port to use
Ndots int // number of dots in name to trigger absolute lookup
Timeout int // seconds before giving up on packet
Attempts int // lost packets before giving up on server, not used in the package dns
}
// ClientConfigFromFile parses a resolv.conf(5) like file and returns
// a *ClientConfig.
func ClientConfigFromFile(resolvconf string) (*ClientConfig, error) {
file, err := os.Open(resolvconf)
if err != nil {
return nil, err
}
defer file.Close()
return ClientConfigFromReader(file)
}
// ClientConfigFromReader works like ClientConfigFromFile but takes an io.Reader as argument
func ClientConfigFromReader(resolvconf io.Reader) (*ClientConfig, error) {
c := new(ClientConfig)
scanner := bufio.NewScanner(resolvconf)
c.Servers = make([]string, 0)
c.Search = make([]string, 0)
c.Port = "53"
c.Ndots = 1
c.Timeout = 5
c.Attempts = 2
for scanner.Scan() {
if err := scanner.Err(); err != nil {
return nil, err
}
line := scanner.Text()
f := strings.Fields(line)
if len(f) < 1 {
continue
}
switch f[0] {
case "nameserver": // add one name server
if len(f) > 1 {
// One more check: make sure server name is
// just an IP address. Otherwise we need DNS
// to look it up.
name := f[1]
c.Servers = append(c.Servers, name)
}
case "domain": // set search path to just this domain
if len(f) > 1 {
c.Search = make([]string, 1)
c.Search[0] = f[1]
} else {
c.Search = make([]string, 0)
}
case "search": // set search path to given servers
c.Search = append([]string(nil), f[1:]...)
case "options": // magic options
for _, s := range f[1:] {
switch {
case len(s) >= 6 && s[:6] == "ndots:":
n, _ := strconv.Atoi(s[6:])
if n < 0 {
n = 0
} else if n > 15 {
n = 15
}
c.Ndots = n
case len(s) >= 8 && s[:8] == "timeout:":
n, _ := strconv.Atoi(s[8:])
if n < 1 {
n = 1
}
c.Timeout = n
case len(s) >= 9 && s[:9] == "attempts:":
n, _ := strconv.Atoi(s[9:])
if n < 1 {
n = 1
}
c.Attempts = n
case s == "rotate":
/* not imp */
}
}
}
}
return c, nil
}
// NameList returns all of the names that should be queried based on the
// config. It is based off of go's net/dns name building, but it does not
// check the length of the resulting names.
func (c *ClientConfig) NameList(name string) []string {
// if this domain is already fully qualified, no append needed.
if IsFqdn(name) {
return []string{name}
}
// Check to see if the name has more labels than Ndots. Do this before making
// the domain fully qualified.
hasNdots := CountLabel(name) > c.Ndots
// Make the domain fully qualified.
name = Fqdn(name)
// Make a list of names based off search.
names := []string{}
// If name has enough dots, try that first.
if hasNdots {
names = append(names, name)
}
for _, s := range c.Search {
names = append(names, Fqdn(name+s))
}
// If we didn't have enough dots, try after suffixes.
if !hasNdots {
names = append(names, name)
}
return names
}
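// Illustrative sketch, not part of the vendored file above: reading
// /etc/resolv.conf and using NameList to expand a relative name through the
// configured search path before querying the first nameserver.
package main

import (
	"fmt"
	"log"
	"net"

	"github.com/miekg/dns"
)

func main() {
	conf, err := dns.ClientConfigFromFile("/etc/resolv.conf")
	if err != nil {
		log.Fatal(err)
	}
	server := net.JoinHostPort(conf.Servers[0], conf.Port)

	c := new(dns.Client)
	for _, name := range conf.NameList("www") {
		m := new(dns.Msg)
		m.SetQuestion(name, dns.TypeA)
		in, _, err := c.Exchange(m, server)
		if err != nil || in.Rcode != dns.RcodeSuccess {
			continue // try the next candidate name
		}
		fmt.Println(name, "->", in.Answer)
		break
	}
}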

43
vendor/github.com/miekg/dns/dane.go generated vendored Normal file
View File

@@ -0,0 +1,43 @@
package dns
import (
"crypto/sha256"
"crypto/sha512"
"crypto/x509"
"encoding/hex"
"errors"
)
// CertificateToDANE converts a certificate to a hex string as used in the TLSA or SMIMEA records.
func CertificateToDANE(selector, matchingType uint8, cert *x509.Certificate) (string, error) {
switch matchingType {
case 0:
switch selector {
case 0:
return hex.EncodeToString(cert.Raw), nil
case 1:
return hex.EncodeToString(cert.RawSubjectPublicKeyInfo), nil
}
case 1:
h := sha256.New()
switch selector {
case 0:
h.Write(cert.Raw)
return hex.EncodeToString(h.Sum(nil)), nil
case 1:
h.Write(cert.RawSubjectPublicKeyInfo)
return hex.EncodeToString(h.Sum(nil)), nil
}
case 2:
h := sha512.New()
switch selector {
case 0:
h.Write(cert.Raw)
return hex.EncodeToString(h.Sum(nil)), nil
case 1:
h.Write(cert.RawSubjectPublicKeyInfo)
return hex.EncodeToString(h.Sum(nil)), nil
}
}
return "", errors.New("dns: bad MatchingType or Selector")
}
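// Illustrative sketch, not part of the vendored file above: building TLSA
// rdata for the leaf certificate of a TLS connection with CertificateToDANE.
// The host name is a placeholder; selector 1 / matching type 1 means
// SubjectPublicKeyInfo hashed with SHA-256 (a common DANE-EE layout).
package main

import (
	"crypto/tls"
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	conn, err := tls.Dial("tcp", "example.org:443", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	cert := conn.ConnectionState().PeerCertificates[0]

	digest, err := dns.CertificateToDANE(1, 1, cert)
	if err != nil {
		log.Fatal(err)
	}

	tlsa := &dns.TLSA{
		Hdr:          dns.RR_Header{Name: "_443._tcp.example.org.", Rrtype: dns.TypeTLSA, Class: dns.ClassINET, Ttl: 3600},
		Usage:        3, // DANE-EE
		Selector:     1, // SubjectPublicKeyInfo
		MatchingType: 1, // SHA-256
		Certificate:  digest,
	}
	fmt.Println(tlsa)
}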

381
vendor/github.com/miekg/dns/defaults.go generated vendored Normal file
View File

@@ -0,0 +1,381 @@
package dns
import (
"errors"
"net"
"strconv"
"strings"
)
const hexDigit = "0123456789abcdef"
// Everything is assumed in ClassINET.
// SetReply creates a reply message from a request message.
func (dns *Msg) SetReply(request *Msg) *Msg {
dns.Id = request.Id
dns.Response = true
dns.Opcode = request.Opcode
if dns.Opcode == OpcodeQuery {
dns.RecursionDesired = request.RecursionDesired // Copy rd bit
dns.CheckingDisabled = request.CheckingDisabled // Copy cd bit
}
dns.Rcode = RcodeSuccess
if len(request.Question) > 0 {
dns.Question = make([]Question, 1)
dns.Question[0] = request.Question[0]
}
return dns
}
// SetQuestion creates a question message, it sets the Question
// section, generates an Id and sets the RecursionDesired (RD)
// bit to true.
func (dns *Msg) SetQuestion(z string, t uint16) *Msg {
dns.Id = Id()
dns.RecursionDesired = true
dns.Question = make([]Question, 1)
dns.Question[0] = Question{z, t, ClassINET}
return dns
}
// SetNotify creates a notify message, it sets the Question
// section, generates an Id and sets the Authoritative (AA)
// bit to true.
func (dns *Msg) SetNotify(z string) *Msg {
dns.Opcode = OpcodeNotify
dns.Authoritative = true
dns.Id = Id()
dns.Question = make([]Question, 1)
dns.Question[0] = Question{z, TypeSOA, ClassINET}
return dns
}
// SetRcode creates an error message suitable for the request.
func (dns *Msg) SetRcode(request *Msg, rcode int) *Msg {
dns.SetReply(request)
dns.Rcode = rcode
return dns
}
// SetRcodeFormatError creates a message with FormError set.
func (dns *Msg) SetRcodeFormatError(request *Msg) *Msg {
dns.Rcode = RcodeFormatError
dns.Opcode = OpcodeQuery
dns.Response = true
dns.Authoritative = false
dns.Id = request.Id
return dns
}
// SetUpdate makes the message a dynamic update message. It
// sets the ZONE section to: z, TypeSOA, ClassINET.
func (dns *Msg) SetUpdate(z string) *Msg {
dns.Id = Id()
dns.Response = false
dns.Opcode = OpcodeUpdate
dns.Compress = false // BIND9 cannot handle compression
dns.Question = make([]Question, 1)
dns.Question[0] = Question{z, TypeSOA, ClassINET}
return dns
}
// SetIxfr creates a message for requesting an IXFR.
func (dns *Msg) SetIxfr(z string, serial uint32, ns, mbox string) *Msg {
dns.Id = Id()
dns.Question = make([]Question, 1)
dns.Ns = make([]RR, 1)
s := new(SOA)
s.Hdr = RR_Header{z, TypeSOA, ClassINET, defaultTtl, 0}
s.Serial = serial
s.Ns = ns
s.Mbox = mbox
dns.Question[0] = Question{z, TypeIXFR, ClassINET}
dns.Ns[0] = s
return dns
}
// SetAxfr creates a message for requesting an AXFR.
func (dns *Msg) SetAxfr(z string) *Msg {
dns.Id = Id()
dns.Question = make([]Question, 1)
dns.Question[0] = Question{z, TypeAXFR, ClassINET}
return dns
}
// SetTsig appends a TSIG RR to the message.
// This is only a skeleton TSIG RR that is added as the last RR in the
// additional section. The TSIG is calculated when the message is being sent.
func (dns *Msg) SetTsig(z, algo string, fudge uint16, timesigned int64) *Msg {
t := new(TSIG)
t.Hdr = RR_Header{z, TypeTSIG, ClassANY, 0, 0}
t.Algorithm = algo
t.Fudge = fudge
t.TimeSigned = uint64(timesigned)
t.OrigId = dns.Id
dns.Extra = append(dns.Extra, t)
return dns
}
// SetEdns0 appends an EDNS0 OPT RR to the message.
// TSIG should always be the last RR in a message.
func (dns *Msg) SetEdns0(udpsize uint16, do bool) *Msg {
e := new(OPT)
e.Hdr.Name = "."
e.Hdr.Rrtype = TypeOPT
e.SetUDPSize(udpsize)
if do {
e.SetDo()
}
dns.Extra = append(dns.Extra, e)
return dns
}
// IsTsig checks if the message has a TSIG record as the last record
// in the additional section. It returns the TSIG record found or nil.
func (dns *Msg) IsTsig() *TSIG {
if len(dns.Extra) > 0 {
if dns.Extra[len(dns.Extra)-1].Header().Rrtype == TypeTSIG {
return dns.Extra[len(dns.Extra)-1].(*TSIG)
}
}
return nil
}
// IsEdns0 checks if the message has an EDNS0 (OPT) record, any EDNS0
// record in the additional section will do. It returns the OPT record
// found or nil.
func (dns *Msg) IsEdns0() *OPT {
// RFC 6891, Section 6.1.1 allows the OPT record to appear
// anywhere in the additional record section, but it's usually at
// the end so start there.
for i := len(dns.Extra) - 1; i >= 0; i-- {
if dns.Extra[i].Header().Rrtype == TypeOPT {
return dns.Extra[i].(*OPT)
}
}
return nil
}
// popEdns0 is like IsEdns0, but it removes the record from the message.
func (dns *Msg) popEdns0() *OPT {
// RFC 6891, Section 6.1.1 allows the OPT record to appear
// anywhere in the additional record section, but it's usually at
// the end so start there.
for i := len(dns.Extra) - 1; i >= 0; i-- {
if dns.Extra[i].Header().Rrtype == TypeOPT {
opt := dns.Extra[i].(*OPT)
dns.Extra = append(dns.Extra[:i], dns.Extra[i+1:]...)
return opt
}
}
return nil
}
// IsDomainName checks if s is a valid domain name. It returns the number of
// labels and true when the domain name is valid. Note that a non-fully-qualified
// domain name is considered valid; in this case the last label is counted in
// the number of labels. When false is returned the number of labels is not
// defined. Also note that this function is extremely liberal; almost any
// string is a valid domain name as the DNS is an 8-bit protocol. It checks if each
// label fits in 63 characters and that the entire name will fit into the 255
// octet wire format limit.
func IsDomainName(s string) (labels int, ok bool) {
// XXX: The logic in this function was copied from packDomainName and
// should be kept in sync with that function.
const lenmsg = 256
if len(s) == 0 { // Ok, for instance when dealing with update RR without any rdata.
return 0, false
}
s = Fqdn(s)
// Each dot ends a segment of the name. Except for escaped dots (\.), which
// are normal dots.
var (
off int
begin int
wasDot bool
)
for i := 0; i < len(s); i++ {
switch s[i] {
case '\\':
if off+1 > lenmsg {
return labels, false
}
// check for \DDD
if i+3 < len(s) && isDigit(s[i+1]) && isDigit(s[i+2]) && isDigit(s[i+3]) {
i += 3
begin += 3
} else {
i++
begin++
}
wasDot = false
case '.':
if wasDot {
// two dots back to back is not legal
return labels, false
}
wasDot = true
labelLen := i - begin
if labelLen >= 1<<6 { // top two bits of length must be clear
return labels, false
}
// off can already (we're in a loop) be bigger than lenmsg
// this happens when a name isn't fully qualified
off += 1 + labelLen
if off > lenmsg {
return labels, false
}
labels++
begin = i + 1
default:
wasDot = false
}
}
return labels, true
}
// IsSubDomain checks if child is indeed a child of the parent. If child and parent
// are the same domain true is returned as well.
func IsSubDomain(parent, child string) bool {
// Entire child is contained in parent
return CompareDomainName(parent, child) == CountLabel(parent)
}
// IsMsg sanity checks buf and returns an error if it isn't a valid DNS packet.
// The checking is performed on the binary payload.
func IsMsg(buf []byte) error {
// Header
if len(buf) < headerSize {
return errors.New("dns: bad message header")
}
// Header: Opcode
// TODO(miek): more checks here, e.g. check all header bits.
return nil
}
// IsFqdn checks if a domain name is fully qualified.
func IsFqdn(s string) bool {
s2 := strings.TrimSuffix(s, ".")
if s == s2 {
return false
}
i := strings.LastIndexFunc(s2, func(r rune) bool {
return r != '\\'
})
// Test whether we have an even number of escape sequences before
// the dot or none.
return (len(s2)-i)%2 != 0
}
// IsRRset checks if a set of RRs is a valid RRset as defined by RFC 2181.
// This means the RRs need to have the same type, name, and class. Returns true
// if the RR set is valid, otherwise false.
func IsRRset(rrset []RR) bool {
if len(rrset) == 0 {
return false
}
if len(rrset) == 1 {
return true
}
rrHeader := rrset[0].Header()
rrType := rrHeader.Rrtype
rrClass := rrHeader.Class
rrName := rrHeader.Name
for _, rr := range rrset[1:] {
curRRHeader := rr.Header()
if curRRHeader.Rrtype != rrType || curRRHeader.Class != rrClass || curRRHeader.Name != rrName {
// Mismatch between the records, so this is not a valid rrset for
// signing/verifying.
return false
}
}
return true
}
// Fqdn returns the fully qualified domain name from s.
// If s is already fully qualified, it behaves as the identity function.
func Fqdn(s string) string {
if IsFqdn(s) {
return s
}
return s + "."
}
// CanonicalName returns the domain name in canonical form. A name in canonical
// form is lowercase and fully qualified. See Section 6.2 in RFC 4034.
func CanonicalName(s string) string {
return strings.ToLower(Fqdn(s))
}
// Copied from the official Go code.
// ReverseAddr returns the in-addr.arpa. or ip6.arpa. hostname of the IP
// address suitable for reverse DNS (PTR) record lookups or an error if it fails
// to parse the IP address.
func ReverseAddr(addr string) (arpa string, err error) {
ip := net.ParseIP(addr)
if ip == nil {
return "", &Error{err: "unrecognized address: " + addr}
}
if v4 := ip.To4(); v4 != nil {
buf := make([]byte, 0, net.IPv4len*4+len("in-addr.arpa."))
// Add it, in reverse, to the buffer
for i := len(v4) - 1; i >= 0; i-- {
buf = strconv.AppendInt(buf, int64(v4[i]), 10)
buf = append(buf, '.')
}
// Append "in-addr.arpa." and return (buf already has the final .)
buf = append(buf, "in-addr.arpa."...)
return string(buf), nil
}
// Must be IPv6
buf := make([]byte, 0, net.IPv6len*4+len("ip6.arpa."))
// Add it, in reverse, to the buffer
for i := len(ip) - 1; i >= 0; i-- {
v := ip[i]
buf = append(buf, hexDigit[v&0xF], '.', hexDigit[v>>4], '.')
}
// Append "ip6.arpa." and return (buf already has the final .)
buf = append(buf, "ip6.arpa."...)
return string(buf), nil
}
// String returns the string representation for the type t.
func (t Type) String() string {
if t1, ok := TypeToString[uint16(t)]; ok {
return t1
}
return "TYPE" + strconv.Itoa(int(t))
}
// String returns the string representation for the class c.
func (c Class) String() string {
if s, ok := ClassToString[uint16(c)]; ok {
// Only emit mnemonics when they are unambiguous, specially ANY is in both.
if _, ok := StringToType[s]; !ok {
return s
}
}
return "CLASS" + strconv.Itoa(int(c))
}
// String returns the string representation for the name n.
func (n Name) String() string {
return sprintName(string(n))
}
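// Illustrative sketch, not part of the vendored file above: a few of the
// helpers defined here in action.
package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	fmt.Println(dns.Fqdn("example.org"))    // "example.org."
	fmt.Println(dns.IsFqdn("example.org.")) // true

	arpa, err := dns.ReverseAddr("192.0.2.1") // "1.2.0.192.in-addr.arpa."
	if err != nil {
		log.Fatal(err)
	}

	m := new(dns.Msg)
	m.SetQuestion(arpa, dns.TypePTR) // PTR lookup for the reversed address
	m.SetEdns0(4096, false)          // advertise a 4096-byte UDP buffer
	fmt.Println(m)
}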

158
vendor/github.com/miekg/dns/dns.go generated vendored Normal file
View File

@@ -0,0 +1,158 @@
package dns
import (
"encoding/hex"
"strconv"
)
const (
year68 = 1 << 31 // For RFC1982 (Serial Arithmetic) calculations in 32 bits.
defaultTtl = 3600 // Default internal TTL.
// DefaultMsgSize is the standard default for messages larger than 512 bytes.
DefaultMsgSize = 4096
// MinMsgSize is the minimal size of a DNS packet.
MinMsgSize = 512
// MaxMsgSize is the largest possible DNS packet.
MaxMsgSize = 65535
)
// Error represents a DNS error.
type Error struct{ err string }
func (e *Error) Error() string {
if e == nil {
return "dns: <nil>"
}
return "dns: " + e.err
}
// An RR represents a resource record.
type RR interface {
// Header returns the header of a resource record. The header contains
// everything up to the rdata.
Header() *RR_Header
// String returns the text representation of the resource record.
String() string
// copy returns a copy of the RR
copy() RR
// len returns the length (in octets) of the compressed or uncompressed RR in wire format.
//
// If compression is nil, the uncompressed size will be returned, otherwise the compressed
// size will be returned and domain names will be added to the map for future compression.
len(off int, compression map[string]struct{}) int
// pack packs the records RDATA into wire format. The header will
// already have been packed into msg.
pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error)
// unpack unpacks an RR from wire format.
//
// This will only be called on a new and empty RR type with only the header populated. It
// will only be called if the record's RDATA is non-empty.
unpack(msg []byte, off int) (off1 int, err error)
// parse parses an RR from zone file format.
//
// This will only be called on a new and empty RR type with only the header populated.
parse(c *zlexer, origin string) *ParseError
// isDuplicate returns whether the two RRs are duplicates.
isDuplicate(r2 RR) bool
}
// RR_Header is the header all DNS resource records share.
type RR_Header struct {
Name string `dns:"cdomain-name"`
Rrtype uint16
Class uint16
Ttl uint32
Rdlength uint16 // Length of data after header.
}
// Header returns itself. This is here to make RR_Header implement the RR interface.
func (h *RR_Header) Header() *RR_Header { return h }
// Just to implement the RR interface.
func (h *RR_Header) copy() RR { return nil }
func (h *RR_Header) String() string {
var s string
if h.Rrtype == TypeOPT {
s = ";"
// and maybe other things
}
s += sprintName(h.Name) + "\t"
s += strconv.FormatInt(int64(h.Ttl), 10) + "\t"
s += Class(h.Class).String() + "\t"
s += Type(h.Rrtype).String() + "\t"
return s
}
func (h *RR_Header) len(off int, compression map[string]struct{}) int {
l := domainNameLen(h.Name, off, compression, true)
l += 10 // rrtype(2) + class(2) + ttl(4) + rdlength(2)
return l
}
func (h *RR_Header) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
// RR_Header has no RDATA to pack.
return off, nil
}
func (h *RR_Header) unpack(msg []byte, off int) (int, error) {
panic("dns: internal error: unpack should never be called on RR_Header")
}
func (h *RR_Header) parse(c *zlexer, origin string) *ParseError {
panic("dns: internal error: parse should never be called on RR_Header")
}
// ToRFC3597 converts a known RR to the unknown RR representation from RFC 3597.
func (rr *RFC3597) ToRFC3597(r RR) error {
buf := make([]byte, Len(r))
headerEnd, off, err := packRR(r, buf, 0, compressionMap{}, false)
if err != nil {
return err
}
buf = buf[:off]
*rr = RFC3597{Hdr: *r.Header()}
rr.Hdr.Rdlength = uint16(off - headerEnd)
if noRdata(rr.Hdr) {
return nil
}
_, err = rr.unpack(buf, headerEnd)
return err
}
// fromRFC3597 converts an unknown RR representation from RFC 3597 to the known RR type.
func (rr *RFC3597) fromRFC3597(r RR) error {
hdr := r.Header()
*hdr = rr.Hdr
// Can't overflow uint16 as the length of Rdata is validated in (*RFC3597).parse.
// We can only get here when rr was constructed with that method.
hdr.Rdlength = uint16(hex.DecodedLen(len(rr.Rdata)))
if noRdata(*hdr) {
// Dynamic update.
return nil
}
// rr.pack requires an extra allocation and a copy so we just decode Rdata
// manually, it's simpler anyway.
msg, err := hex.DecodeString(rr.Rdata)
if err != nil {
return err
}
_, err = r.unpack(msg, 0)
return err
}
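// Illustrative sketch, not part of the vendored file above: converting a
// known record into its RFC 3597 "unknown RR" representation.
package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	rr, err := dns.NewRR("www.example.org. 3600 IN A 192.0.2.1")
	if err != nil {
		log.Fatal(err)
	}

	var u dns.RFC3597
	if err := u.ToRFC3597(rr); err != nil {
		log.Fatal(err)
	}
	// Prints the record in the generic TYPE1 \# <length> <hex rdata> form.
	fmt.Println(u.String())
}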

765
vendor/github.com/miekg/dns/dnssec.go generated vendored Normal file
View File

@@ -0,0 +1,765 @@
package dns
import (
"bytes"
"crypto"
"crypto/ecdsa"
"crypto/ed25519"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
_ "crypto/sha1" // need its init function
_ "crypto/sha256" // need its init function
_ "crypto/sha512" // need its init function
"encoding/asn1"
"encoding/binary"
"encoding/hex"
"math/big"
"sort"
"strings"
"time"
)
// DNSSEC encryption algorithm codes.
const (
_ uint8 = iota
RSAMD5
DH
DSA
_ // Skip 4, RFC 6725, section 2.1
RSASHA1
DSANSEC3SHA1
RSASHA1NSEC3SHA1
RSASHA256
_ // Skip 9, RFC 6725, section 2.1
RSASHA512
_ // Skip 11, RFC 6725, section 2.1
ECCGOST
ECDSAP256SHA256
ECDSAP384SHA384
ED25519
ED448
INDIRECT uint8 = 252
PRIVATEDNS uint8 = 253 // Private (experimental keys)
PRIVATEOID uint8 = 254
)
// AlgorithmToString is a map of algorithm IDs to algorithm names.
var AlgorithmToString = map[uint8]string{
RSAMD5: "RSAMD5",
DH: "DH",
DSA: "DSA",
RSASHA1: "RSASHA1",
DSANSEC3SHA1: "DSA-NSEC3-SHA1",
RSASHA1NSEC3SHA1: "RSASHA1-NSEC3-SHA1",
RSASHA256: "RSASHA256",
RSASHA512: "RSASHA512",
ECCGOST: "ECC-GOST",
ECDSAP256SHA256: "ECDSAP256SHA256",
ECDSAP384SHA384: "ECDSAP384SHA384",
ED25519: "ED25519",
ED448: "ED448",
INDIRECT: "INDIRECT",
PRIVATEDNS: "PRIVATEDNS",
PRIVATEOID: "PRIVATEOID",
}
// AlgorithmToHash is a map of algorithm crypto hash IDs to crypto.Hash's.
var AlgorithmToHash = map[uint8]crypto.Hash{
RSAMD5: crypto.MD5, // Deprecated in RFC 6725
DSA: crypto.SHA1,
RSASHA1: crypto.SHA1,
RSASHA1NSEC3SHA1: crypto.SHA1,
RSASHA256: crypto.SHA256,
ECDSAP256SHA256: crypto.SHA256,
ECDSAP384SHA384: crypto.SHA384,
RSASHA512: crypto.SHA512,
ED25519: crypto.Hash(0),
}
// DNSSEC hashing algorithm codes.
const (
_ uint8 = iota
SHA1 // RFC 4034
SHA256 // RFC 4509
GOST94 // RFC 5933
SHA384 // Experimental
SHA512 // Experimental
)
// HashToString is a map of hash IDs to names.
var HashToString = map[uint8]string{
SHA1: "SHA1",
SHA256: "SHA256",
GOST94: "GOST94",
SHA384: "SHA384",
SHA512: "SHA512",
}
// DNSKEY flag values.
const (
SEP = 1
REVOKE = 1 << 7
ZONE = 1 << 8
)
// The RRSIG needs to be converted to wireformat with some of the rdata (the signature) missing.
type rrsigWireFmt struct {
TypeCovered uint16
Algorithm uint8
Labels uint8
OrigTtl uint32
Expiration uint32
Inception uint32
KeyTag uint16
SignerName string `dns:"domain-name"`
/* No Signature */
}
// Used for converting DNSKEY's rdata to wirefmt.
type dnskeyWireFmt struct {
Flags uint16
Protocol uint8
Algorithm uint8
PublicKey string `dns:"base64"`
/* Nothing is left out */
}
func divRoundUp(a, b int) int {
return (a + b - 1) / b
}
// KeyTag calculates the keytag (or key-id) of the DNSKEY.
func (k *DNSKEY) KeyTag() uint16 {
if k == nil {
return 0
}
var keytag int
switch k.Algorithm {
case RSAMD5:
// Look at the bottom two bytes of the modulus, which is the last
// item in the pubkey.
// This algorithm has been deprecated, but keep this key-tag calculation.
modulus, _ := fromBase64([]byte(k.PublicKey))
if len(modulus) > 1 {
x := binary.BigEndian.Uint16(modulus[len(modulus)-2:])
keytag = int(x)
}
default:
keywire := new(dnskeyWireFmt)
keywire.Flags = k.Flags
keywire.Protocol = k.Protocol
keywire.Algorithm = k.Algorithm
keywire.PublicKey = k.PublicKey
wire := make([]byte, DefaultMsgSize)
n, err := packKeyWire(keywire, wire)
if err != nil {
return 0
}
wire = wire[:n]
for i, v := range wire {
if i&1 != 0 {
keytag += int(v) // must be larger than uint32
} else {
keytag += int(v) << 8
}
}
keytag += keytag >> 16 & 0xFFFF
keytag &= 0xFFFF
}
return uint16(keytag)
}
// ToDS converts a DNSKEY record to a DS record.
func (k *DNSKEY) ToDS(h uint8) *DS {
if k == nil {
return nil
}
ds := new(DS)
ds.Hdr.Name = k.Hdr.Name
ds.Hdr.Class = k.Hdr.Class
ds.Hdr.Rrtype = TypeDS
ds.Hdr.Ttl = k.Hdr.Ttl
ds.Algorithm = k.Algorithm
ds.DigestType = h
ds.KeyTag = k.KeyTag()
keywire := new(dnskeyWireFmt)
keywire.Flags = k.Flags
keywire.Protocol = k.Protocol
keywire.Algorithm = k.Algorithm
keywire.PublicKey = k.PublicKey
wire := make([]byte, DefaultMsgSize)
n, err := packKeyWire(keywire, wire)
if err != nil {
return nil
}
wire = wire[:n]
owner := make([]byte, 255)
off, err1 := PackDomainName(CanonicalName(k.Hdr.Name), owner, 0, nil, false)
if err1 != nil {
return nil
}
owner = owner[:off]
// RFC4034:
// digest = digest_algorithm( DNSKEY owner name | DNSKEY RDATA);
// "|" denotes concatenation
// DNSKEY RDATA = Flags | Protocol | Algorithm | Public Key.
var hash crypto.Hash
switch h {
case SHA1:
hash = crypto.SHA1
case SHA256:
hash = crypto.SHA256
case SHA384:
hash = crypto.SHA384
case SHA512:
hash = crypto.SHA512
default:
return nil
}
s := hash.New()
s.Write(owner)
s.Write(wire)
ds.Digest = hex.EncodeToString(s.Sum(nil))
return ds
}
// ToCDNSKEY converts a DNSKEY record to a CDNSKEY record.
func (k *DNSKEY) ToCDNSKEY() *CDNSKEY {
c := &CDNSKEY{DNSKEY: *k}
c.Hdr = k.Hdr
c.Hdr.Rrtype = TypeCDNSKEY
return c
}
// ToCDS converts a DS record to a CDS record.
func (d *DS) ToCDS() *CDS {
c := &CDS{DS: *d}
c.Hdr = d.Hdr
c.Hdr.Rrtype = TypeCDS
return c
}
// Sign signs an RRSet. The signature needs to be filled in with the values:
// Inception, Expiration, KeyTag, SignerName and Algorithm. The rest is copied
// from the RRset. Sign returns a non-nil error if the signing fails.
// There is no check if RRSet is a proper (RFC 2181) RRSet. If OrigTTL is non
// zero, it is used as-is, otherwise the TTL of the RRset is used as the
// OrigTTL.
func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error {
if k == nil {
return ErrPrivKey
}
// s.Inception and s.Expiration may be 0 (rollover etc.), the rest must be set
if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 {
return ErrKey
}
h0 := rrset[0].Header()
rr.Hdr.Rrtype = TypeRRSIG
rr.Hdr.Name = h0.Name
rr.Hdr.Class = h0.Class
if rr.OrigTtl == 0 { // If set don't override
rr.OrigTtl = h0.Ttl
}
rr.TypeCovered = h0.Rrtype
rr.Labels = uint8(CountLabel(h0.Name))
if strings.HasPrefix(h0.Name, "*") {
rr.Labels-- // wildcard, remove from label count
}
sigwire := new(rrsigWireFmt)
sigwire.TypeCovered = rr.TypeCovered
sigwire.Algorithm = rr.Algorithm
sigwire.Labels = rr.Labels
sigwire.OrigTtl = rr.OrigTtl
sigwire.Expiration = rr.Expiration
sigwire.Inception = rr.Inception
sigwire.KeyTag = rr.KeyTag
// For signing, lowercase this name
sigwire.SignerName = CanonicalName(rr.SignerName)
// Create the desired binary blob
signdata := make([]byte, DefaultMsgSize)
n, err := packSigWire(sigwire, signdata)
if err != nil {
return err
}
signdata = signdata[:n]
wire, err := rawSignatureData(rrset, rr)
if err != nil {
return err
}
hash, ok := AlgorithmToHash[rr.Algorithm]
if !ok {
return ErrAlg
}
switch rr.Algorithm {
case ED25519:
// ed25519 signs the raw message and performs hashing internally.
// All other supported signature schemes operate over the pre-hashed
// message, and thus ed25519 must be handled separately here.
//
// The raw message is passed directly into sign and crypto.Hash(0) is
// used to signal to the crypto.Signer that the data has not been hashed.
signature, err := sign(k, append(signdata, wire...), crypto.Hash(0), rr.Algorithm)
if err != nil {
return err
}
rr.Signature = toBase64(signature)
return nil
case RSAMD5, DSA, DSANSEC3SHA1:
// See RFC 6944.
return ErrAlg
default:
h := hash.New()
h.Write(signdata)
h.Write(wire)
signature, err := sign(k, h.Sum(nil), hash, rr.Algorithm)
if err != nil {
return err
}
rr.Signature = toBase64(signature)
return nil
}
}
func sign(k crypto.Signer, hashed []byte, hash crypto.Hash, alg uint8) ([]byte, error) {
signature, err := k.Sign(rand.Reader, hashed, hash)
if err != nil {
return nil, err
}
switch alg {
case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512:
return signature, nil
case ECDSAP256SHA256, ECDSAP384SHA384:
ecdsaSignature := &struct {
R, S *big.Int
}{}
if _, err := asn1.Unmarshal(signature, ecdsaSignature); err != nil {
return nil, err
}
var intlen int
switch alg {
case ECDSAP256SHA256:
intlen = 32
case ECDSAP384SHA384:
intlen = 48
}
signature := intToBytes(ecdsaSignature.R, intlen)
signature = append(signature, intToBytes(ecdsaSignature.S, intlen)...)
return signature, nil
case ED25519:
return signature, nil
default:
return nil, ErrAlg
}
}
// Verify validates an RRSet with the signature and key. This is only the
// cryptographic test, the signature validity period must be checked separately.
// This function copies the rdata of some RRs (to lowercase domain names) for the validation to work.
// It also checks that the Zone Key bit (RFC 4034 2.1.1) is set on the DNSKEY
// and that the Protocol field is set to 3 (RFC 4034 2.1.2).
func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error {
// First the easy checks
if !IsRRset(rrset) {
return ErrRRset
}
if rr.KeyTag != k.KeyTag() {
return ErrKey
}
if rr.Hdr.Class != k.Hdr.Class {
return ErrKey
}
if rr.Algorithm != k.Algorithm {
return ErrKey
}
if !strings.EqualFold(rr.SignerName, k.Hdr.Name) {
return ErrKey
}
if k.Protocol != 3 {
return ErrKey
}
// RFC 4034 2.1.1 If bit 7 has value 0, then the DNSKEY record holds some
// other type of DNS public key and MUST NOT be used to verify RRSIGs that
// cover RRsets.
if k.Flags&ZONE == 0 {
return ErrKey
}
// IsRRset checked that we have at least one RR and that the RRs in
// the set have consistent type, class, and name. Also check that type and
// class matches the RRSIG record.
if h0 := rrset[0].Header(); h0.Class != rr.Hdr.Class || h0.Rrtype != rr.TypeCovered {
return ErrRRset
}
// RFC 4035 5.3.2. Reconstructing the Signed Data
// Copy the sig, except the rrsig data
sigwire := new(rrsigWireFmt)
sigwire.TypeCovered = rr.TypeCovered
sigwire.Algorithm = rr.Algorithm
sigwire.Labels = rr.Labels
sigwire.OrigTtl = rr.OrigTtl
sigwire.Expiration = rr.Expiration
sigwire.Inception = rr.Inception
sigwire.KeyTag = rr.KeyTag
sigwire.SignerName = CanonicalName(rr.SignerName)
// Create the desired binary blob
signeddata := make([]byte, DefaultMsgSize)
n, err := packSigWire(sigwire, signeddata)
if err != nil {
return err
}
signeddata = signeddata[:n]
wire, err := rawSignatureData(rrset, rr)
if err != nil {
return err
}
sigbuf := rr.sigBuf() // Get the binary signature data
if rr.Algorithm == PRIVATEDNS { // PRIVATEOID
// TODO(miek)
// remove the domain name and assume its ours?
}
hash, ok := AlgorithmToHash[rr.Algorithm]
if !ok {
return ErrAlg
}
switch rr.Algorithm {
case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512:
// TODO(mg): this can be done quicker, ie. cache the pubkey data somewhere??
pubkey := k.publicKeyRSA() // Get the key
if pubkey == nil {
return ErrKey
}
h := hash.New()
h.Write(signeddata)
h.Write(wire)
return rsa.VerifyPKCS1v15(pubkey, hash, h.Sum(nil), sigbuf)
case ECDSAP256SHA256, ECDSAP384SHA384:
pubkey := k.publicKeyECDSA()
if pubkey == nil {
return ErrKey
}
// Split sigbuf into the r and s coordinates
r := new(big.Int).SetBytes(sigbuf[:len(sigbuf)/2])
s := new(big.Int).SetBytes(sigbuf[len(sigbuf)/2:])
h := hash.New()
h.Write(signeddata)
h.Write(wire)
if ecdsa.Verify(pubkey, h.Sum(nil), r, s) {
return nil
}
return ErrSig
case ED25519:
pubkey := k.publicKeyED25519()
if pubkey == nil {
return ErrKey
}
if ed25519.Verify(pubkey, append(signeddata, wire...), sigbuf) {
return nil
}
return ErrSig
default:
return ErrAlg
}
}
// ValidityPeriod uses RFC1982 serial arithmetic to calculate
// if a signature period is valid. If t is the zero time, the
// current time is taken, otherwise t is used. Returns true if the signature
// is valid at the given time, otherwise returns false.
func (rr *RRSIG) ValidityPeriod(t time.Time) bool {
var utc int64
if t.IsZero() {
utc = time.Now().UTC().Unix()
} else {
utc = t.UTC().Unix()
}
modi := (int64(rr.Inception) - utc) / year68
mode := (int64(rr.Expiration) - utc) / year68
ti := int64(rr.Inception) + modi*year68
te := int64(rr.Expiration) + mode*year68
return ti <= utc && utc <= te
}
// Return the signature's sigdata, decoded from base64, as a byte slice.
func (rr *RRSIG) sigBuf() []byte {
sigbuf, err := fromBase64([]byte(rr.Signature))
if err != nil {
return nil
}
return sigbuf
}
// publicKeyRSA returns the RSA public key from a DNSKEY record.
func (k *DNSKEY) publicKeyRSA() *rsa.PublicKey {
keybuf, err := fromBase64([]byte(k.PublicKey))
if err != nil {
return nil
}
if len(keybuf) < 1+1+64 {
// Exponent must be at least 1 byte and modulus at least 64
return nil
}
// RFC 2537/3110, section 2. RSA Public KEY Resource Records
// The exponent length is in the 0th byte, unless it is zero, in which
// case it is in bytes 1 and 2 as a 16-bit number.
explen := uint16(keybuf[0])
keyoff := 1
if explen == 0 {
explen = uint16(keybuf[1])<<8 | uint16(keybuf[2])
keyoff = 3
}
if explen > 4 || explen == 0 || keybuf[keyoff] == 0 {
// Exponent larger than supported by the crypto package,
// empty, or contains prohibited leading zero.
return nil
}
modoff := keyoff + int(explen)
modlen := len(keybuf) - modoff
if modlen < 64 || modlen > 512 || keybuf[modoff] == 0 {
// Modulus is too small, large, or contains prohibited leading zero.
return nil
}
pubkey := new(rsa.PublicKey)
var expo uint64
// The exponent of length explen is between keyoff and modoff.
for _, v := range keybuf[keyoff:modoff] {
expo <<= 8
expo |= uint64(v)
}
if expo > 1<<31-1 {
// Larger exponent than supported by the crypto package.
return nil
}
pubkey.E = int(expo)
pubkey.N = new(big.Int).SetBytes(keybuf[modoff:])
return pubkey
}
// publicKeyECDSA returns the Curve public key from the DNSKEY record.
func (k *DNSKEY) publicKeyECDSA() *ecdsa.PublicKey {
keybuf, err := fromBase64([]byte(k.PublicKey))
if err != nil {
return nil
}
pubkey := new(ecdsa.PublicKey)
switch k.Algorithm {
case ECDSAP256SHA256:
pubkey.Curve = elliptic.P256()
if len(keybuf) != 64 {
// wrongly encoded key
return nil
}
case ECDSAP384SHA384:
pubkey.Curve = elliptic.P384()
if len(keybuf) != 96 {
// Wrongly encoded key
return nil
}
}
pubkey.X = new(big.Int).SetBytes(keybuf[:len(keybuf)/2])
pubkey.Y = new(big.Int).SetBytes(keybuf[len(keybuf)/2:])
return pubkey
}
func (k *DNSKEY) publicKeyED25519() ed25519.PublicKey {
keybuf, err := fromBase64([]byte(k.PublicKey))
if err != nil {
return nil
}
if len(keybuf) != ed25519.PublicKeySize {
return nil
}
return keybuf
}
type wireSlice [][]byte
func (p wireSlice) Len() int { return len(p) }
func (p wireSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p wireSlice) Less(i, j int) bool {
_, ioff, _ := UnpackDomainName(p[i], 0)
_, joff, _ := UnpackDomainName(p[j], 0)
return bytes.Compare(p[i][ioff+10:], p[j][joff+10:]) < 0
}
// Return the raw signature data.
func rawSignatureData(rrset []RR, s *RRSIG) (buf []byte, err error) {
wires := make(wireSlice, len(rrset))
for i, r := range rrset {
r1 := r.copy()
h := r1.Header()
h.Ttl = s.OrigTtl
labels := SplitDomainName(h.Name)
// 6.2. Canonical RR Form. (4) - wildcards
if len(labels) > int(s.Labels) {
// Wildcard
h.Name = "*." + strings.Join(labels[len(labels)-int(s.Labels):], ".") + "."
}
// RFC 4034: 6.2. Canonical RR Form. (2) - domain name to lowercase
h.Name = CanonicalName(h.Name)
// 6.2. Canonical RR Form. (3) - domain rdata to lowercase.
// NS, MD, MF, CNAME, SOA, MB, MG, MR, PTR,
// HINFO, MINFO, MX, RP, AFSDB, RT, SIG, PX, NXT, NAPTR, KX,
// SRV, DNAME, A6
//
// RFC 6840 - Clarifications and Implementation Notes for DNS Security (DNSSEC):
// Section 6.2 of [RFC4034] also erroneously lists HINFO as a record
// that needs conversion to lowercase, and twice at that. Since HINFO
// records contain no domain names, they are not subject to case
// conversion.
switch x := r1.(type) {
case *NS:
x.Ns = CanonicalName(x.Ns)
case *MD:
x.Md = CanonicalName(x.Md)
case *MF:
x.Mf = CanonicalName(x.Mf)
case *CNAME:
x.Target = CanonicalName(x.Target)
case *SOA:
x.Ns = CanonicalName(x.Ns)
x.Mbox = CanonicalName(x.Mbox)
case *MB:
x.Mb = CanonicalName(x.Mb)
case *MG:
x.Mg = CanonicalName(x.Mg)
case *MR:
x.Mr = CanonicalName(x.Mr)
case *PTR:
x.Ptr = CanonicalName(x.Ptr)
case *MINFO:
x.Rmail = CanonicalName(x.Rmail)
x.Email = CanonicalName(x.Email)
case *MX:
x.Mx = CanonicalName(x.Mx)
case *RP:
x.Mbox = CanonicalName(x.Mbox)
x.Txt = CanonicalName(x.Txt)
case *AFSDB:
x.Hostname = CanonicalName(x.Hostname)
case *RT:
x.Host = CanonicalName(x.Host)
case *SIG:
x.SignerName = CanonicalName(x.SignerName)
case *PX:
x.Map822 = CanonicalName(x.Map822)
x.Mapx400 = CanonicalName(x.Mapx400)
case *NAPTR:
x.Replacement = CanonicalName(x.Replacement)
case *KX:
x.Exchanger = CanonicalName(x.Exchanger)
case *SRV:
x.Target = CanonicalName(x.Target)
case *DNAME:
x.Target = CanonicalName(x.Target)
}
// 6.2. Canonical RR Form. (5) - origTTL
wire := make([]byte, Len(r1)+1) // +1 to be safe(r)
off, err1 := PackRR(r1, wire, 0, nil, false)
if err1 != nil {
return nil, err1
}
wire = wire[:off]
wires[i] = wire
}
sort.Sort(wires)
for i, wire := range wires {
if i > 0 && bytes.Equal(wire, wires[i-1]) {
continue
}
buf = append(buf, wire...)
}
return buf, nil
}
func packSigWire(sw *rrsigWireFmt, msg []byte) (int, error) {
// copied from zmsg.go RRSIG packing
off, err := packUint16(sw.TypeCovered, msg, 0)
if err != nil {
return off, err
}
off, err = packUint8(sw.Algorithm, msg, off)
if err != nil {
return off, err
}
off, err = packUint8(sw.Labels, msg, off)
if err != nil {
return off, err
}
off, err = packUint32(sw.OrigTtl, msg, off)
if err != nil {
return off, err
}
off, err = packUint32(sw.Expiration, msg, off)
if err != nil {
return off, err
}
off, err = packUint32(sw.Inception, msg, off)
if err != nil {
return off, err
}
off, err = packUint16(sw.KeyTag, msg, off)
if err != nil {
return off, err
}
off, err = PackDomainName(sw.SignerName, msg, off, nil, false)
if err != nil {
return off, err
}
return off, nil
}
func packKeyWire(dw *dnskeyWireFmt, msg []byte) (int, error) {
// copied from zmsg.go DNSKEY packing
off, err := packUint16(dw.Flags, msg, 0)
if err != nil {
return off, err
}
off, err = packUint8(dw.Protocol, msg, off)
if err != nil {
return off, err
}
off, err = packUint8(dw.Algorithm, msg, off)
if err != nil {
return off, err
}
off, err = packStringBase64(dw.PublicKey, msg, off)
if err != nil {
return off, err
}
return off, nil
}
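// Illustrative sketch, not part of the vendored file above: signing and
// verifying an RRset with a freshly generated Ed25519 zone key (Generate is
// defined in dnssec_keygen.go). The zone name and validity window are
// placeholder assumptions.
package main

import (
	"crypto"
	"fmt"
	"log"
	"time"

	"github.com/miekg/dns"
)

func main() {
	key := &dns.DNSKEY{
		Hdr:       dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 3600},
		Flags:     dns.ZONE, // zone signing key
		Protocol:  3,
		Algorithm: dns.ED25519,
	}
	priv, err := key.Generate(256)
	if err != nil {
		log.Fatal(err)
	}

	a, err := dns.NewRR("www.example.org. 3600 IN A 192.0.2.1")
	if err != nil {
		log.Fatal(err)
	}
	rrset := []dns.RR{a}

	sig := &dns.RRSIG{
		KeyTag:     key.KeyTag(),
		SignerName: key.Hdr.Name,
		Algorithm:  key.Algorithm,
		Inception:  uint32(time.Now().Add(-time.Hour).Unix()),
		Expiration: uint32(time.Now().Add(30 * 24 * time.Hour).Unix()),
	}
	if err := sig.Sign(priv.(crypto.Signer), rrset); err != nil {
		log.Fatal(err)
	}
	if err := sig.Verify(key, rrset); err != nil {
		log.Fatal(err)
	}
	fmt.Println("RRSIG verifies:", sig)
}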

139
vendor/github.com/miekg/dns/dnssec_keygen.go generated vendored Normal file
View File

@@ -0,0 +1,139 @@
package dns
import (
"crypto"
"crypto/ecdsa"
"crypto/ed25519"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"math/big"
)
// Generate generates a DNSKEY of the given bit size.
// The public part is put inside the DNSKEY record.
// The Algorithm in the key must be set as this will define
// what kind of DNSKEY will be generated.
// The ECDSA algorithms imply a fixed keysize, in that case
// bits should be set to the size of the algorithm.
func (k *DNSKEY) Generate(bits int) (crypto.PrivateKey, error) {
switch k.Algorithm {
case RSASHA1, RSASHA256, RSASHA1NSEC3SHA1:
if bits < 512 || bits > 4096 {
return nil, ErrKeySize
}
case RSASHA512:
if bits < 1024 || bits > 4096 {
return nil, ErrKeySize
}
case ECDSAP256SHA256:
if bits != 256 {
return nil, ErrKeySize
}
case ECDSAP384SHA384:
if bits != 384 {
return nil, ErrKeySize
}
case ED25519:
if bits != 256 {
return nil, ErrKeySize
}
default:
return nil, ErrAlg
}
switch k.Algorithm {
case RSASHA1, RSASHA256, RSASHA512, RSASHA1NSEC3SHA1:
priv, err := rsa.GenerateKey(rand.Reader, bits)
if err != nil {
return nil, err
}
k.setPublicKeyRSA(priv.PublicKey.E, priv.PublicKey.N)
return priv, nil
case ECDSAP256SHA256, ECDSAP384SHA384:
var c elliptic.Curve
switch k.Algorithm {
case ECDSAP256SHA256:
c = elliptic.P256()
case ECDSAP384SHA384:
c = elliptic.P384()
}
priv, err := ecdsa.GenerateKey(c, rand.Reader)
if err != nil {
return nil, err
}
k.setPublicKeyECDSA(priv.PublicKey.X, priv.PublicKey.Y)
return priv, nil
case ED25519:
pub, priv, err := ed25519.GenerateKey(rand.Reader)
if err != nil {
return nil, err
}
k.setPublicKeyED25519(pub)
return priv, nil
default:
return nil, ErrAlg
}
}
// Set the public key (the values E and N)
func (k *DNSKEY) setPublicKeyRSA(_E int, _N *big.Int) bool {
if _E == 0 || _N == nil {
return false
}
buf := exponentToBuf(_E)
buf = append(buf, _N.Bytes()...)
k.PublicKey = toBase64(buf)
return true
}
// Set the public key for Elliptic Curves
func (k *DNSKEY) setPublicKeyECDSA(_X, _Y *big.Int) bool {
if _X == nil || _Y == nil {
return false
}
var intlen int
switch k.Algorithm {
case ECDSAP256SHA256:
intlen = 32
case ECDSAP384SHA384:
intlen = 48
}
k.PublicKey = toBase64(curveToBuf(_X, _Y, intlen))
return true
}
// Set the public key for Ed25519
func (k *DNSKEY) setPublicKeyED25519(_K ed25519.PublicKey) bool {
if _K == nil {
return false
}
k.PublicKey = toBase64(_K)
return true
}
// Set the public key (the values E and N) for RSA
// RFC 3110: Section 2. RSA Public KEY Resource Records
func exponentToBuf(_E int) []byte {
var buf []byte
i := big.NewInt(int64(_E)).Bytes()
if len(i) < 256 {
buf = make([]byte, 1, 1+len(i))
buf[0] = uint8(len(i))
} else {
buf = make([]byte, 3, 3+len(i))
buf[0] = 0
buf[1] = uint8(len(i) >> 8)
buf[2] = uint8(len(i))
}
buf = append(buf, i...)
return buf
}
// Set the public key for X and Y for Curve. The two
// values are just concatenated.
func curveToBuf(_X, _Y *big.Int, intlen int) []byte {
buf := intToBytes(_X, intlen)
buf = append(buf, intToBytes(_Y, intlen)...)
return buf
}
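As an illustrative sketch of the API above (not part of the vendored file; the owner name is a placeholder and the dns, fmt and log imports are assumed): generate an ECDSA P-256 key and print both the public DNSKEY RR and the BIND-style private key.

    key := &dns.DNSKEY{
        Hdr:       dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 3600},
        Flags:     257, // key-signing key
        Protocol:  3,
        Algorithm: dns.ECDSAP256SHA256,
    }
    priv, err := key.Generate(256) // bits must match the algorithm, see the checks above
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(key.String())               // DNSKEY RR carrying the base64 public key
    fmt.Println(key.PrivateKeyString(priv)) // Private-key-format: v1.3 ...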

309
vendor/github.com/miekg/dns/dnssec_keyscan.go generated vendored Normal file
View File

@@ -0,0 +1,309 @@
package dns
import (
"bufio"
"crypto"
"crypto/ecdsa"
"crypto/ed25519"
"crypto/rsa"
"io"
"math/big"
"strconv"
"strings"
)
// NewPrivateKey returns a PrivateKey by parsing the string s.
// s should be in the same form as the BIND private key files.
func (k *DNSKEY) NewPrivateKey(s string) (crypto.PrivateKey, error) {
if s == "" || s[len(s)-1] != '\n' { // We need a closing newline
return k.ReadPrivateKey(strings.NewReader(s+"\n"), "")
}
return k.ReadPrivateKey(strings.NewReader(s), "")
}
// ReadPrivateKey reads a private key from the io.Reader q. The string file is
// only used in error reporting.
// The public key must be known, because some cryptographic algorithms embed
// the public key inside the private key.
func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (crypto.PrivateKey, error) {
m, err := parseKey(q, file)
if m == nil {
return nil, err
}
if _, ok := m["private-key-format"]; !ok {
return nil, ErrPrivKey
}
if m["private-key-format"] != "v1.2" && m["private-key-format"] != "v1.3" {
return nil, ErrPrivKey
}
// TODO(mg): check if the pubkey matches the private key
algo, err := strconv.ParseUint(strings.SplitN(m["algorithm"], " ", 2)[0], 10, 8)
if err != nil {
return nil, ErrPrivKey
}
switch uint8(algo) {
case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512:
priv, err := readPrivateKeyRSA(m)
if err != nil {
return nil, err
}
pub := k.publicKeyRSA()
if pub == nil {
return nil, ErrKey
}
priv.PublicKey = *pub
return priv, nil
case ECDSAP256SHA256, ECDSAP384SHA384:
priv, err := readPrivateKeyECDSA(m)
if err != nil {
return nil, err
}
pub := k.publicKeyECDSA()
if pub == nil {
return nil, ErrKey
}
priv.PublicKey = *pub
return priv, nil
case ED25519:
return readPrivateKeyED25519(m)
default:
return nil, ErrAlg
}
}
// Read a private key (file) string and create a public key. Return the private key.
func readPrivateKeyRSA(m map[string]string) (*rsa.PrivateKey, error) {
p := new(rsa.PrivateKey)
p.Primes = []*big.Int{nil, nil}
for k, v := range m {
switch k {
case "modulus", "publicexponent", "privateexponent", "prime1", "prime2":
v1, err := fromBase64([]byte(v))
if err != nil {
return nil, err
}
switch k {
case "modulus":
p.PublicKey.N = new(big.Int).SetBytes(v1)
case "publicexponent":
i := new(big.Int).SetBytes(v1)
p.PublicKey.E = int(i.Int64()) // int64 should be large enough
case "privateexponent":
p.D = new(big.Int).SetBytes(v1)
case "prime1":
p.Primes[0] = new(big.Int).SetBytes(v1)
case "prime2":
p.Primes[1] = new(big.Int).SetBytes(v1)
}
case "exponent1", "exponent2", "coefficient":
// not used in Go (yet)
case "created", "publish", "activate":
// not used in Go (yet)
}
}
return p, nil
}
func readPrivateKeyECDSA(m map[string]string) (*ecdsa.PrivateKey, error) {
p := new(ecdsa.PrivateKey)
p.D = new(big.Int)
// TODO: validate that the required flags are present
for k, v := range m {
switch k {
case "privatekey":
v1, err := fromBase64([]byte(v))
if err != nil {
return nil, err
}
p.D.SetBytes(v1)
case "created", "publish", "activate":
/* not used in Go (yet) */
}
}
return p, nil
}
func readPrivateKeyED25519(m map[string]string) (ed25519.PrivateKey, error) {
var p ed25519.PrivateKey
// TODO: validate that the required flags are present
for k, v := range m {
switch k {
case "privatekey":
p1, err := fromBase64([]byte(v))
if err != nil {
return nil, err
}
if len(p1) != ed25519.SeedSize {
return nil, ErrPrivKey
}
p = ed25519.NewKeyFromSeed(p1)
case "created", "publish", "activate":
/* not used in Go (yet) */
}
}
return p, nil
}
// parseKey reads a private key from r. It returns a map[string]string,
// with the key-value pairs, or an error when the file is not correct.
func parseKey(r io.Reader, file string) (map[string]string, error) {
m := make(map[string]string)
var k string
c := newKLexer(r)
for l, ok := c.Next(); ok; l, ok = c.Next() {
// It should alternate
switch l.value {
case zKey:
k = l.token
case zValue:
if k == "" {
return nil, &ParseError{file, "no private key seen", l}
}
m[strings.ToLower(k)] = l.token
k = ""
}
}
// Surface any read errors from r.
if err := c.Err(); err != nil {
return nil, &ParseError{file: file, err: err.Error()}
}
return m, nil
}
type klexer struct {
br io.ByteReader
readErr error
line int
column int
key bool
eol bool // end-of-line
}
func newKLexer(r io.Reader) *klexer {
br, ok := r.(io.ByteReader)
if !ok {
br = bufio.NewReaderSize(r, 1024)
}
return &klexer{
br: br,
line: 1,
key: true,
}
}
func (kl *klexer) Err() error {
if kl.readErr == io.EOF {
return nil
}
return kl.readErr
}
// readByte returns the next byte from the input
func (kl *klexer) readByte() (byte, bool) {
if kl.readErr != nil {
return 0, false
}
c, err := kl.br.ReadByte()
if err != nil {
kl.readErr = err
return 0, false
}
// delay the newline handling until the next token is delivered,
// fixes off-by-one errors when reporting a parse error.
if kl.eol {
kl.line++
kl.column = 0
kl.eol = false
}
if c == '\n' {
kl.eol = true
} else {
kl.column++
}
return c, true
}
func (kl *klexer) Next() (lex, bool) {
var (
l lex
str strings.Builder
commt bool
)
for x, ok := kl.readByte(); ok; x, ok = kl.readByte() {
l.line, l.column = kl.line, kl.column
switch x {
case ':':
if commt || !kl.key {
break
}
kl.key = false
// Next token is a space, eat it
kl.readByte()
l.value = zKey
l.token = str.String()
return l, true
case ';':
commt = true
case '\n':
if commt {
// Reset a comment
commt = false
}
if kl.key && str.Len() == 0 {
// ignore empty lines
break
}
kl.key = true
l.value = zValue
l.token = str.String()
return l, true
default:
if commt {
break
}
str.WriteByte(x)
}
}
if kl.readErr != nil && kl.readErr != io.EOF {
// Don't return any tokens after a read error occurs.
return lex{value: zEOF}, false
}
if str.Len() > 0 {
// Send remainder
l.value = zValue
l.token = str.String()
return l, true
}
return lex{value: zEOF}, false
}
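A hedged round-trip sketch showing how NewPrivateKey parses the BIND v1.3 format (it reuses key and priv from the generation example above; the parsed key is the same algorithm-specific type that Generate returned):

    s := key.PrivateKeyString(priv) // "Private-key-format: v1.3\nAlgorithm: 13 ...\nPrivateKey: ..."
    parsed, err := key.NewPrivateKey(s)
    if err != nil {
        log.Fatal(err)
    }
    // parsed is a crypto.PrivateKey (here *ecdsa.PrivateKey) rebuilt from the text form.
    _ = parsed

For keys stored on disk, ReadPrivateKey does the same from an io.Reader; the matching public DNSKEY must already be populated, as noted in the comment above.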

77
vendor/github.com/miekg/dns/dnssec_privkey.go generated vendored Normal file
View File

@@ -0,0 +1,77 @@
package dns
import (
"crypto"
"crypto/ecdsa"
"crypto/ed25519"
"crypto/rsa"
"math/big"
"strconv"
)
const format = "Private-key-format: v1.3\n"
var bigIntOne = big.NewInt(1)
// PrivateKeyString converts a PrivateKey to a string. This string has the same
// format as the private-key-file of BIND9 (Private-key-format: v1.3).
// It needs some info from the key (the algorithm), so it's a method of the DNSKEY.
// It supports *rsa.PrivateKey, *ecdsa.PrivateKey and ed25519.PrivateKey.
func (r *DNSKEY) PrivateKeyString(p crypto.PrivateKey) string {
algorithm := strconv.Itoa(int(r.Algorithm))
algorithm += " (" + AlgorithmToString[r.Algorithm] + ")"
switch p := p.(type) {
case *rsa.PrivateKey:
modulus := toBase64(p.PublicKey.N.Bytes())
e := big.NewInt(int64(p.PublicKey.E))
publicExponent := toBase64(e.Bytes())
privateExponent := toBase64(p.D.Bytes())
prime1 := toBase64(p.Primes[0].Bytes())
prime2 := toBase64(p.Primes[1].Bytes())
// Calculate Exponent1/2 and Coefficient as per: http://en.wikipedia.org/wiki/RSA#Using_the_Chinese_remainder_algorithm
// and from: http://code.google.com/p/go/issues/detail?id=987
p1 := new(big.Int).Sub(p.Primes[0], bigIntOne)
q1 := new(big.Int).Sub(p.Primes[1], bigIntOne)
exp1 := new(big.Int).Mod(p.D, p1)
exp2 := new(big.Int).Mod(p.D, q1)
coeff := new(big.Int).ModInverse(p.Primes[1], p.Primes[0])
exponent1 := toBase64(exp1.Bytes())
exponent2 := toBase64(exp2.Bytes())
coefficient := toBase64(coeff.Bytes())
return format +
"Algorithm: " + algorithm + "\n" +
"Modulus: " + modulus + "\n" +
"PublicExponent: " + publicExponent + "\n" +
"PrivateExponent: " + privateExponent + "\n" +
"Prime1: " + prime1 + "\n" +
"Prime2: " + prime2 + "\n" +
"Exponent1: " + exponent1 + "\n" +
"Exponent2: " + exponent2 + "\n" +
"Coefficient: " + coefficient + "\n"
case *ecdsa.PrivateKey:
var intlen int
switch r.Algorithm {
case ECDSAP256SHA256:
intlen = 32
case ECDSAP384SHA384:
intlen = 48
}
private := toBase64(intToBytes(p.D, intlen))
return format +
"Algorithm: " + algorithm + "\n" +
"PrivateKey: " + private + "\n"
case ed25519.PrivateKey:
private := toBase64(p.Seed())
return format +
"Algorithm: " + algorithm + "\n" +
"PrivateKey: " + private + "\n"
default:
return ""
}
}
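A small sketch of persisting a key pair with the conventional (not required) K&lt;name&gt;+&lt;alg&gt;+&lt;keytag&gt; file naming; os.WriteFile and the key/priv pair from the earlier examples are assumed:

    tag := key.KeyTag()
    base := fmt.Sprintf("K%s+%03d+%05d", key.Hdr.Name, key.Algorithm, tag)
    if err := os.WriteFile(base+".key", []byte(key.String()+"\n"), 0o644); err != nil {
        log.Fatal(err)
    }
    if err := os.WriteFile(base+".private", []byte(key.PrivateKeyString(priv)), 0o600); err != nil {
        log.Fatal(err)
    }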

292
vendor/github.com/miekg/dns/doc.go generated vendored Normal file
View File

@@ -0,0 +1,292 @@
/*
Package dns implements a full featured interface to the Domain Name System.
Both server- and client-side programming is supported. The package allows
complete control over what is sent out to the DNS. The API follows the
less-is-more principle, by presenting a small, clean interface.
It supports (asynchronous) querying/replying, incoming/outgoing zone transfers,
TSIG, EDNS0, dynamic updates, notifies and DNSSEC validation/signing.
Note that domain names MUST be fully qualified before sending them; unqualified
names in a message will result in a packing failure.
Resource records are native types. They are not stored in wire format. Basic
usage pattern for creating a new resource record:
r := new(dns.MX)
r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX, Class: dns.ClassINET, Ttl: 3600}
r.Preference = 10
r.Mx = "mx.miek.nl."
Or directly from a string:
mx, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.")
Or when the default origin (.) and TTL (3600) and class (IN) suit you:
mx, err := dns.NewRR("miek.nl MX 10 mx.miek.nl")
Or even:
mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek")
In the DNS, messages are exchanged; these messages contain resource records
(sets). Basic use pattern for creating a message:
m := new(dns.Msg)
m.SetQuestion("miek.nl.", dns.TypeMX)
Or when not certain if the domain name is fully qualified:
m.SetQuestion(dns.Fqdn("miek.nl"), dns.TypeMX)
The message m is now a message with the question section set to ask the MX
records for the miek.nl. zone.
The following is slightly more verbose, but more flexible:
m1 := new(dns.Msg)
m1.Id = dns.Id()
m1.RecursionDesired = true
m1.Question = make([]dns.Question, 1)
m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET}
After creating a message it can be sent. Basic use pattern for synchronous
querying the DNS at a server configured on 127.0.0.1 and port 53:
c := new(dns.Client)
in, rtt, err := c.Exchange(m1, "127.0.0.1:53")
Suppressing multiple outstanding queries (with the same question, type and
class) is as easy as setting:
c.SingleInflight = true
More advanced options are available using a net.Dialer and the corresponding API.
For example it is possible to set a timeout, or to specify a source IP address
and port to use for the connection:
c := new(dns.Client)
laddr := net.UDPAddr{
IP: net.ParseIP("::1"),
Port: 12345,
Zone: "",
}
c.Dialer = &net.Dialer{
Timeout: 200 * time.Millisecond,
LocalAddr: &laddr,
}
in, rtt, err := c.Exchange(m1, "8.8.8.8:53")
If these "advanced" features are not needed, a simple UDP query can be sent,
with:
in, err := dns.Exchange(m1, "127.0.0.1:53")
When this function returns you will get a DNS message. A DNS message consists
of four sections.
The question section: in.Question, the answer section: in.Answer,
the authority section: in.Ns and the additional section: in.Extra.
Each of these sections (except the Question section) contain a []RR. Basic
use pattern for accessing the rdata of a TXT RR as the first RR in
the Answer section:
if t, ok := in.Answer[0].(*dns.TXT); ok {
// do something with t.Txt
}
Domain Name and TXT Character String Representations
Both domain names and TXT character strings are converted to presentation form
both when unpacked and when converted to strings.
For TXT character strings, tabs, carriage returns and line feeds will be
converted to \t, \r and \n respectively. Backslashes and quotation marks will
be escaped. Bytes below 32 and above 127 will be converted to \DDD form.
For domain names, in addition to the above rules brackets, periods, spaces,
semicolons and the at symbol are escaped.
DNSSEC
DNSSEC (DNS Security Extension) adds a layer of security to the DNS. It uses
public key cryptography to sign resource records. The public keys are stored in
DNSKEY records and the signatures in RRSIG records.
Requesting DNSSEC information for a zone is done by adding the DO (DNSSEC OK)
bit to a request.
m := new(dns.Msg)
m.SetEdns0(4096, true)
Signature generation, signature verification and key generation are all supported.
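A rough sketch of signing and verifying an RRset (field values are illustrative and the exact signer requirements should be checked against the RRSIG and DNSKEY types; the type assertion assumes an ECDSA key as returned by Generate):

    key := new(dns.DNSKEY)
    key.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 3600}
    key.Flags, key.Protocol, key.Algorithm = 256, 3, dns.ECDSAP256SHA256
    priv, _ := key.Generate(256)

    mx, _ := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.")

    sig := new(dns.RRSIG)
    sig.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeRRSIG, Class: dns.ClassINET, Ttl: 3600}
    sig.Inception = uint32(time.Now().Unix())
    sig.Expiration = uint32(time.Now().Add(14 * 24 * time.Hour).Unix())
    sig.KeyTag = key.KeyTag()
    sig.SignerName = key.Hdr.Name
    sig.Algorithm = key.Algorithm

    if err := sig.Sign(priv.(*ecdsa.PrivateKey), []dns.RR{mx}); err != nil { ... }
    if err := sig.Verify(key, []dns.RR{mx}); err != nil { ... }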
DYNAMIC UPDATES
Dynamic updates reuse the DNS message format, but rename three of the
sections. Question is Zone, Answer is Prerequisite, Authority is Update, only
the Additional is not renamed. See RFC 2136 for the gory details.
You can set a rather complex set of rules for the existence or absence of
certain resource records or names in a zone to specify if resource records
should be added or removed. The table from RFC 2136 supplemented with the Go
DNS function shows which functions exist to specify the prerequisites.
3.2.4 - Table Of Metavalues Used In Prerequisite Section
CLASS TYPE RDATA Meaning Function
--------------------------------------------------------------
ANY ANY empty Name is in use dns.NameUsed
ANY rrset empty RRset exists (value indep) dns.RRsetUsed
NONE ANY empty Name is not in use dns.NameNotUsed
NONE rrset empty RRset does not exist dns.RRsetNotUsed
zone rrset rr RRset exists (value dep) dns.Used
The prerequisite section can also be left empty. If you have decided on the
prerequisites you can tell what RRs should be added or deleted. The next table
shows the options you have and what functions to call.
3.4.2.6 - Table Of Metavalues Used In Update Section
CLASS TYPE RDATA Meaning Function
---------------------------------------------------------------
ANY ANY empty Delete all RRsets from name dns.RemoveName
ANY rrset empty Delete an RRset dns.RemoveRRset
NONE rrset rr Delete an RR from RRset dns.Remove
zone rrset rr Add to an RRset dns.Insert
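For example, adding an A record to the miek.nl. zone, but only if the name is
not yet in use, could look like this (server address and record are placeholders):

    rr, _ := dns.NewRR("host.miek.nl. 3600 IN A 192.0.2.1")
    m := new(dns.Msg)
    m.SetUpdate("miek.nl.")
    m.NameNotUsed([]dns.RR{rr}) // prerequisite: the name must not exist
    m.Insert([]dns.RR{rr})      // update: add the RR to the zone

    c := new(dns.Client)
    in, _, err := c.Exchange(m, "127.0.0.1:53")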
TRANSACTION SIGNATURE
A TSIG or transaction signature adds an HMAC TSIG record to each message sent.
The supported algorithms include: HmacSHA1, HmacSHA256 and HmacSHA512.
Basic use pattern when querying with a TSIG name "axfr." (note that these key names
must be fully qualified - as they are domain names) and the base64 secret
"so6ZGir4GPAqINNh9U5c3A==":
If an incoming message contains a TSIG record it MUST be the last record in
the additional section (RFC2845 3.2). This means that you should make the
call to SetTsig last, right before executing the query. If you make any
changes to the RRset after calling SetTsig() the signature will be incorrect.
c := new(dns.Client)
c.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
m := new(dns.Msg)
m.SetQuestion("miek.nl.", dns.TypeMX)
m.SetTsig("axfr.", dns.HmacSHA256, 300, time.Now().Unix())
...
// When sending the TSIG RR is calculated and filled in before sending
When requesting a zone transfer with TSIG (almost all TSIG usage is for zone
transfers), this is the basic use pattern. In this example we
request an AXFR for miek.nl. with TSIG key named "axfr." and secret
"so6ZGir4GPAqINNh9U5c3A==" and using the server 176.58.119.54:
t := new(dns.Transfer)
m := new(dns.Msg)
t.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
m.SetAxfr("miek.nl.")
m.SetTsig("axfr.", dns.HmacSHA256, 300, time.Now().Unix())
c, err := t.In(m, "176.58.119.54:53")
for r := range c { ... }
You can now read the records from the transfer as they come in. Each envelope
is checked with TSIG. If something is not correct an error is returned.
A custom TSIG implementation can be used. This requires additional code to
perform any session establishment and signature generation/verification. The
client must be configured with an implementation of the TsigProvider interface:
type Provider struct{}
func (*Provider) Generate(msg []byte, tsig *dns.TSIG) ([]byte, error) {
// Use tsig.Hdr.Name and tsig.Algorithm in your code to
// generate the MAC using msg as the payload.
}
func (*Provider) Verify(msg []byte, tsig *dns.TSIG) error {
// Use tsig.Hdr.Name and tsig.Algorithm in your code to verify
// that msg matches the value in tsig.MAC.
}
c := new(dns.Client)
c.TsigProvider = new(Provider)
m := new(dns.Msg)
m.SetQuestion("miek.nl.", dns.TypeMX)
m.SetTsig(keyname, dns.HmacSHA256, 300, time.Now().Unix())
...
// TSIG RR is calculated by calling your Generate method
Basic use pattern validating and replying to a message that has TSIG set.
server := &dns.Server{Addr: ":53", Net: "udp"}
server.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
go server.ListenAndServe()
dns.HandleFunc(".", handleRequest)
func handleRequest(w dns.ResponseWriter, r *dns.Msg) {
m := new(dns.Msg)
m.SetReply(r)
if r.IsTsig() != nil {
if w.TsigStatus() == nil {
// *Msg r has a TSIG record and it was validated
m.SetTsig("axfr.", dns.HmacSHA256, 300, time.Now().Unix())
} else {
// *Msg r has a TSIG record and it was not validated
}
}
w.WriteMsg(m)
}
PRIVATE RRS
RFC 6895 sets aside a range of type codes for private use. This range is 65,280
- 65,534 (0xFF00 - 0xFFFE). When experimenting with new Resource Records these
can be used, before requesting an official type code from IANA.
See https://miek.nl/2014/september/21/idn-and-private-rr-in-go-dns/ for more
information.
EDNS0
EDNS0 is an extension mechanism for the DNS defined in RFC 2671 and updated by
RFC 6891. It defines a new RR type, the OPT RR, which is then completely
abused.
Basic use pattern for creating an (empty) OPT RR:
o := new(dns.OPT)
o.Hdr.Name = "." // MUST be the root zone, per definition.
o.Hdr.Rrtype = dns.TypeOPT
The rdata of an OPT RR consists of a slice of EDNS0 (RFC 6891) interfaces.
Currently only a few have been standardized: EDNS0_NSID (RFC 5001) and
EDNS0_SUBNET (RFC 7871). Note that these options may be combined in an OPT RR.
Basic use pattern for a server to check if (and which) options are set:
// o is a dns.OPT
for _, s := range o.Option {
switch e := s.(type) {
case *dns.EDNS0_NSID:
// do stuff with e.Nsid
case *dns.EDNS0_SUBNET:
// access e.Family, e.Address, etc.
}
}
SIG(0)
From RFC 2931:
SIG(0) provides protection for DNS transactions and requests ....
... protection for glue records, DNS requests, protection for message headers
on requests and responses, and protection of the overall integrity of a response.
It works like TSIG, except that SIG(0) uses public key cryptography, instead of
the shared secret approach in TSIG. Supported algorithms: ECDSAP256SHA256,
ECDSAP384SHA384, RSASHA1, RSASHA256 and RSASHA512.
Signing subsequent messages in multi-message sessions is not implemented.
*/
package dns

37
vendor/github.com/miekg/dns/duplicate.go generated vendored Normal file
View File

@@ -0,0 +1,37 @@
package dns
//go:generate go run duplicate_generate.go
// IsDuplicate checks if r1 and r2 are duplicates of each other, excluding the TTL.
// So this means the header data is equal *and* the RDATA is the same. Returns true
// if so, otherwise false. It's a protocol violation to have identical RRs in a message.
func IsDuplicate(r1, r2 RR) bool {
// Check whether the record header is identical.
if !r1.Header().isDuplicate(r2.Header()) {
return false
}
// Check whether the RDATA is identical.
return r1.isDuplicate(r2)
}
func (r1 *RR_Header) isDuplicate(_r2 RR) bool {
r2, ok := _r2.(*RR_Header)
if !ok {
return false
}
if r1.Class != r2.Class {
return false
}
if r1.Rrtype != r2.Rrtype {
return false
}
if !isDuplicateName(r1.Name, r2.Name) {
return false
}
// ignore TTL
return true
}
// isDuplicateName checks if the domain names s1 and s2 are equal.
func isDuplicateName(s1, s2 string) bool { return equal(s1, s2) }
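A short illustration (not part of the vendored file; fmt and dns imports assumed): two A records that differ only in TTL are duplicates, while records with different RDATA are not.

    r1, _ := dns.NewRR("miek.nl. 3600 IN A 192.0.2.1")
    r2, _ := dns.NewRR("miek.nl. 1800 IN A 192.0.2.1")
    r3, _ := dns.NewRR("miek.nl. 3600 IN A 192.0.2.2")
    fmt.Println(dns.IsDuplicate(r1, r2)) // true, the TTL is ignored
    fmt.Println(dns.IsDuplicate(r1, r3)) // false, different RDATA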

839
vendor/github.com/miekg/dns/edns.go generated vendored Normal file
View File

@@ -0,0 +1,839 @@
package dns
import (
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"net"
"strconv"
)
// EDNS0 Option codes.
const (
EDNS0LLQ = 0x1 // long lived queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01
EDNS0UL = 0x2 // update lease draft: http://files.dns-sd.org/draft-sekar-dns-ul.txt
EDNS0NSID = 0x3 // nsid (See RFC 5001)
EDNS0ESU = 0x4 // ENUM Source-URI draft: https://datatracker.ietf.org/doc/html/draft-kaplan-enum-source-uri-00
EDNS0DAU = 0x5 // DNSSEC Algorithm Understood
EDNS0DHU = 0x6 // DS Hash Understood
EDNS0N3U = 0x7 // NSEC3 Hash Understood
EDNS0SUBNET = 0x8 // client-subnet (See RFC 7871)
EDNS0EXPIRE = 0x9 // EDNS0 expire
EDNS0COOKIE = 0xa // EDNS0 Cookie
EDNS0TCPKEEPALIVE = 0xb // EDNS0 tcp keep alive (See RFC 7828)
EDNS0PADDING = 0xc // EDNS0 padding (See RFC 7830)
EDNS0EDE = 0xf // EDNS0 extended DNS errors (See RFC 8914)
EDNS0LOCALSTART = 0xFDE9 // Beginning of range reserved for local/experimental use (See RFC 6891)
EDNS0LOCALEND = 0xFFFE // End of range reserved for local/experimental use (See RFC 6891)
_DO = 1 << 15 // DNSSEC OK
)
// makeDataOpt is used to unpack the EDNS0 option(s) from a message.
func makeDataOpt(code uint16) EDNS0 {
// All the EDNS0.* constants above need to be in this switch.
switch code {
case EDNS0LLQ:
return new(EDNS0_LLQ)
case EDNS0UL:
return new(EDNS0_UL)
case EDNS0NSID:
return new(EDNS0_NSID)
case EDNS0DAU:
return new(EDNS0_DAU)
case EDNS0DHU:
return new(EDNS0_DHU)
case EDNS0N3U:
return new(EDNS0_N3U)
case EDNS0SUBNET:
return new(EDNS0_SUBNET)
case EDNS0EXPIRE:
return new(EDNS0_EXPIRE)
case EDNS0COOKIE:
return new(EDNS0_COOKIE)
case EDNS0TCPKEEPALIVE:
return new(EDNS0_TCP_KEEPALIVE)
case EDNS0PADDING:
return new(EDNS0_PADDING)
case EDNS0EDE:
return new(EDNS0_EDE)
case EDNS0ESU:
return &EDNS0_ESU{Code: EDNS0ESU}
default:
e := new(EDNS0_LOCAL)
e.Code = code
return e
}
}
// OPT is the EDNS0 RR appended to messages to convey extra (meta) information.
// See RFC 6891.
type OPT struct {
Hdr RR_Header
Option []EDNS0 `dns:"opt"`
}
func (rr *OPT) String() string {
s := "\n;; OPT PSEUDOSECTION:\n; EDNS: version " + strconv.Itoa(int(rr.Version())) + "; "
if rr.Do() {
s += "flags: do; "
} else {
s += "flags: ; "
}
s += "udp: " + strconv.Itoa(int(rr.UDPSize()))
for _, o := range rr.Option {
switch o.(type) {
case *EDNS0_NSID:
s += "\n; NSID: " + o.String()
h, e := o.pack()
var r string
if e == nil {
for _, c := range h {
r += "(" + string(c) + ")"
}
s += " " + r
}
case *EDNS0_SUBNET:
s += "\n; SUBNET: " + o.String()
case *EDNS0_COOKIE:
s += "\n; COOKIE: " + o.String()
case *EDNS0_TCP_KEEPALIVE:
s += "\n; KEEPALIVE: " + o.String()
case *EDNS0_UL:
s += "\n; UPDATE LEASE: " + o.String()
case *EDNS0_LLQ:
s += "\n; LONG LIVED QUERIES: " + o.String()
case *EDNS0_DAU:
s += "\n; DNSSEC ALGORITHM UNDERSTOOD: " + o.String()
case *EDNS0_DHU:
s += "\n; DS HASH UNDERSTOOD: " + o.String()
case *EDNS0_N3U:
s += "\n; NSEC3 HASH UNDERSTOOD: " + o.String()
case *EDNS0_LOCAL:
s += "\n; LOCAL OPT: " + o.String()
case *EDNS0_PADDING:
s += "\n; PADDING: " + o.String()
case *EDNS0_EDE:
s += "\n; EDE: " + o.String()
case *EDNS0_ESU:
s += "\n; ESU: " + o.String()
}
}
return s
}
func (rr *OPT) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
for _, o := range rr.Option {
l += 4 // Account for 2-byte option code and 2-byte option length.
lo, _ := o.pack()
l += len(lo)
}
return l
}
func (*OPT) parse(c *zlexer, origin string) *ParseError {
return &ParseError{err: "OPT records do not have a presentation format"}
}
func (rr *OPT) isDuplicate(r2 RR) bool { return false }
// return the old value -> delete SetVersion?
// Version returns the EDNS version used. Only zero is defined.
func (rr *OPT) Version() uint8 {
return uint8(rr.Hdr.Ttl & 0x00FF0000 >> 16)
}
// SetVersion sets the version of EDNS. This is usually zero.
func (rr *OPT) SetVersion(v uint8) {
rr.Hdr.Ttl = rr.Hdr.Ttl&0xFF00FFFF | uint32(v)<<16
}
// ExtendedRcode returns the EDNS extended RCODE field (the upper 8 bits of the TTL).
func (rr *OPT) ExtendedRcode() int {
return int(rr.Hdr.Ttl&0xFF000000>>24) << 4
}
// SetExtendedRcode sets the EDNS extended RCODE field.
//
// If the RCODE is not an extended RCODE, will reset the extended RCODE field to 0.
func (rr *OPT) SetExtendedRcode(v uint16) {
rr.Hdr.Ttl = rr.Hdr.Ttl&0x00FFFFFF | uint32(v>>4)<<24
}
// UDPSize returns the UDP buffer size.
func (rr *OPT) UDPSize() uint16 {
return rr.Hdr.Class
}
// SetUDPSize sets the UDP buffer size.
func (rr *OPT) SetUDPSize(size uint16) {
rr.Hdr.Class = size
}
// Do returns the value of the DO (DNSSEC OK) bit.
func (rr *OPT) Do() bool {
return rr.Hdr.Ttl&_DO == _DO
}
// SetDo sets the DO (DNSSEC OK) bit.
// If we pass an argument, set the DO bit to that value.
// It is possible to pass 2 or more arguments. Any arguments after the first are silently ignored.
func (rr *OPT) SetDo(do ...bool) {
if len(do) == 1 {
if do[0] {
rr.Hdr.Ttl |= _DO
} else {
rr.Hdr.Ttl &^= _DO
}
} else {
rr.Hdr.Ttl |= _DO
}
}
// Z returns the Z part of the OPT RR as a uint16 with only the 15 least significant bits used.
func (rr *OPT) Z() uint16 {
return uint16(rr.Hdr.Ttl & 0x7FFF)
}
// SetZ sets the Z part of the OPT RR, note only the 15 least significant bits of z are used.
func (rr *OPT) SetZ(z uint16) {
rr.Hdr.Ttl = rr.Hdr.Ttl&^0x7FFF | uint32(z&0x7FFF)
}
// EDNS0 defines an EDNS0 Option. An OPT RR can have multiple options appended to it.
type EDNS0 interface {
// Option returns the option code for the option.
Option() uint16
// pack returns the bytes of the option data.
pack() ([]byte, error)
// unpack sets the data as found in the buffer. It also sets
// the length of the slice as the length of the option data.
unpack([]byte) error
// String returns the string representation of the option.
String() string
// copy returns a deep-copy of the option.
copy() EDNS0
}
// EDNS0_NSID option is used to retrieve a nameserver
// identifier. When sending a request, Nsid must be set to the empty string.
// The identifier is an opaque string encoded as hex.
// Basic use pattern for creating an nsid option:
//
// o := new(dns.OPT)
// o.Hdr.Name = "."
// o.Hdr.Rrtype = dns.TypeOPT
// e := new(dns.EDNS0_NSID)
// e.Code = dns.EDNS0NSID
// e.Nsid = "AA"
// o.Option = append(o.Option, e)
type EDNS0_NSID struct {
Code uint16 // Always EDNS0NSID
Nsid string // This string needs to be hex encoded
}
func (e *EDNS0_NSID) pack() ([]byte, error) {
h, err := hex.DecodeString(e.Nsid)
if err != nil {
return nil, err
}
return h, nil
}
// Option implements the EDNS0 interface.
func (e *EDNS0_NSID) Option() uint16 { return EDNS0NSID } // Option returns the option code.
func (e *EDNS0_NSID) unpack(b []byte) error { e.Nsid = hex.EncodeToString(b); return nil }
func (e *EDNS0_NSID) String() string { return e.Nsid }
func (e *EDNS0_NSID) copy() EDNS0 { return &EDNS0_NSID{e.Code, e.Nsid} }
// EDNS0_SUBNET is the subnet option that is used to give the remote nameserver
// an idea of where the client lives. See RFC 7871. It can then give back a different
// answer depending on the location or network topology.
// Basic use pattern for creating a subnet option:
//
// o := new(dns.OPT)
// o.Hdr.Name = "."
// o.Hdr.Rrtype = dns.TypeOPT
// e := new(dns.EDNS0_SUBNET)
// e.Code = dns.EDNS0SUBNET
// e.Family = 1 // 1 for IPv4 source address, 2 for IPv6
// e.SourceNetmask = 32 // 32 for IPV4, 128 for IPv6
// e.SourceScope = 0
// e.Address = net.ParseIP("127.0.0.1").To4() // for IPv4
// // e.Address = net.ParseIP("2001:7b8:32a::2") // for IPV6
// o.Option = append(o.Option, e)
//
// This code will parse all the available bits when unpacking (up to optlen).
// When packing it will apply SourceNetmask. If you need more advanced logic,
// patches welcome and good luck.
type EDNS0_SUBNET struct {
Code uint16 // Always EDNS0SUBNET
Family uint16 // 1 for IPv4, 2 for IPv6
SourceNetmask uint8
SourceScope uint8
Address net.IP
}
// Option implements the EDNS0 interface.
func (e *EDNS0_SUBNET) Option() uint16 { return EDNS0SUBNET }
func (e *EDNS0_SUBNET) pack() ([]byte, error) {
b := make([]byte, 4)
binary.BigEndian.PutUint16(b[0:], e.Family)
b[2] = e.SourceNetmask
b[3] = e.SourceScope
switch e.Family {
case 0:
// "dig" sets AddressFamily to 0 if SourceNetmask is also 0
// We may not need to complain either
if e.SourceNetmask != 0 {
return nil, errors.New("dns: bad address family")
}
case 1:
if e.SourceNetmask > net.IPv4len*8 {
return nil, errors.New("dns: bad netmask")
}
if len(e.Address.To4()) != net.IPv4len {
return nil, errors.New("dns: bad address")
}
ip := e.Address.To4().Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv4len*8))
needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up
b = append(b, ip[:needLength]...)
case 2:
if e.SourceNetmask > net.IPv6len*8 {
return nil, errors.New("dns: bad netmask")
}
if len(e.Address) != net.IPv6len {
return nil, errors.New("dns: bad address")
}
ip := e.Address.Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv6len*8))
needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up
b = append(b, ip[:needLength]...)
default:
return nil, errors.New("dns: bad address family")
}
return b, nil
}
func (e *EDNS0_SUBNET) unpack(b []byte) error {
if len(b) < 4 {
return ErrBuf
}
e.Family = binary.BigEndian.Uint16(b)
e.SourceNetmask = b[2]
e.SourceScope = b[3]
switch e.Family {
case 0:
// "dig" sets AddressFamily to 0 if SourceNetmask is also 0
// It's okay to accept such a packet
if e.SourceNetmask != 0 {
return errors.New("dns: bad address family")
}
e.Address = net.IPv4(0, 0, 0, 0)
case 1:
if e.SourceNetmask > net.IPv4len*8 || e.SourceScope > net.IPv4len*8 {
return errors.New("dns: bad netmask")
}
addr := make(net.IP, net.IPv4len)
copy(addr, b[4:])
e.Address = addr.To16()
case 2:
if e.SourceNetmask > net.IPv6len*8 || e.SourceScope > net.IPv6len*8 {
return errors.New("dns: bad netmask")
}
addr := make(net.IP, net.IPv6len)
copy(addr, b[4:])
e.Address = addr
default:
return errors.New("dns: bad address family")
}
return nil
}
func (e *EDNS0_SUBNET) String() (s string) {
if e.Address == nil {
s = "<nil>"
} else if e.Address.To4() != nil {
s = e.Address.String()
} else {
s = "[" + e.Address.String() + "]"
}
s += "/" + strconv.Itoa(int(e.SourceNetmask)) + "/" + strconv.Itoa(int(e.SourceScope))
return
}
func (e *EDNS0_SUBNET) copy() EDNS0 {
return &EDNS0_SUBNET{
e.Code,
e.Family,
e.SourceNetmask,
e.SourceScope,
e.Address,
}
}
// The EDNS0_COOKIE option is used to add a DNS Cookie to a message.
//
// o := new(dns.OPT)
// o.Hdr.Name = "."
// o.Hdr.Rrtype = dns.TypeOPT
// e := new(dns.EDNS0_COOKIE)
// e.Code = dns.EDNS0COOKIE
// e.Cookie = "24a5ac.."
// o.Option = append(o.Option, e)
//
// The Cookie field consists of a client cookie (RFC 7873 Section 4), which is
// always 8 bytes. It may then optionally be followed by the server cookie. The server
// cookie is of variable length, 8 to a maximum of 32 bytes. In other words:
//
// cCookie := o.Cookie[:16]
// sCookie := o.Cookie[16:]
//
// There is no guarantee that the Cookie string has a specific length.
type EDNS0_COOKIE struct {
Code uint16 // Always EDNS0COOKIE
Cookie string // Hex-encoded cookie data
}
func (e *EDNS0_COOKIE) pack() ([]byte, error) {
h, err := hex.DecodeString(e.Cookie)
if err != nil {
return nil, err
}
return h, nil
}
// Option implements the EDNS0 interface.
func (e *EDNS0_COOKIE) Option() uint16 { return EDNS0COOKIE }
func (e *EDNS0_COOKIE) unpack(b []byte) error { e.Cookie = hex.EncodeToString(b); return nil }
func (e *EDNS0_COOKIE) String() string { return e.Cookie }
func (e *EDNS0_COOKIE) copy() EDNS0 { return &EDNS0_COOKIE{e.Code, e.Cookie} }
// The EDNS0_UL (Update Lease) (draft RFC) option is used to tell the server to set
// an expiration on an update RR. This is helpful for clients that cannot clean
// up after themselves. This is a draft RFC and more information can be found at
// https://tools.ietf.org/html/draft-sekar-dns-ul-02
//
// o := new(dns.OPT)
// o.Hdr.Name = "."
// o.Hdr.Rrtype = dns.TypeOPT
// e := new(dns.EDNS0_UL)
// e.Code = dns.EDNS0UL
// e.Lease = 120 // in seconds
// o.Option = append(o.Option, e)
type EDNS0_UL struct {
Code uint16 // Always EDNS0UL
Lease uint32
KeyLease uint32
}
// Option implements the EDNS0 interface.
func (e *EDNS0_UL) Option() uint16 { return EDNS0UL }
func (e *EDNS0_UL) String() string { return fmt.Sprintf("%d %d", e.Lease, e.KeyLease) }
func (e *EDNS0_UL) copy() EDNS0 { return &EDNS0_UL{e.Code, e.Lease, e.KeyLease} }
// Copied: http://golang.org/src/pkg/net/dnsmsg.go
func (e *EDNS0_UL) pack() ([]byte, error) {
var b []byte
if e.KeyLease == 0 {
b = make([]byte, 4)
} else {
b = make([]byte, 8)
binary.BigEndian.PutUint32(b[4:], e.KeyLease)
}
binary.BigEndian.PutUint32(b, e.Lease)
return b, nil
}
func (e *EDNS0_UL) unpack(b []byte) error {
switch len(b) {
case 4:
e.KeyLease = 0
case 8:
e.KeyLease = binary.BigEndian.Uint32(b[4:])
default:
return ErrBuf
}
e.Lease = binary.BigEndian.Uint32(b)
return nil
}
// EDNS0_LLQ stands for Long Lived Queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01
// Implemented for completeness, as the EDNS0 type code is assigned.
type EDNS0_LLQ struct {
Code uint16 // Always EDNS0LLQ
Version uint16
Opcode uint16
Error uint16
Id uint64
LeaseLife uint32
}
// Option implements the EDNS0 interface.
func (e *EDNS0_LLQ) Option() uint16 { return EDNS0LLQ }
func (e *EDNS0_LLQ) pack() ([]byte, error) {
b := make([]byte, 18)
binary.BigEndian.PutUint16(b[0:], e.Version)
binary.BigEndian.PutUint16(b[2:], e.Opcode)
binary.BigEndian.PutUint16(b[4:], e.Error)
binary.BigEndian.PutUint64(b[6:], e.Id)
binary.BigEndian.PutUint32(b[14:], e.LeaseLife)
return b, nil
}
func (e *EDNS0_LLQ) unpack(b []byte) error {
if len(b) < 18 {
return ErrBuf
}
e.Version = binary.BigEndian.Uint16(b[0:])
e.Opcode = binary.BigEndian.Uint16(b[2:])
e.Error = binary.BigEndian.Uint16(b[4:])
e.Id = binary.BigEndian.Uint64(b[6:])
e.LeaseLife = binary.BigEndian.Uint32(b[14:])
return nil
}
func (e *EDNS0_LLQ) String() string {
s := strconv.FormatUint(uint64(e.Version), 10) + " " + strconv.FormatUint(uint64(e.Opcode), 10) +
" " + strconv.FormatUint(uint64(e.Error), 10) + " " + strconv.FormatUint(e.Id, 10) +
" " + strconv.FormatUint(uint64(e.LeaseLife), 10)
return s
}
func (e *EDNS0_LLQ) copy() EDNS0 {
return &EDNS0_LLQ{e.Code, e.Version, e.Opcode, e.Error, e.Id, e.LeaseLife}
}
// EDNS0_DAU implements the EDNS0 "DNSSEC Algorithm Understood" option. See RFC 6975.
type EDNS0_DAU struct {
Code uint16 // Always EDNS0DAU
AlgCode []uint8
}
// Option implements the EDNS0 interface.
func (e *EDNS0_DAU) Option() uint16 { return EDNS0DAU }
func (e *EDNS0_DAU) pack() ([]byte, error) { return e.AlgCode, nil }
func (e *EDNS0_DAU) unpack(b []byte) error { e.AlgCode = b; return nil }
func (e *EDNS0_DAU) String() string {
s := ""
for _, alg := range e.AlgCode {
if a, ok := AlgorithmToString[alg]; ok {
s += " " + a
} else {
s += " " + strconv.Itoa(int(alg))
}
}
return s
}
func (e *EDNS0_DAU) copy() EDNS0 { return &EDNS0_DAU{e.Code, e.AlgCode} }
// EDNS0_DHU implements the EDNS0 "DS Hash Understood" option. See RFC 6975.
type EDNS0_DHU struct {
Code uint16 // Always EDNS0DHU
AlgCode []uint8
}
// Option implements the EDNS0 interface.
func (e *EDNS0_DHU) Option() uint16 { return EDNS0DHU }
func (e *EDNS0_DHU) pack() ([]byte, error) { return e.AlgCode, nil }
func (e *EDNS0_DHU) unpack(b []byte) error { e.AlgCode = b; return nil }
func (e *EDNS0_DHU) String() string {
s := ""
for _, alg := range e.AlgCode {
if a, ok := HashToString[alg]; ok {
s += " " + a
} else {
s += " " + strconv.Itoa(int(alg))
}
}
return s
}
func (e *EDNS0_DHU) copy() EDNS0 { return &EDNS0_DHU{e.Code, e.AlgCode} }
// EDNS0_N3U implements the EDNS0 "NSEC3 Hash Understood" option. See RFC 6975.
type EDNS0_N3U struct {
Code uint16 // Always EDNS0N3U
AlgCode []uint8
}
// Option implements the EDNS0 interface.
func (e *EDNS0_N3U) Option() uint16 { return EDNS0N3U }
func (e *EDNS0_N3U) pack() ([]byte, error) { return e.AlgCode, nil }
func (e *EDNS0_N3U) unpack(b []byte) error { e.AlgCode = b; return nil }
func (e *EDNS0_N3U) String() string {
// Re-use the hash map
s := ""
for _, alg := range e.AlgCode {
if a, ok := HashToString[alg]; ok {
s += " " + a
} else {
s += " " + strconv.Itoa(int(alg))
}
}
return s
}
func (e *EDNS0_N3U) copy() EDNS0 { return &EDNS0_N3U{e.Code, e.AlgCode} }
// EDNS0_EXPIRE implements the EDNS0 option as described in RFC 7314.
type EDNS0_EXPIRE struct {
Code uint16 // Always EDNS0EXPIRE
Expire uint32
}
// Option implements the EDNS0 interface.
func (e *EDNS0_EXPIRE) Option() uint16 { return EDNS0EXPIRE }
func (e *EDNS0_EXPIRE) String() string { return strconv.FormatUint(uint64(e.Expire), 10) }
func (e *EDNS0_EXPIRE) copy() EDNS0 { return &EDNS0_EXPIRE{e.Code, e.Expire} }
func (e *EDNS0_EXPIRE) pack() ([]byte, error) {
b := make([]byte, 4)
binary.BigEndian.PutUint32(b, e.Expire)
return b, nil
}
func (e *EDNS0_EXPIRE) unpack(b []byte) error {
if len(b) == 0 {
// zero-length EXPIRE query, see RFC 7314 Section 2
return nil
}
if len(b) < 4 {
return ErrBuf
}
e.Expire = binary.BigEndian.Uint32(b)
return nil
}
// The EDNS0_LOCAL option is used for local/experimental purposes. The option
// code is recommended to be within the range [EDNS0LOCALSTART, EDNS0LOCALEND]
// (RFC6891), although any unassigned code can actually be used. The content of
// the option is made available in Data, unaltered.
// Basic use pattern for creating a local option:
//
// o := new(dns.OPT)
// o.Hdr.Name = "."
// o.Hdr.Rrtype = dns.TypeOPT
// e := new(dns.EDNS0_LOCAL)
// e.Code = dns.EDNS0LOCALSTART
// e.Data = []byte{72, 82, 74}
// o.Option = append(o.Option, e)
type EDNS0_LOCAL struct {
Code uint16
Data []byte
}
// Option implements the EDNS0 interface.
func (e *EDNS0_LOCAL) Option() uint16 { return e.Code }
func (e *EDNS0_LOCAL) String() string {
return strconv.FormatInt(int64(e.Code), 10) + ":0x" + hex.EncodeToString(e.Data)
}
func (e *EDNS0_LOCAL) copy() EDNS0 {
b := make([]byte, len(e.Data))
copy(b, e.Data)
return &EDNS0_LOCAL{e.Code, b}
}
func (e *EDNS0_LOCAL) pack() ([]byte, error) {
b := make([]byte, len(e.Data))
copied := copy(b, e.Data)
if copied != len(e.Data) {
return nil, ErrBuf
}
return b, nil
}
func (e *EDNS0_LOCAL) unpack(b []byte) error {
e.Data = make([]byte, len(b))
copied := copy(e.Data, b)
if copied != len(b) {
return ErrBuf
}
return nil
}
// EDNS0_TCP_KEEPALIVE is an EDNS0 option that instructs the server to keep
// the TCP connection alive. See RFC 7828.
type EDNS0_TCP_KEEPALIVE struct {
Code uint16 // Always EDNS0TCPKEEPALIVE
// Timeout is an idle timeout value for the TCP connection, specified in
// units of 100 milliseconds, encoded in network byte order. If set to 0,
// pack will return a nil slice.
Timeout uint16
// Length is the option's length.
// Deprecated: this field is deprecated and is always equal to 0.
Length uint16
}
// Option implements the EDNS0 interface.
func (e *EDNS0_TCP_KEEPALIVE) Option() uint16 { return EDNS0TCPKEEPALIVE }
func (e *EDNS0_TCP_KEEPALIVE) pack() ([]byte, error) {
if e.Timeout > 0 {
b := make([]byte, 2)
binary.BigEndian.PutUint16(b, e.Timeout)
return b, nil
}
return nil, nil
}
func (e *EDNS0_TCP_KEEPALIVE) unpack(b []byte) error {
switch len(b) {
case 0:
case 2:
e.Timeout = binary.BigEndian.Uint16(b)
default:
return fmt.Errorf("dns: length mismatch, want 0/2 but got %d", len(b))
}
return nil
}
func (e *EDNS0_TCP_KEEPALIVE) String() string {
s := "use tcp keep-alive"
if e.Timeout == 0 {
s += ", timeout omitted"
} else {
s += fmt.Sprintf(", timeout %dms", e.Timeout*100)
}
return s
}
func (e *EDNS0_TCP_KEEPALIVE) copy() EDNS0 { return &EDNS0_TCP_KEEPALIVE{e.Code, e.Timeout, e.Length} }
// EDNS0_PADDING option is used to add padding to a request/response. The default
// value of padding SHOULD be 0x0 but other values MAY be used, for instance if
// compression is applied before encryption which may break signatures.
type EDNS0_PADDING struct {
Padding []byte
}
// Option implements the EDNS0 interface.
func (e *EDNS0_PADDING) Option() uint16 { return EDNS0PADDING }
func (e *EDNS0_PADDING) pack() ([]byte, error) { return e.Padding, nil }
func (e *EDNS0_PADDING) unpack(b []byte) error { e.Padding = b; return nil }
func (e *EDNS0_PADDING) String() string { return fmt.Sprintf("%0X", e.Padding) }
func (e *EDNS0_PADDING) copy() EDNS0 {
b := make([]byte, len(e.Padding))
copy(b, e.Padding)
return &EDNS0_PADDING{b}
}
// Extended DNS Error Codes (RFC 8914).
const (
ExtendedErrorCodeOther uint16 = iota
ExtendedErrorCodeUnsupportedDNSKEYAlgorithm
ExtendedErrorCodeUnsupportedDSDigestType
ExtendedErrorCodeStaleAnswer
ExtendedErrorCodeForgedAnswer
ExtendedErrorCodeDNSSECIndeterminate
ExtendedErrorCodeDNSBogus
ExtendedErrorCodeSignatureExpired
ExtendedErrorCodeSignatureNotYetValid
ExtendedErrorCodeDNSKEYMissing
ExtendedErrorCodeRRSIGsMissing
ExtendedErrorCodeNoZoneKeyBitSet
ExtendedErrorCodeNSECMissing
ExtendedErrorCodeCachedError
ExtendedErrorCodeNotReady
ExtendedErrorCodeBlocked
ExtendedErrorCodeCensored
ExtendedErrorCodeFiltered
ExtendedErrorCodeProhibited
ExtendedErrorCodeStaleNXDOMAINAnswer
ExtendedErrorCodeNotAuthoritative
ExtendedErrorCodeNotSupported
ExtendedErrorCodeNoReachableAuthority
ExtendedErrorCodeNetworkError
ExtendedErrorCodeInvalidData
)
// ExtendedErrorCodeToString maps extended error info codes to a human readable
// description.
var ExtendedErrorCodeToString = map[uint16]string{
ExtendedErrorCodeOther: "Other",
ExtendedErrorCodeUnsupportedDNSKEYAlgorithm: "Unsupported DNSKEY Algorithm",
ExtendedErrorCodeUnsupportedDSDigestType: "Unsupported DS Digest Type",
ExtendedErrorCodeStaleAnswer: "Stale Answer",
ExtendedErrorCodeForgedAnswer: "Forged Answer",
ExtendedErrorCodeDNSSECIndeterminate: "DNSSEC Indeterminate",
ExtendedErrorCodeDNSBogus: "DNSSEC Bogus",
ExtendedErrorCodeSignatureExpired: "Signature Expired",
ExtendedErrorCodeSignatureNotYetValid: "Signature Not Yet Valid",
ExtendedErrorCodeDNSKEYMissing: "DNSKEY Missing",
ExtendedErrorCodeRRSIGsMissing: "RRSIGs Missing",
ExtendedErrorCodeNoZoneKeyBitSet: "No Zone Key Bit Set",
ExtendedErrorCodeNSECMissing: "NSEC Missing",
ExtendedErrorCodeCachedError: "Cached Error",
ExtendedErrorCodeNotReady: "Not Ready",
ExtendedErrorCodeBlocked: "Blocked",
ExtendedErrorCodeCensored: "Censored",
ExtendedErrorCodeFiltered: "Filtered",
ExtendedErrorCodeProhibited: "Prohibited",
ExtendedErrorCodeStaleNXDOMAINAnswer: "Stale NXDOMAIN Answer",
ExtendedErrorCodeNotAuthoritative: "Not Authoritative",
ExtendedErrorCodeNotSupported: "Not Supported",
ExtendedErrorCodeNoReachableAuthority: "No Reachable Authority",
ExtendedErrorCodeNetworkError: "Network Error",
ExtendedErrorCodeInvalidData: "Invalid Data",
}
// StringToExtendedErrorCode is a map from human readable descriptions to
// extended error info codes.
var StringToExtendedErrorCode = reverseInt16(ExtendedErrorCodeToString)
// EDNS0_EDE option is used to return additional information about the cause of
// DNS errors.
type EDNS0_EDE struct {
InfoCode uint16
ExtraText string
}
// Option implements the EDNS0 interface.
func (e *EDNS0_EDE) Option() uint16 { return EDNS0EDE }
func (e *EDNS0_EDE) copy() EDNS0 { return &EDNS0_EDE{e.InfoCode, e.ExtraText} }
func (e *EDNS0_EDE) String() string {
info := strconv.FormatUint(uint64(e.InfoCode), 10)
if s, ok := ExtendedErrorCodeToString[e.InfoCode]; ok {
info += fmt.Sprintf(" (%s)", s)
}
return fmt.Sprintf("%s: (%s)", info, e.ExtraText)
}
func (e *EDNS0_EDE) pack() ([]byte, error) {
b := make([]byte, 2+len(e.ExtraText))
binary.BigEndian.PutUint16(b[0:], e.InfoCode)
copy(b[2:], []byte(e.ExtraText))
return b, nil
}
func (e *EDNS0_EDE) unpack(b []byte) error {
if len(b) < 2 {
return ErrBuf
}
e.InfoCode = binary.BigEndian.Uint16(b[0:])
e.ExtraText = string(b[2:])
return nil
}
// The EDNS0_ESU option for ENUM Source-URI Extension
type EDNS0_ESU struct {
Code uint16
Uri string
}
// Option implements the EDNS0 interface.
func (e *EDNS0_ESU) Option() uint16 { return EDNS0ESU }
func (e *EDNS0_ESU) String() string { return e.Uri }
func (e *EDNS0_ESU) copy() EDNS0 { return &EDNS0_ESU{e.Code, e.Uri} }
func (e *EDNS0_ESU) pack() ([]byte, error) { return []byte(e.Uri), nil }
func (e *EDNS0_ESU) unpack(b []byte) error {
e.Uri = string(b)
return nil
}
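As an illustrative sketch of the option types above: attaching an extended DNS error (RFC 8914) to a reply via an OPT RR. The reply message m is assumed to already exist.

    o := new(dns.OPT)
    o.Hdr.Name = "."
    o.Hdr.Rrtype = dns.TypeOPT
    o.SetUDPSize(1232)
    ede := &dns.EDNS0_EDE{
        InfoCode:  dns.ExtendedErrorCodeBlocked,
        ExtraText: "blocked by local policy",
    }
    o.Option = append(o.Option, ede)
    m.Extra = append(m.Extra, o) // m is a *dns.Msg reply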

93
vendor/github.com/miekg/dns/format.go generated vendored Normal file
View File

@@ -0,0 +1,93 @@
package dns
import (
"net"
"reflect"
"strconv"
)
// NumField returns the number of rdata fields r has.
func NumField(r RR) int {
return reflect.ValueOf(r).Elem().NumField() - 1 // Remove RR_Header
}
// Field returns the rdata field i as a string. Fields are indexed starting from 1.
// RR types that hold slice data, for instance the NSEC type bitmap, will return a single
// string where the types are concatenated using a space.
// Accessing non-existing fields will cause a panic.
func Field(r RR, i int) string {
if i == 0 {
return ""
}
d := reflect.ValueOf(r).Elem().Field(i)
switch d.Kind() {
case reflect.String:
return d.String()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return strconv.FormatInt(d.Int(), 10)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return strconv.FormatUint(d.Uint(), 10)
case reflect.Slice:
switch reflect.ValueOf(r).Elem().Type().Field(i).Tag {
case `dns:"a"`:
// TODO(miek): Hmm store this as 16 bytes
if d.Len() < net.IPv4len {
return ""
}
if d.Len() < net.IPv6len {
return net.IPv4(byte(d.Index(0).Uint()),
byte(d.Index(1).Uint()),
byte(d.Index(2).Uint()),
byte(d.Index(3).Uint())).String()
}
return net.IPv4(byte(d.Index(12).Uint()),
byte(d.Index(13).Uint()),
byte(d.Index(14).Uint()),
byte(d.Index(15).Uint())).String()
case `dns:"aaaa"`:
if d.Len() < net.IPv6len {
return ""
}
return net.IP{
byte(d.Index(0).Uint()),
byte(d.Index(1).Uint()),
byte(d.Index(2).Uint()),
byte(d.Index(3).Uint()),
byte(d.Index(4).Uint()),
byte(d.Index(5).Uint()),
byte(d.Index(6).Uint()),
byte(d.Index(7).Uint()),
byte(d.Index(8).Uint()),
byte(d.Index(9).Uint()),
byte(d.Index(10).Uint()),
byte(d.Index(11).Uint()),
byte(d.Index(12).Uint()),
byte(d.Index(13).Uint()),
byte(d.Index(14).Uint()),
byte(d.Index(15).Uint()),
}.String()
case `dns:"nsec"`:
if d.Len() == 0 {
return ""
}
s := Type(d.Index(0).Uint()).String()
for i := 1; i < d.Len(); i++ {
s += " " + Type(d.Index(i).Uint()).String()
}
return s
default:
// if it does not have a tag, it's a string slice
fallthrough
case `dns:"txt"`:
if d.Len() == 0 {
return ""
}
s := d.Index(0).String()
for i := 1; i < d.Len(); i++ {
s += " " + d.Index(i).String()
}
return s
}
}
return ""
}
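A brief illustration of NumField and Field using an MX record, whose rdata fields are Preference and Mx (fmt and dns imports assumed):

    mx, _ := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.")
    fmt.Println(dns.NumField(mx)) // 2
    fmt.Println(dns.Field(mx, 1)) // "10"
    fmt.Println(dns.Field(mx, 2)) // "mx.miek.nl."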

32
vendor/github.com/miekg/dns/fuzz.go generated vendored Normal file
View File

@@ -0,0 +1,32 @@
// +build fuzz
package dns
import "strings"
func Fuzz(data []byte) int {
msg := new(Msg)
if err := msg.Unpack(data); err != nil {
return 0
}
if _, err := msg.Pack(); err != nil {
return 0
}
return 1
}
func FuzzNewRR(data []byte) int {
str := string(data)
// Do not fuzz lines that include the $INCLUDE keyword and hint the fuzzer
// to avoid them.
// See GH#1025 for context.
if strings.Contains(strings.ToUpper(str), "$INCLUDE") {
return -1
}
if _, err := NewRR(str); err != nil {
return 0
}
return 1
}

247
vendor/github.com/miekg/dns/generate.go generated vendored Normal file
View File

@@ -0,0 +1,247 @@
package dns
import (
"bytes"
"fmt"
"io"
"strconv"
"strings"
)
// Parse the $GENERATE statement as used in BIND9 zones.
// See http://www.zytrax.com/books/dns/ch8/generate.html for instance.
// We are called after '$GENERATE '. After which we expect:
// * the range (12-24/2)
// * lhs (ownername)
// * [[ttl][class]]
// * type
// * rhs (rdata)
// But we are lazy here: only the range is parsed; *all* occurrences
// of $ after that are interpreted.
func (zp *ZoneParser) generate(l lex) (RR, bool) {
token := l.token
step := int64(1)
if i := strings.IndexByte(token, '/'); i >= 0 {
if i+1 == len(token) {
return zp.setParseError("bad step in $GENERATE range", l)
}
s, err := strconv.ParseInt(token[i+1:], 10, 64)
if err != nil || s <= 0 {
return zp.setParseError("bad step in $GENERATE range", l)
}
step = s
token = token[:i]
}
sx := strings.SplitN(token, "-", 2)
if len(sx) != 2 {
return zp.setParseError("bad start-stop in $GENERATE range", l)
}
start, err := strconv.ParseInt(sx[0], 10, 64)
if err != nil {
return zp.setParseError("bad start in $GENERATE range", l)
}
end, err := strconv.ParseInt(sx[1], 10, 64)
if err != nil {
return zp.setParseError("bad stop in $GENERATE range", l)
}
if end < 0 || start < 0 || end < start || (end-start)/step > 65535 {
return zp.setParseError("bad range in $GENERATE range", l)
}
// _BLANK
l, ok := zp.c.Next()
if !ok || l.value != zBlank {
return zp.setParseError("garbage after $GENERATE range", l)
}
// Create a complete new string, which we then parse again.
var s string
for l, ok := zp.c.Next(); ok; l, ok = zp.c.Next() {
if l.err {
return zp.setParseError("bad data in $GENERATE directive", l)
}
if l.value == zNewline {
break
}
s += l.token
}
r := &generateReader{
s: s,
cur: start,
start: start,
end: end,
step: step,
file: zp.file,
lex: &l,
}
zp.sub = NewZoneParser(r, zp.origin, zp.file)
zp.sub.includeDepth, zp.sub.includeAllowed = zp.includeDepth, zp.includeAllowed
zp.sub.generateDisallowed = true
zp.sub.SetDefaultTTL(defaultTtl)
return zp.subNext()
}
type generateReader struct {
s string
si int
cur int64
start int64
end int64
step int64
mod bytes.Buffer
escape bool
eof bool
file string
lex *lex
}
func (r *generateReader) parseError(msg string, end int) *ParseError {
r.eof = true // Make errors sticky.
l := *r.lex
l.token = r.s[r.si-1 : end]
l.column += r.si // l.column starts one zBLANK before r.s
return &ParseError{r.file, msg, l}
}
func (r *generateReader) Read(p []byte) (int, error) {
// NewZLexer, through NewZoneParser, should use ReadByte and
// not end up here.
panic("not implemented")
}
func (r *generateReader) ReadByte() (byte, error) {
if r.eof {
return 0, io.EOF
}
if r.mod.Len() > 0 {
return r.mod.ReadByte()
}
if r.si >= len(r.s) {
r.si = 0
r.cur += r.step
r.eof = r.cur > r.end || r.cur < 0
return '\n', nil
}
si := r.si
r.si++
switch r.s[si] {
case '\\':
if r.escape {
r.escape = false
return '\\', nil
}
r.escape = true
return r.ReadByte()
case '$':
if r.escape {
r.escape = false
return '$', nil
}
mod := "%d"
if si >= len(r.s)-1 {
// End of the string
fmt.Fprintf(&r.mod, mod, r.cur)
return r.mod.ReadByte()
}
if r.s[si+1] == '$' {
r.si++
return '$', nil
}
var offset int64
// Search for { and }
if r.s[si+1] == '{' {
// Modifier block
sep := strings.Index(r.s[si+2:], "}")
if sep < 0 {
return 0, r.parseError("bad modifier in $GENERATE", len(r.s))
}
var errMsg string
mod, offset, errMsg = modToPrintf(r.s[si+2 : si+2+sep])
if errMsg != "" {
return 0, r.parseError(errMsg, si+3+sep)
}
if r.start+offset < 0 || r.end+offset > 1<<31-1 {
return 0, r.parseError("bad offset in $GENERATE", si+3+sep)
}
r.si += 2 + sep // Jump to it
}
fmt.Fprintf(&r.mod, mod, r.cur+offset)
return r.mod.ReadByte()
default:
if r.escape { // Pretty useless here
r.escape = false
return r.ReadByte()
}
return r.s[si], nil
}
}
// Convert a $GENERATE modifier 0,0,d to something Printf can deal with.
func modToPrintf(s string) (string, int64, string) {
// Modifier is { offset [ ,width [ ,base ] ] } - provide default
// values for optional width and type, if necessary.
var offStr, widthStr, base string
switch xs := strings.Split(s, ","); len(xs) {
case 1:
offStr, widthStr, base = xs[0], "0", "d"
case 2:
offStr, widthStr, base = xs[0], xs[1], "d"
case 3:
offStr, widthStr, base = xs[0], xs[1], xs[2]
default:
return "", 0, "bad modifier in $GENERATE"
}
switch base {
case "o", "d", "x", "X":
default:
return "", 0, "bad base in $GENERATE"
}
offset, err := strconv.ParseInt(offStr, 10, 64)
if err != nil {
return "", 0, "bad offset in $GENERATE"
}
width, err := strconv.ParseInt(widthStr, 10, 64)
if err != nil || width < 0 || width > 255 {
return "", 0, "bad width in $GENERATE"
}
if width == 0 {
return "%" + base, offset, ""
}
return "%0" + widthStr + base, offset, ""
}
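A sketch of how a $GENERATE directive expands when fed through the zone parser (the zone content is made up for illustration; strings, fmt and log imports assumed):

    zone := "$ORIGIN miek.nl.\n$GENERATE 1-3 host-$ 3600 IN A 10.0.0.$\n"
    zp := dns.NewZoneParser(strings.NewReader(zone), "", "inline")
    for rr, ok := zp.Next(); ok; rr, ok = zp.Next() {
        fmt.Println(rr) // host-1.miek.nl. 3600 IN A 10.0.0.1, and so on
    }
    if err := zp.Err(); err != nil {
        log.Fatal(err)
    }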

212
vendor/github.com/miekg/dns/labels.go generated vendored Normal file
View File

@@ -0,0 +1,212 @@
package dns
// Holds a bunch of helper functions for dealing with labels.
// SplitDomainName splits a name string into its labels.
// www.miek.nl. returns []string{"www", "miek", "nl"}
// .www.miek.nl. returns []string{"", "www", "miek", "nl"},
// The root label (.) returns nil. Note that using
// strings.Split(s) will work in most cases, but does not handle
// escaped dots (\.) for instance.
// s must be a syntactically valid domain name, see IsDomainName.
func SplitDomainName(s string) (labels []string) {
if s == "" {
return nil
}
fqdnEnd := 0 // offset of the final '.' or the length of the name
idx := Split(s)
begin := 0
if IsFqdn(s) {
fqdnEnd = len(s) - 1
} else {
fqdnEnd = len(s)
}
switch len(idx) {
case 0:
return nil
case 1:
// no-op
default:
for _, end := range idx[1:] {
labels = append(labels, s[begin:end-1])
begin = end
}
}
return append(labels, s[begin:fqdnEnd])
}
// CompareDomainName compares the names s1 and s2 and
// returns how many labels they have in common starting from the *right*.
// The comparison stops at the first inequality. The names are downcased
// before the comparison.
//
// www.miek.nl. and miek.nl. have two labels in common: miek and nl
// www.miek.nl. and www.bla.nl. have one label in common: nl
//
// s1 and s2 must be syntactically valid domain names.
func CompareDomainName(s1, s2 string) (n int) {
// the first check: root label
if s1 == "." || s2 == "." {
return 0
}
l1 := Split(s1)
l2 := Split(s2)
j1 := len(l1) - 1 // end
i1 := len(l1) - 2 // start
j2 := len(l2) - 1
i2 := len(l2) - 2
// the second check can be done here: last/only label
// before we fall through into the for-loop below
if equal(s1[l1[j1]:], s2[l2[j2]:]) {
n++
} else {
return
}
for {
if i1 < 0 || i2 < 0 {
break
}
if equal(s1[l1[i1]:l1[j1]], s2[l2[i2]:l2[j2]]) {
n++
} else {
break
}
j1--
i1--
j2--
i2--
}
return
}
// CountLabel counts the number of labels in the string s.
// s must be a syntactically valid domain name.
func CountLabel(s string) (labels int) {
if s == "." {
return
}
off := 0
end := false
for {
off, end = NextLabel(s, off)
labels++
if end {
return
}
}
}
// Split splits a name s into its label indexes.
// www.miek.nl. returns []int{0, 4, 9}, www.miek.nl also returns []int{0, 4, 9}.
// The root name (.) returns nil. Also see SplitDomainName.
// s must be a syntactically valid domain name.
func Split(s string) []int {
if s == "." {
return nil
}
idx := make([]int, 1, 3)
off := 0
end := false
for {
off, end = NextLabel(s, off)
if end {
return idx
}
idx = append(idx, off)
}
}
// NextLabel returns the index of the start of the next label in the
// string s starting at offset.
// The bool end is true when the end of the string has been reached.
// Also see PrevLabel.
func NextLabel(s string, offset int) (i int, end bool) {
if s == "" {
return 0, true
}
for i = offset; i < len(s)-1; i++ {
if s[i] != '.' {
continue
}
j := i - 1
for j >= 0 && s[j] == '\\' {
j--
}
if (j-i)%2 == 0 {
continue
}
return i + 1, false
}
return i + 1, true
}
// PrevLabel returns the index of the label when starting from the right and
// jumping n labels to the left.
// The bool start is true when the start of the string has been overshot.
// Also see NextLabel.
func PrevLabel(s string, n int) (i int, start bool) {
if s == "" {
return 0, true
}
if n == 0 {
return len(s), false
}
l := len(s) - 1
if s[l] == '.' {
l--
}
for ; l >= 0 && n > 0; l-- {
if s[l] != '.' {
continue
}
j := l - 1
for j >= 0 && s[j] == '\\' {
j--
}
if (j-l)%2 == 0 {
continue
}
n--
if n == 0 {
return l + 1, false
}
}
return 0, n > 1
}
// equal compares a and b while ignoring case. It returns true when equal otherwise false.
func equal(a, b string) bool {
// might be lifted into API function.
la := len(a)
lb := len(b)
if la != lb {
return false
}
for i := la - 1; i >= 0; i-- {
ai := a[i]
bi := b[i]
if ai >= 'A' && ai <= 'Z' {
ai |= 'a' - 'A'
}
if bi >= 'A' && bi <= 'Z' {
bi |= 'a' - 'A'
}
if ai != bi {
return false
}
}
return true
}
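A few concrete calls, restating the examples from the comments above (fmt and dns imports assumed):

    fmt.Println(dns.SplitDomainName("www.miek.nl."))                  // [www miek nl]
    fmt.Println(dns.CompareDomainName("www.miek.nl.", "www.bla.nl.")) // 1, only "nl" is shared
    fmt.Println(dns.CountLabel("www.miek.nl."))                       // 3
    fmt.Println(dns.Split("www.miek.nl."))                            // [0 4 9]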

23
vendor/github.com/miekg/dns/listen_no_reuseport.go generated vendored Normal file
View File

@@ -0,0 +1,23 @@
// +build !go1.11 !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd
package dns
import "net"
const supportsReusePort = false
func listenTCP(network, addr string, reuseport bool) (net.Listener, error) {
if reuseport {
// TODO(tmthrgd): return an error?
}
return net.Listen(network, addr)
}
func listenUDP(network, addr string, reuseport bool) (net.PacketConn, error) {
if reuseport {
// TODO(tmthrgd): return an error?
}
return net.ListenPacket(network, addr)
}

44
vendor/github.com/miekg/dns/listen_reuseport.go generated vendored Normal file

@@ -0,0 +1,44 @@
// +build go1.11
// +build aix darwin dragonfly freebsd linux netbsd openbsd

package dns
import (
"context"
"net"
"syscall"
"golang.org/x/sys/unix"
)
const supportsReusePort = true
func reuseportControl(network, address string, c syscall.RawConn) error {
var opErr error
err := c.Control(func(fd uintptr) {
opErr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEPORT, 1)
})
if err != nil {
return err
}
return opErr
}
func listenTCP(network, addr string, reuseport bool) (net.Listener, error) {
var lc net.ListenConfig
if reuseport {
lc.Control = reuseportControl
}
return lc.Listen(context.Background(), network, addr)
}
func listenUDP(network, addr string, reuseport bool) (net.PacketConn, error) {
var lc net.ListenConfig
if reuseport {
lc.Control = reuseportControl
}
return lc.ListenPacket(context.Background(), network, addr)
}
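The reuseport variant above hooks net.ListenConfig.Control to set SO_REUSEPORT before bind. A standalone sketch of the same pattern, assuming a Linux/BSD target and the golang.org/x/sys/unix module; the listen address is arbitrary:

package main

import (
	"context"
	"log"
	"net"
	"syscall"

	"golang.org/x/sys/unix"
)

func main() {
	lc := net.ListenConfig{
		// Set SO_REUSEPORT on the socket before bind, mirroring reuseportControl above.
		Control: func(network, address string, c syscall.RawConn) error {
			var opErr error
			if err := c.Control(func(fd uintptr) {
				opErr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEPORT, 1)
			}); err != nil {
				return err
			}
			return opErr
		},
	}

	// With SO_REUSEPORT set, several listeners can share the same address.
	l, err := lc.Listen(context.Background(), "tcp", "127.0.0.1:8053")
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()
	log.Println("listening on", l.Addr())
}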

1202
vendor/github.com/miekg/dns/msg.go generated vendored Normal file

File diff suppressed because it is too large

802
vendor/github.com/miekg/dns/msg_helpers.go generated vendored Normal file

@@ -0,0 +1,802 @@
package dns
import (
"encoding/base32"
"encoding/base64"
"encoding/binary"
"encoding/hex"
"net"
"sort"
"strings"
)
// helper functions called from the generated zmsg.go
// These functions are named after the tag they help pack/unpack; if there is no tag, they are named after
// the type they pack/unpack (string, int, etc). All of them are prefixed with unpackData or packData, e.g.
// packDataA or packDataDomainName.
func unpackDataA(msg []byte, off int) (net.IP, int, error) {
if off+net.IPv4len > len(msg) {
return nil, len(msg), &Error{err: "overflow unpacking a"}
}
a := append(make(net.IP, 0, net.IPv4len), msg[off:off+net.IPv4len]...)
off += net.IPv4len
return a, off, nil
}
func packDataA(a net.IP, msg []byte, off int) (int, error) {
switch len(a) {
case net.IPv4len, net.IPv6len:
// It must be a 4-byte slice; if it is 16 bytes, only the IPv4 part (To4) is encoded.
if off+net.IPv4len > len(msg) {
return len(msg), &Error{err: "overflow packing a"}
}
copy(msg[off:], a.To4())
off += net.IPv4len
case 0:
// Allowed, for dynamic updates.
default:
return len(msg), &Error{err: "overflow packing a"}
}
return off, nil
}
func unpackDataAAAA(msg []byte, off int) (net.IP, int, error) {
if off+net.IPv6len > len(msg) {
return nil, len(msg), &Error{err: "overflow unpacking aaaa"}
}
aaaa := append(make(net.IP, 0, net.IPv6len), msg[off:off+net.IPv6len]...)
off += net.IPv6len
return aaaa, off, nil
}
func packDataAAAA(aaaa net.IP, msg []byte, off int) (int, error) {
switch len(aaaa) {
case net.IPv6len:
if off+net.IPv6len > len(msg) {
return len(msg), &Error{err: "overflow packing aaaa"}
}
copy(msg[off:], aaaa)
off += net.IPv6len
case 0:
// Allowed, dynamic updates.
default:
return len(msg), &Error{err: "overflow packing aaaa"}
}
return off, nil
}
// unpackHeader unpacks an RR header, returning the offset to the end of the header and a
// re-sliced msg according to the expected length of the RR.
func unpackHeader(msg []byte, off int) (rr RR_Header, off1 int, truncmsg []byte, err error) {
hdr := RR_Header{}
if off == len(msg) {
return hdr, off, msg, nil
}
hdr.Name, off, err = UnpackDomainName(msg, off)
if err != nil {
return hdr, len(msg), msg, err
}
hdr.Rrtype, off, err = unpackUint16(msg, off)
if err != nil {
return hdr, len(msg), msg, err
}
hdr.Class, off, err = unpackUint16(msg, off)
if err != nil {
return hdr, len(msg), msg, err
}
hdr.Ttl, off, err = unpackUint32(msg, off)
if err != nil {
return hdr, len(msg), msg, err
}
hdr.Rdlength, off, err = unpackUint16(msg, off)
if err != nil {
return hdr, len(msg), msg, err
}
msg, err = truncateMsgFromRdlength(msg, off, hdr.Rdlength)
return hdr, off, msg, err
}
// packHeader packs an RR header, returning the offset to the end of the header.
// See PackDomainName for documentation about the compression.
func (hdr RR_Header) packHeader(msg []byte, off int, compression compressionMap, compress bool) (int, error) {
if off == len(msg) {
return off, nil
}
off, err := packDomainName(hdr.Name, msg, off, compression, compress)
if err != nil {
return len(msg), err
}
off, err = packUint16(hdr.Rrtype, msg, off)
if err != nil {
return len(msg), err
}
off, err = packUint16(hdr.Class, msg, off)
if err != nil {
return len(msg), err
}
off, err = packUint32(hdr.Ttl, msg, off)
if err != nil {
return len(msg), err
}
off, err = packUint16(0, msg, off) // The RDLENGTH field will be set later in packRR.
if err != nil {
return len(msg), err
}
return off, nil
}
// helper functions.
// truncateMsgFromRdlength truncates msg to match the expected length of the RR.
// Returns an error if msg is smaller than the expected size.
func truncateMsgFromRdlength(msg []byte, off int, rdlength uint16) (truncmsg []byte, err error) {
lenrd := off + int(rdlength)
if lenrd > len(msg) {
return msg, &Error{err: "overflowing header size"}
}
return msg[:lenrd], nil
}
var base32HexNoPadEncoding = base32.HexEncoding.WithPadding(base32.NoPadding)
func fromBase32(s []byte) (buf []byte, err error) {
for i, b := range s {
if b >= 'a' && b <= 'z' {
s[i] = b - 32
}
}
buflen := base32HexNoPadEncoding.DecodedLen(len(s))
buf = make([]byte, buflen)
n, err := base32HexNoPadEncoding.Decode(buf, s)
buf = buf[:n]
return
}
func toBase32(b []byte) string {
return base32HexNoPadEncoding.EncodeToString(b)
}
func fromBase64(s []byte) (buf []byte, err error) {
buflen := base64.StdEncoding.DecodedLen(len(s))
buf = make([]byte, buflen)
n, err := base64.StdEncoding.Decode(buf, s)
buf = buf[:n]
return
}
func toBase64(b []byte) string { return base64.StdEncoding.EncodeToString(b) }
// dynamicUpdate returns true if the Rdlength is zero.
func noRdata(h RR_Header) bool { return h.Rdlength == 0 }
func unpackUint8(msg []byte, off int) (i uint8, off1 int, err error) {
if off+1 > len(msg) {
return 0, len(msg), &Error{err: "overflow unpacking uint8"}
}
return msg[off], off + 1, nil
}
func packUint8(i uint8, msg []byte, off int) (off1 int, err error) {
if off+1 > len(msg) {
return len(msg), &Error{err: "overflow packing uint8"}
}
msg[off] = i
return off + 1, nil
}
func unpackUint16(msg []byte, off int) (i uint16, off1 int, err error) {
if off+2 > len(msg) {
return 0, len(msg), &Error{err: "overflow unpacking uint16"}
}
return binary.BigEndian.Uint16(msg[off:]), off + 2, nil
}
func packUint16(i uint16, msg []byte, off int) (off1 int, err error) {
if off+2 > len(msg) {
return len(msg), &Error{err: "overflow packing uint16"}
}
binary.BigEndian.PutUint16(msg[off:], i)
return off + 2, nil
}
func unpackUint32(msg []byte, off int) (i uint32, off1 int, err error) {
if off+4 > len(msg) {
return 0, len(msg), &Error{err: "overflow unpacking uint32"}
}
return binary.BigEndian.Uint32(msg[off:]), off + 4, nil
}
func packUint32(i uint32, msg []byte, off int) (off1 int, err error) {
if off+4 > len(msg) {
return len(msg), &Error{err: "overflow packing uint32"}
}
binary.BigEndian.PutUint32(msg[off:], i)
return off + 4, nil
}
func unpackUint48(msg []byte, off int) (i uint64, off1 int, err error) {
if off+6 > len(msg) {
return 0, len(msg), &Error{err: "overflow unpacking uint64 as uint48"}
}
// Used in TSIG where the last 48 bits are occupied, so for now, assume a uint48 (6 bytes)
i = uint64(msg[off])<<40 | uint64(msg[off+1])<<32 | uint64(msg[off+2])<<24 | uint64(msg[off+3])<<16 |
uint64(msg[off+4])<<8 | uint64(msg[off+5])
off += 6
return i, off, nil
}
func packUint48(i uint64, msg []byte, off int) (off1 int, err error) {
if off+6 > len(msg) {
return len(msg), &Error{err: "overflow packing uint64 as uint48"}
}
msg[off] = byte(i >> 40)
msg[off+1] = byte(i >> 32)
msg[off+2] = byte(i >> 24)
msg[off+3] = byte(i >> 16)
msg[off+4] = byte(i >> 8)
msg[off+5] = byte(i)
off += 6
return off, nil
}
func unpackUint64(msg []byte, off int) (i uint64, off1 int, err error) {
if off+8 > len(msg) {
return 0, len(msg), &Error{err: "overflow unpacking uint64"}
}
return binary.BigEndian.Uint64(msg[off:]), off + 8, nil
}
func packUint64(i uint64, msg []byte, off int) (off1 int, err error) {
if off+8 > len(msg) {
return len(msg), &Error{err: "overflow packing uint64"}
}
binary.BigEndian.PutUint64(msg[off:], i)
off += 8
return off, nil
}
func unpackString(msg []byte, off int) (string, int, error) {
if off+1 > len(msg) {
return "", off, &Error{err: "overflow unpacking txt"}
}
l := int(msg[off])
off++
if off+l > len(msg) {
return "", off, &Error{err: "overflow unpacking txt"}
}
var s strings.Builder
consumed := 0
for i, b := range msg[off : off+l] {
switch {
case b == '"' || b == '\\':
if consumed == 0 {
s.Grow(l * 2)
}
s.Write(msg[off+consumed : off+i])
s.WriteByte('\\')
s.WriteByte(b)
consumed = i + 1
case b < ' ' || b > '~': // unprintable
if consumed == 0 {
s.Grow(l * 2)
}
s.Write(msg[off+consumed : off+i])
s.WriteString(escapeByte(b))
consumed = i + 1
}
}
if consumed == 0 { // no escaping needed
return string(msg[off : off+l]), off + l, nil
}
s.Write(msg[off+consumed : off+l])
return s.String(), off + l, nil
}
func packString(s string, msg []byte, off int) (int, error) {
txtTmp := make([]byte, 256*4+1)
off, err := packTxtString(s, msg, off, txtTmp)
if err != nil {
return len(msg), err
}
return off, nil
}
func unpackStringBase32(msg []byte, off, end int) (string, int, error) {
if end > len(msg) {
return "", len(msg), &Error{err: "overflow unpacking base32"}
}
s := toBase32(msg[off:end])
return s, end, nil
}
func packStringBase32(s string, msg []byte, off int) (int, error) {
b32, err := fromBase32([]byte(s))
if err != nil {
return len(msg), err
}
if off+len(b32) > len(msg) {
return len(msg), &Error{err: "overflow packing base32"}
}
copy(msg[off:off+len(b32)], b32)
off += len(b32)
return off, nil
}
func unpackStringBase64(msg []byte, off, end int) (string, int, error) {
// The rest of the RR is a base64-encoded value, so we don't need an explicit length.
// So far, all RRs that have base64-encoded fields have them as their last field.
// What we do need is the end of the RR!
if end > len(msg) {
return "", len(msg), &Error{err: "overflow unpacking base64"}
}
s := toBase64(msg[off:end])
return s, end, nil
}
func packStringBase64(s string, msg []byte, off int) (int, error) {
b64, err := fromBase64([]byte(s))
if err != nil {
return len(msg), err
}
if off+len(b64) > len(msg) {
return len(msg), &Error{err: "overflow packing base64"}
}
copy(msg[off:off+len(b64)], b64)
off += len(b64)
return off, nil
}
func unpackStringHex(msg []byte, off, end int) (string, int, error) {
// The rest of the RR is a hex-encoded value, so we don't need an explicit length.
// NSEC and TSIG have hex fields with a length field.
// What we do need is the end of the RR!
if end > len(msg) {
return "", len(msg), &Error{err: "overflow unpacking hex"}
}
s := hex.EncodeToString(msg[off:end])
return s, end, nil
}
func packStringHex(s string, msg []byte, off int) (int, error) {
h, err := hex.DecodeString(s)
if err != nil {
return len(msg), err
}
if off+len(h) > len(msg) {
return len(msg), &Error{err: "overflow packing hex"}
}
copy(msg[off:off+len(h)], h)
off += len(h)
return off, nil
}
func unpackStringAny(msg []byte, off, end int) (string, int, error) {
if end > len(msg) {
return "", len(msg), &Error{err: "overflow unpacking anything"}
}
return string(msg[off:end]), end, nil
}
func packStringAny(s string, msg []byte, off int) (int, error) {
if off+len(s) > len(msg) {
return len(msg), &Error{err: "overflow packing anything"}
}
copy(msg[off:off+len(s)], s)
off += len(s)
return off, nil
}
func unpackStringTxt(msg []byte, off int) ([]string, int, error) {
txt, off, err := unpackTxt(msg, off)
if err != nil {
return nil, len(msg), err
}
return txt, off, nil
}
func packStringTxt(s []string, msg []byte, off int) (int, error) {
txtTmp := make([]byte, 256*4+1) // If the whole string consists out of \DDD we need this many.
off, err := packTxt(s, msg, off, txtTmp)
if err != nil {
return len(msg), err
}
return off, nil
}
func unpackDataOpt(msg []byte, off int) ([]EDNS0, int, error) {
var edns []EDNS0
Option:
var code uint16
if off+4 > len(msg) {
return nil, len(msg), &Error{err: "overflow unpacking opt"}
}
code = binary.BigEndian.Uint16(msg[off:])
off += 2
optlen := binary.BigEndian.Uint16(msg[off:])
off += 2
if off+int(optlen) > len(msg) {
return nil, len(msg), &Error{err: "overflow unpacking opt"}
}
e := makeDataOpt(code)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
if off < len(msg) {
goto Option
}
return edns, off, nil
}
func packDataOpt(options []EDNS0, msg []byte, off int) (int, error) {
for _, el := range options {
b, err := el.pack()
if err != nil || off+4 > len(msg) {
return len(msg), &Error{err: "overflow packing opt"}
}
binary.BigEndian.PutUint16(msg[off:], el.Option()) // Option code
binary.BigEndian.PutUint16(msg[off+2:], uint16(len(b))) // Length
off += 4
if off+len(b) > len(msg) {
return len(msg), &Error{err: "overflow packing opt"}
}
// Actual data
copy(msg[off:off+len(b)], b)
off += len(b)
}
return off, nil
}
func unpackStringOctet(msg []byte, off int) (string, int, error) {
s := string(msg[off:])
return s, len(msg), nil
}
func packStringOctet(s string, msg []byte, off int) (int, error) {
txtTmp := make([]byte, 256*4+1)
off, err := packOctetString(s, msg, off, txtTmp)
if err != nil {
return len(msg), err
}
return off, nil
}
func unpackDataNsec(msg []byte, off int) ([]uint16, int, error) {
var nsec []uint16
length, window, lastwindow := 0, 0, -1
for off < len(msg) {
if off+2 > len(msg) {
return nsec, len(msg), &Error{err: "overflow unpacking nsecx"}
}
window = int(msg[off])
length = int(msg[off+1])
off += 2
if window <= lastwindow {
// RFC 4034: Blocks are present in the NSEC RR RDATA in
// increasing numerical order.
return nsec, len(msg), &Error{err: "out of order NSEC block"}
}
if length == 0 {
// RFC 4034: Blocks with no types present MUST NOT be included.
return nsec, len(msg), &Error{err: "empty NSEC block"}
}
if length > 32 {
return nsec, len(msg), &Error{err: "NSEC block too long"}
}
if off+length > len(msg) {
return nsec, len(msg), &Error{err: "overflowing NSEC block"}
}
// Walk the bytes in the window and extract the type bits
for j, b := range msg[off : off+length] {
// Check the bits one by one, and set the type
if b&0x80 == 0x80 {
nsec = append(nsec, uint16(window*256+j*8+0))
}
if b&0x40 == 0x40 {
nsec = append(nsec, uint16(window*256+j*8+1))
}
if b&0x20 == 0x20 {
nsec = append(nsec, uint16(window*256+j*8+2))
}
if b&0x10 == 0x10 {
nsec = append(nsec, uint16(window*256+j*8+3))
}
if b&0x8 == 0x8 {
nsec = append(nsec, uint16(window*256+j*8+4))
}
if b&0x4 == 0x4 {
nsec = append(nsec, uint16(window*256+j*8+5))
}
if b&0x2 == 0x2 {
nsec = append(nsec, uint16(window*256+j*8+6))
}
if b&0x1 == 0x1 {
nsec = append(nsec, uint16(window*256+j*8+7))
}
}
off += length
lastwindow = window
}
return nsec, off, nil
}
// typeBitMapLen is a helper function which computes the "maximum" length of
// the NSEC Type BitMap field.
func typeBitMapLen(bitmap []uint16) int {
var l int
var lastwindow, lastlength uint16
for _, t := range bitmap {
window := t / 256
length := (t-window*256)/8 + 1
if window > lastwindow && lastlength != 0 { // New window, jump to the new offset
l += int(lastlength) + 2
lastlength = 0
}
if window < lastwindow || length < lastlength {
// packDataNsec would return Error{err: "nsec bits out of order"} here, but
// when computing the length we want to be liberal.
continue
}
lastwindow, lastlength = window, length
}
l += int(lastlength) + 2
return l
}
func packDataNsec(bitmap []uint16, msg []byte, off int) (int, error) {
if len(bitmap) == 0 {
return off, nil
}
var lastwindow, lastlength uint16
for _, t := range bitmap {
window := t / 256
length := (t-window*256)/8 + 1
if window > lastwindow && lastlength != 0 { // New window, jump to the new offset
off += int(lastlength) + 2
lastlength = 0
}
if window < lastwindow || length < lastlength {
return len(msg), &Error{err: "nsec bits out of order"}
}
if off+2+int(length) > len(msg) {
return len(msg), &Error{err: "overflow packing nsec"}
}
// Setting the window #
msg[off] = byte(window)
// Setting the octets length
msg[off+1] = byte(length)
// Setting the bit value for the type in the right octet
msg[off+1+int(length)] |= byte(1 << (7 - t%8))
lastwindow, lastlength = window, length
}
off += int(lastlength) + 2
return off, nil
}
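The bitmap code above derives, for each type code, a 256-wide window, a byte offset inside that window, and a bit position (most significant bit first). A small sketch of that arithmetic, using dns.TypeA and dns.TypeAAAA as examples:

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	for _, t := range []uint16{dns.TypeA, dns.TypeAAAA} {
		window := t / 256            // which 256-type window the type falls into
		octet := (t % 256) / 8       // byte offset inside that window's bitmap
		mask := byte(1 << (7 - t%8)) // bit within the octet, MSB first
		fmt.Printf("type %d: window %d, octet %d, mask %#04x\n", t, window, octet, mask)
	}
	// type 1: window 0, octet 0, mask 0x40
	// type 28: window 0, octet 3, mask 0x08
}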
func unpackDataSVCB(msg []byte, off int) ([]SVCBKeyValue, int, error) {
var xs []SVCBKeyValue
var code uint16
var length uint16
var err error
for off < len(msg) {
code, off, err = unpackUint16(msg, off)
if err != nil {
return nil, len(msg), &Error{err: "overflow unpacking SVCB"}
}
length, off, err = unpackUint16(msg, off)
if err != nil || off+int(length) > len(msg) {
return nil, len(msg), &Error{err: "overflow unpacking SVCB"}
}
e := makeSVCBKeyValue(SVCBKey(code))
if e == nil {
return nil, len(msg), &Error{err: "bad SVCB key"}
}
if err := e.unpack(msg[off : off+int(length)]); err != nil {
return nil, len(msg), err
}
if len(xs) > 0 && e.Key() <= xs[len(xs)-1].Key() {
return nil, len(msg), &Error{err: "SVCB keys not in strictly increasing order"}
}
xs = append(xs, e)
off += int(length)
}
return xs, off, nil
}
func packDataSVCB(pairs []SVCBKeyValue, msg []byte, off int) (int, error) {
pairs = append([]SVCBKeyValue(nil), pairs...)
sort.Slice(pairs, func(i, j int) bool {
return pairs[i].Key() < pairs[j].Key()
})
prev := svcb_RESERVED
for _, el := range pairs {
if el.Key() == prev {
return len(msg), &Error{err: "repeated SVCB keys are not allowed"}
}
prev = el.Key()
packed, err := el.pack()
if err != nil {
return len(msg), err
}
off, err = packUint16(uint16(el.Key()), msg, off)
if err != nil {
return len(msg), &Error{err: "overflow packing SVCB"}
}
off, err = packUint16(uint16(len(packed)), msg, off)
if err != nil || off+len(packed) > len(msg) {
return len(msg), &Error{err: "overflow packing SVCB"}
}
copy(msg[off:off+len(packed)], packed)
off += len(packed)
}
return off, nil
}
func unpackDataDomainNames(msg []byte, off, end int) ([]string, int, error) {
var (
servers []string
s string
err error
)
if end > len(msg) {
return nil, len(msg), &Error{err: "overflow unpacking domain names"}
}
for off < end {
s, off, err = UnpackDomainName(msg, off)
if err != nil {
return servers, len(msg), err
}
servers = append(servers, s)
}
return servers, off, nil
}
func packDataDomainNames(names []string, msg []byte, off int, compression compressionMap, compress bool) (int, error) {
var err error
for _, name := range names {
off, err = packDomainName(name, msg, off, compression, compress)
if err != nil {
return len(msg), err
}
}
return off, nil
}
func packDataApl(data []APLPrefix, msg []byte, off int) (int, error) {
var err error
for i := range data {
off, err = packDataAplPrefix(&data[i], msg, off)
if err != nil {
return len(msg), err
}
}
return off, nil
}
func packDataAplPrefix(p *APLPrefix, msg []byte, off int) (int, error) {
if len(p.Network.IP) != len(p.Network.Mask) {
return len(msg), &Error{err: "address and mask lengths don't match"}
}
var err error
prefix, _ := p.Network.Mask.Size()
addr := p.Network.IP.Mask(p.Network.Mask)[:(prefix+7)/8]
switch len(p.Network.IP) {
case net.IPv4len:
off, err = packUint16(1, msg, off)
case net.IPv6len:
off, err = packUint16(2, msg, off)
default:
err = &Error{err: "unrecognized address family"}
}
if err != nil {
return len(msg), err
}
off, err = packUint8(uint8(prefix), msg, off)
if err != nil {
return len(msg), err
}
var n uint8
if p.Negation {
n = 0x80
}
// trim trailing zero bytes as specified in RFC3123 Sections 4.1 and 4.2.
i := len(addr) - 1
for ; i >= 0 && addr[i] == 0; i-- {
}
addr = addr[:i+1]
adflen := uint8(len(addr)) & 0x7f
off, err = packUint8(n|adflen, msg, off)
if err != nil {
return len(msg), err
}
if off+len(addr) > len(msg) {
return len(msg), &Error{err: "overflow packing APL prefix"}
}
off += copy(msg[off:], addr)
return off, nil
}
func unpackDataApl(msg []byte, off int) ([]APLPrefix, int, error) {
var result []APLPrefix
for off < len(msg) {
prefix, end, err := unpackDataAplPrefix(msg, off)
if err != nil {
return nil, len(msg), err
}
off = end
result = append(result, prefix)
}
return result, off, nil
}
func unpackDataAplPrefix(msg []byte, off int) (APLPrefix, int, error) {
family, off, err := unpackUint16(msg, off)
if err != nil {
return APLPrefix{}, len(msg), &Error{err: "overflow unpacking APL prefix"}
}
prefix, off, err := unpackUint8(msg, off)
if err != nil {
return APLPrefix{}, len(msg), &Error{err: "overflow unpacking APL prefix"}
}
nlen, off, err := unpackUint8(msg, off)
if err != nil {
return APLPrefix{}, len(msg), &Error{err: "overflow unpacking APL prefix"}
}
var ip []byte
switch family {
case 1:
ip = make([]byte, net.IPv4len)
case 2:
ip = make([]byte, net.IPv6len)
default:
return APLPrefix{}, len(msg), &Error{err: "unrecognized APL address family"}
}
if int(prefix) > 8*len(ip) {
return APLPrefix{}, len(msg), &Error{err: "APL prefix too long"}
}
afdlen := int(nlen & 0x7f)
if afdlen > len(ip) {
return APLPrefix{}, len(msg), &Error{err: "APL length too long"}
}
if off+afdlen > len(msg) {
return APLPrefix{}, len(msg), &Error{err: "overflow unpacking APL address"}
}
// Address MUST NOT contain trailing zero bytes per RFC3123 Sections 4.1 and 4.2.
off += copy(ip, msg[off:off+afdlen])
if afdlen > 0 {
last := ip[afdlen-1]
if last == 0 {
return APLPrefix{}, len(msg), &Error{err: "extra APL address bits"}
}
}
ipnet := net.IPNet{
IP: ip,
Mask: net.CIDRMask(int(prefix), 8*len(ip)),
}
return APLPrefix{
Negation: (nlen & 0x80) != 0,
Network: ipnet,
}, off, nil
}
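These pack/unpack helpers are unexported; callers normally reach them through the exported Msg.Pack and Msg.Unpack. A minimal round-trip sketch using that API:

package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetQuestion(dns.Fqdn("example.org"), dns.TypeA)

	// Pack walks the message and calls the per-field pack helpers.
	wire, err := m.Pack()
	if err != nil {
		log.Fatal(err)
	}

	// Unpack reverses the process from the wire format.
	r := new(dns.Msg)
	if err := r.Unpack(wire); err != nil {
		log.Fatal(err)
	}
	fmt.Println(r.Question[0].Name, dns.TypeToString[r.Question[0].Qtype]) // example.org. A
}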

117
vendor/github.com/miekg/dns/msg_truncate.go generated vendored Normal file

@@ -0,0 +1,117 @@
package dns
// Truncate ensures the reply message will fit into the requested buffer
// size by removing records that exceed the requested size.
//
// It will first check if the reply fits without compression and then with
// compression. If it won't fit with compression, Truncate then walks the
// record adding as many records as possible without exceeding the
// requested buffer size.
//
// If the message fits within the requested size without compression,
// Truncate will set the message's Compress attribute to false. It is
// the caller's responsibility to set it back to true if they wish to
// compress the payload regardless of size.
//
// The TC bit will be set if any records were excluded from the message.
// If the TC bit is already set on the message it will be retained.
// TC indicates that the client should retry over TCP.
//
// According to RFC 2181, the TC bit should only be set if not all of the
// "required" RRs can be included in the response. Unfortunately, we have
// no way of knowing which RRs are required so we set the TC bit if any RR
// had to be omitted from the response.
//
// The appropriate buffer size can be retrieved from the request's OPT
// record, if present, and is transport specific otherwise. dns.MinMsgSize
// should be used for UDP requests without an OPT record, and
// dns.MaxMsgSize for TCP requests without an OPT record.
func (dns *Msg) Truncate(size int) {
if dns.IsTsig() != nil {
// To simplify this implementation, we don't perform
// truncation on responses with a TSIG record.
return
}
// RFC 6891 mandates that the payload size in an OPT record
// less than 512 (MinMsgSize) bytes must be treated as equal to 512 bytes.
//
// For ease of use, we impose that restriction here.
if size < MinMsgSize {
size = MinMsgSize
}
l := msgLenWithCompressionMap(dns, nil) // uncompressed length
if l <= size {
// Don't waste effort compressing this message.
dns.Compress = false
return
}
dns.Compress = true
edns0 := dns.popEdns0()
if edns0 != nil {
// Account for the OPT record that gets added at the end,
// by subtracting that length from our budget.
//
// The EDNS(0) OPT record must have the root domain and
// its length is thus unaffected by compression.
size -= Len(edns0)
}
compression := make(map[string]struct{})
l = headerSize
for _, r := range dns.Question {
l += r.len(l, compression)
}
var numAnswer int
if l < size {
l, numAnswer = truncateLoop(dns.Answer, size, l, compression)
}
var numNS int
if l < size {
l, numNS = truncateLoop(dns.Ns, size, l, compression)
}
var numExtra int
if l < size {
_, numExtra = truncateLoop(dns.Extra, size, l, compression)
}
// See the function documentation for when we set this.
dns.Truncated = dns.Truncated || len(dns.Answer) > numAnswer ||
len(dns.Ns) > numNS || len(dns.Extra) > numExtra
dns.Answer = dns.Answer[:numAnswer]
dns.Ns = dns.Ns[:numNS]
dns.Extra = dns.Extra[:numExtra]
if edns0 != nil {
// Add the OPT record back onto the additional section.
dns.Extra = append(dns.Extra, edns0)
}
}
func truncateLoop(rrs []RR, size, l int, compression map[string]struct{}) (int, int) {
for i, r := range rrs {
if r == nil {
continue
}
l += r.len(l, compression)
if l > size {
// Return size, rather than l prior to this record,
// to prevent any further records being added.
return size, i
}
if l == size {
return l, i + 1
}
}
return l, len(rrs)
}
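A sketch of Truncate in use, assuming a reply padded with made-up TXT records so that it overflows the minimum UDP size:

package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	req := new(dns.Msg)
	req.SetQuestion("example.org.", dns.TypeTXT)

	reply := new(dns.Msg)
	reply.SetReply(req)
	for i := 0; i < 200; i++ {
		rr, err := dns.NewRR(fmt.Sprintf(`example.org. 3600 IN TXT "filler record number %d"`, i))
		if err != nil {
			log.Fatal(err)
		}
		reply.Answer = append(reply.Answer, rr)
	}

	// Cap the reply at the minimum UDP message size; records that do not
	// fit are dropped and the TC bit is set.
	reply.Truncate(dns.MinMsgSize)
	fmt.Println(len(reply.Answer), reply.Truncated)
}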

95
vendor/github.com/miekg/dns/nsecx.go generated vendored Normal file

@@ -0,0 +1,95 @@
package dns
import (
"crypto/sha1"
"encoding/hex"
"strings"
)
// HashName hashes a string (label) according to RFC 5155. It returns the hashed string in uppercase.
func HashName(label string, ha uint8, iter uint16, salt string) string {
if ha != SHA1 {
return ""
}
wireSalt := make([]byte, hex.DecodedLen(len(salt)))
n, err := packStringHex(salt, wireSalt, 0)
if err != nil {
return ""
}
wireSalt = wireSalt[:n]
name := make([]byte, 255)
off, err := PackDomainName(strings.ToLower(label), name, 0, nil, false)
if err != nil {
return ""
}
name = name[:off]
s := sha1.New()
// k = 0
s.Write(name)
s.Write(wireSalt)
nsec3 := s.Sum(nil)
// k > 0
for k := uint16(0); k < iter; k++ {
s.Reset()
s.Write(nsec3)
s.Write(wireSalt)
nsec3 = s.Sum(nsec3[:0])
}
return toBase32(nsec3)
}
// Cover returns true if a name is covered by the NSEC3 record.
func (rr *NSEC3) Cover(name string) bool {
nameHash := HashName(name, rr.Hash, rr.Iterations, rr.Salt)
owner := strings.ToUpper(rr.Hdr.Name)
labelIndices := Split(owner)
if len(labelIndices) < 2 {
return false
}
ownerHash := owner[:labelIndices[1]-1]
ownerZone := owner[labelIndices[1]:]
if !IsSubDomain(ownerZone, strings.ToUpper(name)) { // name is outside owner zone
return false
}
nextHash := rr.NextDomain
// An empty interval (owner hash == next hash) covers every hash except the owner's own, so nameHash must not equal ownerHash.
if ownerHash == nextHash && nameHash != ownerHash { // empty interval
return true
}
if ownerHash > nextHash { // end of zone
if nameHash > ownerHash { // covered since there is nothing after ownerHash
return true
}
return nameHash < nextHash // if nameHash is before beginning of zone it is covered
}
if nameHash < ownerHash { // nameHash is before ownerHash, not covered
return false
}
return nameHash < nextHash // if nameHash is before nextHash it is covered (it lies between ownerHash and nextHash)
}
// Match returns true if a name matches the NSEC3 record
func (rr *NSEC3) Match(name string) bool {
nameHash := HashName(name, rr.Hash, rr.Iterations, rr.Salt)
owner := strings.ToUpper(rr.Hdr.Name)
labelIndices := Split(owner)
if len(labelIndices) < 2 {
return false
}
ownerHash := owner[:labelIndices[1]-1]
ownerZone := owner[labelIndices[1]:]
if !IsSubDomain(ownerZone, strings.ToUpper(name)) { // name is outside owner zone
return false
}
if ownerHash == nameHash {
return true
}
return false
}
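A minimal sketch of HashName and NSEC3.Match; the salt and iteration count are arbitrary, and the owner name is built from the hash of the queried name so that Match succeeds:

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	const (
		zone = "example.org."
		salt = "aabbccdd" // hex-encoded salt, arbitrary for this sketch
		iter = uint16(10)
	)

	// Hash the name the same way a signer would when building the NSEC3 chain.
	h := dns.HashName("a.example.org.", dns.SHA1, iter, salt)

	rr := &dns.NSEC3{
		Hdr:        dns.RR_Header{Name: h + "." + zone, Rrtype: dns.TypeNSEC3, Class: dns.ClassINET, Ttl: 3600},
		Hash:       dns.SHA1,
		Iterations: iter,
		Salt:       salt,
		NextDomain: h, // placeholder; a real record holds the next owner's hash
	}

	fmt.Println(rr.Match("a.example.org.")) // true: the owner hash equals the name hash
	fmt.Println(rr.Match("b.example.org.")) // false: different name, different hash
}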

113
vendor/github.com/miekg/dns/privaterr.go generated vendored Normal file

@@ -0,0 +1,113 @@
package dns
import "strings"
// PrivateRdata is an interface used for implementing "Private Use" RR types, see
// RFC 6895. This allows one to experiment with new RR types, without requesting an
// official type code. Also see dns.PrivateHandle and dns.PrivateHandleRemove.
type PrivateRdata interface {
// String returns the text presentation of the Rdata of the Private RR.
String() string
// Parse parses the Rdata of the private RR.
Parse([]string) error
// Pack is used when packing a private RR into a buffer.
Pack([]byte) (int, error)
// Unpack is used when unpacking a private RR from a buffer.
Unpack([]byte) (int, error)
// Copy copies the Rdata into the PrivateRdata argument.
Copy(PrivateRdata) error
// Len returns the length in octets of the Rdata.
Len() int
}
// PrivateRR represents an RR that uses a PrivateRdata user-defined type.
// It mocks normal RRs and implements dns.RR interface.
type PrivateRR struct {
Hdr RR_Header
Data PrivateRdata
generator func() PrivateRdata // for copy
}
// Header return the RR header of r.
func (r *PrivateRR) Header() *RR_Header { return &r.Hdr }
func (r *PrivateRR) String() string { return r.Hdr.String() + r.Data.String() }
// Private len and copy parts to satisfy RR interface.
func (r *PrivateRR) len(off int, compression map[string]struct{}) int {
l := r.Hdr.len(off, compression)
l += r.Data.Len()
return l
}
func (r *PrivateRR) copy() RR {
// make new RR like this:
rr := &PrivateRR{r.Hdr, r.generator(), r.generator}
if err := r.Data.Copy(rr.Data); err != nil {
panic("dns: got value that could not be used to copy Private rdata: " + err.Error())
}
return rr
}
func (r *PrivateRR) pack(msg []byte, off int, compression compressionMap, compress bool) (int, error) {
n, err := r.Data.Pack(msg[off:])
if err != nil {
return len(msg), err
}
off += n
return off, nil
}
func (r *PrivateRR) unpack(msg []byte, off int) (int, error) {
off1, err := r.Data.Unpack(msg[off:])
off += off1
return off, err
}
func (r *PrivateRR) parse(c *zlexer, origin string) *ParseError {
var l lex
text := make([]string, 0, 2) // could be 0..N elements, median is probably 1
Fetch:
for {
// TODO(miek): we could also be returning _QUOTE, this might or might not
// be an issue (basically parsing TXT becomes hard)
switch l, _ = c.Next(); l.value {
case zNewline, zEOF:
break Fetch
case zString:
text = append(text, l.token)
}
}
err := r.Data.Parse(text)
if err != nil {
return &ParseError{"", err.Error(), l}
}
return nil
}
func (r *PrivateRR) isDuplicate(r2 RR) bool { return false }
// PrivateHandle registers a private resource record type. It requires
// string and numeric representation of private RR type and generator function as argument.
func PrivateHandle(rtypestr string, rtype uint16, generator func() PrivateRdata) {
rtypestr = strings.ToUpper(rtypestr)
TypeToRR[rtype] = func() RR { return &PrivateRR{RR_Header{}, generator(), generator} }
TypeToString[rtype] = rtypestr
StringToType[rtypestr] = rtype
}
// PrivateHandleRemove removes definitions required to support private RR type.
func PrivateHandleRemove(rtype uint16) {
rtypestr, ok := TypeToString[rtype]
if ok {
delete(TypeToRR, rtype)
delete(TypeToString, rtype)
delete(StringToType, rtypestr)
}
}
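A sketch of a PrivateRdata implementation registered via PrivateHandle; the ISBN type name and the 0xFF01 type code are made up and fall in the RFC 6895 private-use range:

package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/miekg/dns"
)

// ISBN is a toy rdata type holding a single opaque string.
type ISBN struct{ Data string }

func (i *ISBN) String() string { return i.Data }
func (i *ISBN) Parse(txt []string) error {
	i.Data = strings.Join(txt, " ")
	return nil
}
func (i *ISBN) Pack(buf []byte) (int, error) {
	return copy(buf, i.Data), nil
}
func (i *ISBN) Unpack(buf []byte) (int, error) {
	i.Data = string(buf)
	return len(buf), nil
}
func (i *ISBN) Copy(dst dns.PrivateRdata) error {
	d, ok := dst.(*ISBN)
	if !ok {
		return dns.ErrRdata
	}
	d.Data = i.Data
	return nil
}
func (i *ISBN) Len() int { return len(i.Data) }

func main() {
	const typeISBN = 0xFF01 // private-use type code, made up for this sketch

	dns.PrivateHandle("ISBN", typeISBN, func() dns.PrivateRdata { return new(ISBN) })
	defer dns.PrivateHandleRemove(typeISBN)

	rr, err := dns.NewRR("example.org. 3600 IN ISBN 978-3-16-148410-0")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(rr) // example.org.	3600	IN	ISBN	978-3-16-148410-0
}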

52
vendor/github.com/miekg/dns/reverse.go generated vendored Normal file

@@ -0,0 +1,52 @@
package dns
// StringToType is the reverse of TypeToString, needed for string parsing.
var StringToType = reverseInt16(TypeToString)
// StringToClass is the reverse of ClassToString, needed for string parsing.
var StringToClass = reverseInt16(ClassToString)
// StringToOpcode is a map of opcode names to opcodes.
var StringToOpcode = reverseInt(OpcodeToString)
// StringToRcode is a map of rcode names to rcodes.
var StringToRcode = reverseInt(RcodeToString)
func init() {
// Preserve previous NOTIMP typo, see github.com/miekg/dns/issues/733.
StringToRcode["NOTIMPL"] = RcodeNotImplemented
}
// StringToAlgorithm is the reverse of AlgorithmToString.
var StringToAlgorithm = reverseInt8(AlgorithmToString)
// StringToHash is a map of names to hash IDs.
var StringToHash = reverseInt8(HashToString)
// StringToCertType is the reverse of CertTypeToString.
var StringToCertType = reverseInt16(CertTypeToString)
// Reverse a map
func reverseInt8(m map[uint8]string) map[string]uint8 {
n := make(map[string]uint8, len(m))
for u, s := range m {
n[s] = u
}
return n
}
func reverseInt16(m map[uint16]string) map[string]uint16 {
n := make(map[string]uint16, len(m))
for u, s := range m {
n[s] = u
}
return n
}
func reverseInt(m map[int]string) map[string]int {
n := make(map[string]int, len(m))
for u, s := range m {
n[s] = u
}
return n
}
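A short sketch of the reverse maps in use:

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	fmt.Println(dns.StringToType["AAAA"])      // 28
	fmt.Println(dns.StringToClass["IN"])       // 1
	fmt.Println(dns.StringToRcode["NXDOMAIN"]) // 3
	// The deliberately preserved typo still resolves (see the init above).
	fmt.Println(dns.StringToRcode["NOTIMPL"] == dns.RcodeNotImplemented) // true
}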

86
vendor/github.com/miekg/dns/sanitize.go generated vendored Normal file

@@ -0,0 +1,86 @@
package dns
// Dedup removes identical RRs from rrs. It preserves the original ordering.
// The lowest TTL of any duplicates is used in the remaining one. Dedup modifies
// rrs.
// m is used to store the RRs temporarily. If it is nil, a new map will be allocated.
func Dedup(rrs []RR, m map[string]RR) []RR {
if m == nil {
m = make(map[string]RR)
}
// Save the keys, so we don't have to call normalizedString twice.
keys := make([]*string, 0, len(rrs))
for _, r := range rrs {
key := normalizedString(r)
keys = append(keys, &key)
if mr, ok := m[key]; ok {
// Shortest TTL wins.
rh, mrh := r.Header(), mr.Header()
if mrh.Ttl > rh.Ttl {
mrh.Ttl = rh.Ttl
}
continue
}
m[key] = r
}
// If the length of the result map equals the amount of RRs we got,
// it means they were all different. We can then just return the original rrset.
if len(m) == len(rrs) {
return rrs
}
j := 0
for i, r := range rrs {
// If keys[i] lives in the map, we should copy and remove it.
if _, ok := m[*keys[i]]; ok {
delete(m, *keys[i])
rrs[j] = r
j++
}
if len(m) == 0 {
break
}
}
return rrs[:j]
}
// normalizedString returns a normalized string from r. The TTL
// is removed and the domain name is lowercased. We go from this:
// DomainName<TAB>TTL<TAB>CLASS<TAB>TYPE<TAB>RDATA to:
// lowercasename<TAB>CLASS<TAB>TYPE...
func normalizedString(r RR) string {
// A string Go DNS makes has: domainname<TAB>TTL<TAB>...
b := []byte(r.String())
// find the first non-escaped tab, then another, so we capture where the TTL lives.
esc := false
ttlStart, ttlEnd := 0, 0
for i := 0; i < len(b) && ttlEnd == 0; i++ {
switch {
case b[i] == '\\':
esc = !esc
case b[i] == '\t' && !esc:
if ttlStart == 0 {
ttlStart = i
continue
}
if ttlEnd == 0 {
ttlEnd = i
}
case b[i] >= 'A' && b[i] <= 'Z' && !esc:
b[i] += 32
default:
esc = false
}
}
// remove TTL.
copy(b[ttlStart:], b[ttlEnd:])
cut := ttlEnd - ttlStart
return string(b[:len(b)-cut])
}
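A sketch of Dedup on a small, made-up RR set; two of the A records differ only in TTL, so a single copy survives with the lower TTL:

package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func mustRR(s string) dns.RR {
	rr, err := dns.NewRR(s)
	if err != nil {
		log.Fatal(err)
	}
	return rr
}

func main() {
	rrs := []dns.RR{
		mustRR("example.org. 3600 IN A 192.0.2.1"),
		mustRR("example.org. 60   IN A 192.0.2.1"), // duplicate, lower TTL
		mustRR("example.org. 3600 IN A 192.0.2.2"),
	}

	deduped := dns.Dedup(rrs, nil)
	for _, rr := range deduped {
		fmt.Println(rr)
	}
	// Two records remain; the surviving 192.0.2.1 record carries TTL 60.
}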

1368
vendor/github.com/miekg/dns/scan.go generated vendored Normal file

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff